column              statistic       min   max
query_id            stringlengths   32    32
query               stringlengths   9     4.01k
positive_passages   listlengths     1     1
negative_passages   listlengths     88    101
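The four columns above describe a passage-retrieval dataset: each row pairs a natural-language query (typically a function docstring) with exactly one positive passage and a long list of negative passages, all sharing the same docid/score/text/title keys. A minimal sketch of one row in Python, using values quoted from the first row below; the shape checks are assumptions derived from the column statistics, not an official loader API:

```python
import json

# Minimal sketch of one row, assuming the schema above: a 32-character
# hex query_id, a docstring-style query, exactly one positive passage,
# and 88-101 negative passages with the same keys.
row = {
    "query_id": "625027057adfb81e74205b67f5098fb6",
    "query": "Initialises a bootstrap regression object.",
    "positive_passages": [
        {
            "docid": "15bf4fe75cae3b8a7bb5d8e482c58e9f",
            "score": "0.6192862",
            "text": "def __init__(self, X_data, y_data, reg): ...",
            "title": "",
        }
    ],
    "negative_passages": [],  # 88-101 entries of the same shape in the data

}

# Shape checks implied by the column statistics (assumptions).
assert len(row["query_id"]) == 32
assert len(row["positive_passages"]) == 1
print(json.dumps(row, indent=2)[:120])
```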
625027057adfb81e74205b67f5098fb6
Initialises a bootstrap regression object.
[ { "docid": "15bf4fe75cae3b8a7bb5d8e482c58e9f", "score": "0.6192862", "text": "def __init__(self, X_data, y_data, reg):\n\n assert X_data.shape[0] == len(y_data), (\"x and y data not of equal\"\n \" lengths\")\n\n assert hasattr(reg, \"fit\"), (\"regression method must have \"\n \"attribute fit()\")\n assert hasattr(reg, \"predict\"), (\"regression method must have \"\n \"attribute predict()\")\n\n self.X_data = cp.deepcopy(X_data)\n self.y_data = cp.deepcopy(y_data)\n self._reg = reg", "title": "" } ]
[ { "docid": "ee09708f26f2200f9859604ff3d61d46", "score": "0.67858565", "text": "def initialize(self):\n self.logreg = linear_model.LogisticRegression(C=1e5)", "title": "" }, { "docid": "f589575f2419ec69db992f331c348cab", "score": "0.64760876", "text": "def __test_bootstrap_fit():\n from regression import OLSRegression\n import sklearn.preprocessing as sk_preproc\n\n # Initial values\n deg = 2\n N_bs = 1000\n n = 100\n test_percent = 0.35\n noise = 0.3\n np.random.seed(1234)\n\n # Sets up random matrices\n x = np.random.rand(n, 1)\n\n poly = sk_preproc.PolynomialFeatures(degree=deg, include_bias=True)\n\n y = 2*x*x + np.exp(-2*x) + noise * \\\n np.random.randn(x.shape[0], x.shape[1])\n\n # Sets up design matrix\n X = poly.fit_transform(x)\n\n # Performs regression\n reg = OLSRegression()\n reg.fit(X, y)\n y_predict = reg.predict(X).ravel()\n print(\"Regular linear regression\")\n print(\"r2: {:-20.16f}\".format(reg.score(X, y)))\n print(\"mse: {:-20.16f}\".format(metrics.mse(y, reg.predict(X))))\n print(\"Beta: \", reg.coef_.ravel())\n print(\"var(Beta): \", reg.coef_var.ravel())\n print(\"\")\n\n # Performs a bootstrap\n print(\"Bootstrapping\")\n bs_reg = BootstrapRegression(X, y, OLSRegression())\n bs_reg.bootstrap(N_bs, test_percent=test_percent)\n\n print(\"r2: {:-20.16f}\".format(bs_reg.r2))\n print(\"mse: {:-20.16f}\".format(bs_reg.mse))\n print(\"Bias^2:{:-20.16f}\".format(bs_reg.bias))\n print(\"Var(y):{:-20.16f}\".format(bs_reg.var))\n print(\"Beta: \", bs_reg.coef_.ravel())\n print(\"var(Beta): \", bs_reg.coef_var.ravel())\n print(\"mse = Bias^2 + Var(y) = \")\n print(\"{} = {} + {} = {}\".format(bs_reg.mse, bs_reg.bias, bs_reg.var,\n bs_reg.bias + bs_reg.var))\n print(\"Diff: {}\".format(abs(bs_reg.bias + bs_reg.var - bs_reg.mse)))\n\n import matplotlib.pyplot as plt\n plt.plot(x.ravel(), y, \"o\", label=\"Data\")\n plt.plot(x.ravel(), y_predict, \"o\",\n label=r\"Pred, R^2={:.4f}\".format(reg.score(X, y)))\n plt.errorbar(bs_reg.x_pred_test, bs_reg.y_pred,\n yerr=np.sqrt(bs_reg.y_pred_var), fmt=\"o\",\n label=r\"Bootstrap Prediction, $R^2={:.4f}$\".format(bs_reg.r2))\n plt.xlabel(r\"$x$\")\n plt.ylabel(r\"$y$\")\n plt.title(r\"$2x^2 + \\sigma^2$\")\n plt.legend()\n plt.show()", "title": "" }, { "docid": "842b581f2c01ad7f0044eb57004d871b", "score": "0.63958067", "text": "def __init__(self, regression_model):\n super(ModelExploration, self).__init__()\n self.model = regression_model", "title": "" }, { "docid": "00dcc1dd48cad501863c258131e99f86", "score": "0.62544835", "text": "def BootstrapWrapper(X_train, y_train, reg, N_bs, test_percent=0.4,\n X_test=None, y_test=None, shuffle=False):\n\n # Checks if we have provided test data or not\n if ((isinstance(X_test, type(None))) and\n (isinstance(y_test, type(None)))):\n\n # Splits X data and design matrix data\n X_train, X_test, y_train, y_test = \\\n sk_modsel.train_test_split(X_train, y_train,\n test_size=test_percent,\n shuffle=shuffle)\n\n bs_reg = BootstrapRegression(X_train, y_train, reg)\n bs_reg.bootstrap(N_bs, test_percent=test_percent, X_test=X_test,\n y_test=y_test)\n\n return {\n \"r2\": bs_reg.r2, \"mse\": bs_reg.mse, \"bias\": bs_reg.bias,\n \"var\": bs_reg.var, \"diff\": bs_reg.mse - bs_reg.bias - bs_reg.var,\n \"coef\": bs_reg.beta_coefs,\n \"coef_var\": bs_reg.beta_coefs_var, \"x_pred\": bs_reg.x_pred_test,\n \"y_pred\": bs_reg.y_pred, \"y_pred_var\": bs_reg.y_pred_var}", "title": "" }, { "docid": "5dd79a93bb34d89314a415c33763e744", "score": "0.6095724", "text": "def __init__(self, attributes=None, labels=None):\n 
self.attributes = attributes\n self.labels = labels\n\n self.test_size = None\n self.verbose = None\n\n self.linear_regression = None\n self.random_forest = None\n self.SVR = None\n self.nu_SVR = None\n self.linear_SVR = None\n self.XGB_regressor = None\n\n self._regression_models = {\"Model\": [\"R2 Score\", \"Time (seconds)\"]}\n self._failures = []", "title": "" }, { "docid": "cc08f77a58d7c74d29d3a3cb26ab978f", "score": "0.609151", "text": "def _initialize(self, X, freeze_genes=False):\n bp, dp, xi, eta, theta, beta = self._setup(X, freeze_genes,\n reinit=True)\n self.bp = bp\n self.dp = dp\n self.xi = xi\n self.eta = eta\n self.theta = theta\n self.beta = beta", "title": "" }, { "docid": "9414a6bcaeaaa10a9e4b444ef5b5f681", "score": "0.603875", "text": "def __init__(self, rff_dim=1, alpha=1.0, sigma=1.0):\n self.fitted = False\n self.rff_dim = rff_dim\n self.sigma = sigma\n self.lm = Ridge(alpha=alpha)\n self.b_ = None\n self.W_ = None", "title": "" }, { "docid": "9dea1bbe83690606f82b81e1f1e04629", "score": "0.60377675", "text": "def __init_bootstrap_kernel(self):\r\n ctype_indices = dtype_to_ctype(self.dtype_indices)\r\n self.bootstrap_fill= mk_kernel((ctype_indices,), \"bootstrap_fill\",\r\n \"bootstrap_fill.cu\")\r\n self.bootstrap_reshuffle, tex_ref = mk_tex_kernel((ctype_indices, 128), \"bootstrap_reshuffle\",\r\n \"tex_mark\", \"bootstrap_reshuffle.cu\")\r\n \r\n self.bootstrap_fill.prepare(\"PPii\")\r\n self.bootstrap_reshuffle.prepare(\"PPPi\")\r\n self.mark_table.bind_to_texref_ext(tex_ref)", "title": "" }, { "docid": "3564b54525e5c08df76a1fc477ff8d95", "score": "0.60367686", "text": "def bootstrap(new_x, new_t, new_y, new_df, t1, model, name='s_learner'):\n tmp_1, tmp_2, tmp_3 = list(), list(), list()\n n = [*range(len(new_df))]\n for b in range(50):\n n_b = np.random.choice(n, len(n))\n df_b = new_df.loc[n_b]\n df_b = df_b.loc[:, (df_b != 0).any(axis=0)]\n t_b = new_t.take(n_b)\n y_b = new_y.take(n_b)\n x_b = new_x[n_b]\n if name == 's_learner':\n tmp_2.append(s_learner(x_b, t_b, y_b, model_name='Lasso'))\n tmp_3.append(s_learner(x_b, t_b, y_b, model_name='Ridge'))\n if name == 't_learner':\n tmp_2.append(T_learner(x_b, y_b, df_b, t1, model_name='Lasso'))\n tmp_3.append(T_learner(x_b, y_b, df_b, t1, model_name='Ridge'))\n if name == 'IPW':\n propensity_score_b = propensity_score_bootstrap(x_b, model)\n tmp_1.append(IPW_bootstrap(propensity_score_b, t_b, y_b))\n return tmp_1, tmp_2, tmp_3", "title": "" }, { "docid": "5fc8fc69439ceddfa58d1b35172d1346", "score": "0.60220194", "text": "def __init__(self, fit_bias=True):\n self.weight = None\n self.fit_bias = fit_bias", "title": "" }, { "docid": "5fc8fc69439ceddfa58d1b35172d1346", "score": "0.60220194", "text": "def __init__(self, fit_bias=True):\n self.weight = None\n self.fit_bias = fit_bias", "title": "" }, { "docid": "5c52ca4e68afdf80d31db4a1a4178730", "score": "0.60132176", "text": "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': None, 'bias': None}\n self.grads = {'weight': None, 'bias': None}\n mu = 0\n sigma = 0.0001\n self.params['weight'] = np.random.normal(mu, sigma, (out_features, in_features))\n self.params['bias'] = np.zeros((out_features,1))\n self.grads['weight'] = np.zeros((out_features, in_features))\n self.grads['bias'] = np.zeros((out_features,1))\n ########################\n # END OF YOUR CODE #\n #######################", "title": "" }, { "docid": "85fa63d8fad5a7681470a803775a78bd", "score": "0.5979155", 
"text": "def __init__(self, coefficients, bias):\n self.coefficients = list(np.ravel(coefficients))\n self.bias = bias", "title": "" }, { "docid": "68521fbb2b6d6381687d866fb03168df", "score": "0.5975265", "text": "def __init__(self, layers):\n self.weights, self.biases = weightInit.RandomGaussian().init_w_and_b(layers)\n self.layers = layers", "title": "" }, { "docid": "2c2611e13868e8ee5bda2d9499f3b7fb", "score": "0.5957557", "text": "def __init__(self, beta: float = 1.0, **kwargs):\n super(swish, self).__init__(**kwargs)\n # self.beta = self.add_weight(shape=None, dtype=self.dtype, trainable=trainable)\n # self.set_weights([np.array(beta)])\n self.beta = float(beta)", "title": "" }, { "docid": "76729805409c13debe78573d269d5021", "score": "0.59420806", "text": "def __init__(self, W=np.array(0, dtype=float), b=0, epoch=1000, learn_rate=0.5):\n self.w_ = W\n self.b_ = b\n self.e_ = epoch\n self.l_ = learn_rate\n np.random.seed(143)", "title": "" }, { "docid": "0ac50ba4c309ada66b29c26aa4e59179", "score": "0.59287727", "text": "def bootstrap():\n bootstrap.bootstrap()", "title": "" }, { "docid": "d5d43322ccfce489ef225669a8460e9e", "score": "0.59244454", "text": "def _setup(self):\n # delayed initialization state\n self.bootstrap_state = 3\n # (should be a customizable param.) - number of examples in 1st batch\n self.bootstrap_count = 1024", "title": "" }, { "docid": "6d33e2d400ccbcc62664db0ecdcc0147", "score": "0.5923605", "text": "def __init__(self, x, y):\n\n self.x = x\n self.y = y\n self.beta = np.zeros((np.shape(x)[1], 1))", "title": "" }, { "docid": "6279c5004ddcd28eb03d554bd63c249f", "score": "0.5920065", "text": "def bootstrap(self):\n pass", "title": "" }, { "docid": "246e5b57a831bbb904e7bd82302d9121", "score": "0.59190065", "text": "def __init__(self, fit_intercept: bool = True, normalize: bool = False, **kwargs):\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.kwargs = kwargs\n super().__init__(\n regressor=LinearRegression(fit_intercept=self.fit_intercept, normalize=self.normalize, **self.kwargs)\n )", "title": "" }, { "docid": "246e5b57a831bbb904e7bd82302d9121", "score": "0.59190065", "text": "def __init__(self, fit_intercept: bool = True, normalize: bool = False, **kwargs):\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.kwargs = kwargs\n super().__init__(\n regressor=LinearRegression(fit_intercept=self.fit_intercept, normalize=self.normalize, **self.kwargs)\n )", "title": "" }, { "docid": "e6e3eba9e5e34819fba3da128c94549f", "score": "0.59178376", "text": "def __init__(self, num_features, learning_rate=0.05):\n\n self.beta = zeros(num_features)\n self.learning_rate = learning_rate", "title": "" }, { "docid": "f414d6019039890987907f42078acab7", "score": "0.5916264", "text": "def __init__(self, min_func, model_class, parameters, X, y):\r\n self.error = min_func\r\n self.model_class = model_class\r\n self.parameters = parameters\r\n self.X = X\r\n self.y = y", "title": "" }, { "docid": "68fd9bf9abd45e06bbd95911c95b7604", "score": "0.5911269", "text": "def __init__(self):\n self.feature_labels_ = None\n self.model = RandomForestRegressor(n_estimators=100)", "title": "" }, { "docid": "6257b8e0307117192122fe791cf44b55", "score": "0.5909974", "text": "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.linear_model\n self.model = sklearn.linear_model.LarsCV", "title": "" }, { "docid": "2bb1a87d023233e698cc94a7783ce4d6", "score": "0.5908077", "text": "def SKLearnBootstrap(X_train, y_train, reg, N_bs, 
test_percent=0.4,\n X_test=None, y_test=None, shuffle=False):\n\n # Checks if we have provided test data or not\n if ((isinstance(X_test, type(None))) and\n (isinstance(y_test, type(None)))):\n\n # Splits X data and design matrix data\n X_train, X_test, y_train, y_test = \\\n sk_modsel.train_test_split(X_train, y_train,\n test_size=test_percent,\n shuffle=shuffle)\n\n # Storage containers for results\n y_pred_array = np.empty((y_test.shape[0], N_bs))\n r2_array = np.empty(N_bs)\n mse_array = np.empty(N_bs)\n\n beta_coefs = []\n\n # for i_bs, val_ in enumerate(bs):\n for i_bs in tqdm(range(N_bs), desc=\"SKLearnBootstrap\"):\n X_boot, y_boot = sk_utils.resample(X_train, y_train)\n # X_boot, y_boot = X_train[train_index], y_train[train_index]\n\n reg.fit(X_boot, y_boot)\n y_predict = reg.predict(X_test)\n y_pred_array[:, i_bs] = y_predict.ravel()\n\n r2_array[i_bs] = sk_metrics.r2_score(y_test, y_predict)\n # r2_array[i_bs] = metrics.r2(y_test, y_predict)\n # mse_array[i_bs] = sk_metrics.mean_squared_error(y_test, y_predict)\n\n beta_coefs.append(reg.coef_)\n\n # R^2 score, 1 - sum(y-y_approx)/sum(y-mean(y))\n r2 = np.mean(r2_array)\n\n # # Mean Square Error, mean((y - y_approx)**2)\n # _mse = np.mean((y_test.ravel() - y_pred_list)**2,\n # axis=0, keepdims=True)\n # mse = np.mean(mse_array)\n _mse = np.mean((y_test - y_pred_array)**2,\n axis=1, keepdims=True)\n mse = np.mean(_mse)\n\n # Bias, (y - mean(y_approx))^2\n _y_pred_mean = np.mean(y_pred_array, axis=1, keepdims=True)\n bias = np.mean((y_test - _y_pred_mean)**2)\n\n # Variance, var(y_approx)\n var = np.mean(np.var(y_pred_array, axis=1, keepdims=True))\n\n beta_coefs = np.asarray(beta_coefs)\n\n coef_var = np.asarray(beta_coefs).var(axis=0)\n coef_ = np.asarray(beta_coefs).mean(axis=0)\n\n X_pred_test = X_test\n y_pred = y_pred_array.mean(axis=1)\n y_pred_var = y_pred_array.var(axis=1)\n\n return {\n \"r2\": r2, \"mse\": mse, \"bias\": bias,\n \"var\": var, \"diff\": mse - bias - var,\n \"coef\": coef_, \"coef_var\": coef_var, \"x_pred\": X_test[:, 1],\n \"y_pred\": y_pred, \"y_pred_var\": y_pred_var}", "title": "" }, { "docid": "0103d832dda689a7005989a5a147658f", "score": "0.5902127", "text": "def __init__(\n self,\n seq_len: int,\n alphabet: str,\n r_squared_threshold: float = 0.5,\n models: Optional[List[flexs.Model]] = None,\n ):\n super().__init__(name=\"DynaPPOEnsemble\")\n\n if models is None:\n models = [\n # FLEXS models\n baselines.models.GlobalEpistasisModel(seq_len, 100, alphabet),\n baselines.models.MLP(seq_len, 200, alphabet),\n baselines.models.CNN(seq_len, 32, 100, alphabet),\n # Sklearn models\n baselines.models.LinearRegression(alphabet),\n baselines.models.RandomForest(alphabet),\n baselines.models.SklearnRegressor(\n sklearn.neighbors.KNeighborsRegressor(),\n alphabet,\n \"nearest_neighbors\",\n ),\n baselines.models.SklearnRegressor(\n sklearn.linear_model.Lasso(), alphabet, \"lasso\"\n ),\n baselines.models.SklearnRegressor(\n sklearn.linear_model.BayesianRidge(),\n alphabet,\n \"bayesian_ridge\",\n ),\n baselines.models.SklearnRegressor(\n sklearn.gaussian_process.GaussianProcessRegressor(),\n alphabet,\n \"gaussian_process\",\n ),\n baselines.models.SklearnRegressor(\n sklearn.ensemble.GradientBoostingRegressor(),\n alphabet,\n \"gradient_boosting\",\n ),\n baselines.models.SklearnRegressor(\n sklearn.tree.ExtraTreeRegressor(), alphabet, \"extra_trees\"\n ),\n ]\n\n self.models = models\n self.r_squared_vals = np.ones(len(self.models))\n self.r_squared_threshold = r_squared_threshold", "title": "" }, { "docid": 
"5a502e660224691c096072c5175f9e59", "score": "0.58921844", "text": "def initialize(self):\n self.securities = tickers\n\n self.sids = [self.symbol(security) for security in self.securities]\n\n # there needs to be enough data points to make a good model\n self.data_points = 100\n\n # amount of prior bars to study\n self.window_length = 50\n\n # Use a random forest regressor\n self.mdl = RandomForestRegressor()\n\n # stores recent prices\n self.recent_prices = OrderedDict()\n\n for security in self.securities:\n self.recent_prices[security] = []\n\n # initialise the model\n self.imp = Imputer(missing_values='NaN', strategy='mean', axis=0)\n\n self.set_commission(commission.PerShare(cost=0.013, min_trade_cost=1.3))", "title": "" }, { "docid": "9b2a4209b078556ca7999faaa4b9dfbd", "score": "0.58337677", "text": "def __init__(self, \r\n n_estimators = 10, \r\n max_features = None, \r\n min_samples_split = 1, \r\n bootstrap = True, \r\n verbose = False, \r\n debug = False): \r\n self.max_features = max_features\r\n self.min_samples_split = min_samples_split\r\n self.bootstrap = bootstrap\r\n self.verbose = verbose\r\n self.n_estimators = n_estimators\r\n self.debug = debug\r\n self._trees = list()", "title": "" }, { "docid": "242d4a1a1419f4625e3cc64bdfa546b5", "score": "0.5826027", "text": "def __init__(self, lam):\r\n self.weights_ = None\r\n self.lambda_ = lam", "title": "" }, { "docid": "204b16c9aeebdf82a2e9e038dd059ca3", "score": "0.5824278", "text": "def __init__(self,\n phoenix_spec,\n alpha=0.05,\n degree=3,\n n_mono=5,\n min_for_regression=3,\n num_random_samples=10000,\n seed=None):\n # Spec of phoenix\n self._phoenix_spec = phoenix_spec\n # The alpha of lasso solver (please read on lasso solver to understand this\n # constant. In a nutshell, this control regularization for lasso. alpha\n # equal zero means regular linear regression - however, try not to use\n # alpha = 0 because of numerical stability of the lasso implementation with\n # that value.\n self._alpha = alpha\n # The degree of the polynomial we try to fit the data with.\n self._degree = degree\n # The number of monomials in the function that fits the data. 
I.e., we take\n # the n_mono highest coefficients in the function and remove all the rest.\n self._n_mono = n_mono\n # Seed for np.random.randint\n self._seed = seed\n self._num_params = self._phoenix_spec.minimum_depth * len(\n self._phoenix_spec.blocks_to_use)\n # Blocktype to index -- needed to translate block type to a one-hot vector.\n self._block_indices = common.block_indices(phoenix_spec)\n # Minimal amount of data point before applying regression.\n self._min_for_regression = min_for_regression\n # Must be greater than 1.\n self._num_random_samples = num_random_samples\n assert self._num_random_samples > 1", "title": "" }, { "docid": "84ebeaf94914754e7c49046678116476", "score": "0.58224523", "text": "def __init__(self, nbins=10, metric=\"Accuracy\", models=[], targetcols=[]):\n self._nbins = nbins\n self._models = models\n self._targetcols = targetcols\n self.w_ = pd.DataFrame()\n self.threshold = 0.5\n self._metric = metric\n self.pred_df = pd.DataFrame()", "title": "" }, { "docid": "b081d0440fd1d9efc9cf1af524533b2a", "score": "0.57963395", "text": "def __init__(self,\n name,\n input_size,\n output_size,\n with_bias=True,\n w_init=initializers.GlorotUniform(),\n b_init=initializers.Zeros()):\n self._input_size = input_size\n self._output_size = output_size\n self._with_bias = with_bias\n self._w_init = w_init\n self._b_init = b_init\n super(Linear, self).__init__(name)", "title": "" }, { "docid": "f091eb1877650b01705ed7488b4a2f3e", "score": "0.5792195", "text": "def __init__(self, uniform_features, regularization=5.):\n self.uniform_features = uniform_features\n # real matrix and vector will be computed during fitting\n self.A = None\n self.A_t = None\n self.w = None\n HessianLossFunction.__init__(self, regularization=regularization)", "title": "" }, { "docid": "bfdeaefc7ec03936fe7c022df45c3e76", "score": "0.5778312", "text": "def __init__(self):\n self.scaler = MinMaxScaler()\n self.output_feature = \"quality\"\n self.model_name_GD = model_name_GD\n self.model_name_OLS = model_name_OLS", "title": "" }, { "docid": "ac912f848d1de1d240afea06fb3abde8", "score": "0.5777511", "text": "def _init_model(X, y):\n with pm.Model() as model:\n # Define hyper-prior\n alpha = pm.Gamma(\"alpha\", alpha=1e-2, beta=1e-4)\n\n # Define priors'\n w = pm.Normal(\"w\", mu=0, sd=alpha, shape=X.get_value().shape[1])\n sigma = pm.HalfCauchy(\"sigma\", beta=10)\n mu = tt.dot(w, X.T)\n\n # Define likelihood\n likelihood = pm.StudentT(\"y\", nu=1, mu=mu, lam=sigma, observed=y)\n return model", "title": "" }, { "docid": "0e6a48311e3b6fd8ec49f70dd0ba8bf7", "score": "0.5775099", "text": "def __init__(self, lambda_=1, fit_bias=True):\n self.weight = None\n self.lambda_ = lambda_\n self.fit_bias = fit_bias", "title": "" }, { "docid": "84f12b3bf7cb9b604d9767c83d5502b1", "score": "0.5770319", "text": "def __init__(self, lam, mu, nr_arr, seed=None):\n\n # todo: consider to remove seed\n self.lam = lam # arrivals per hour\n self.mu = mu # departures per hour\n self.nr_arr = nr_arr # nr of customers\n\n np.random.seed(seed)", "title": "" }, { "docid": "70cd2dd98bd0ad6d124b78ef60321062", "score": "0.57583326", "text": "def __init__(self,\n model_dir=None,\n label_dimension=1,\n weight_column_name=None,\n config=None,\n feature_engineering_fn=None):\n\n params = {\n \"head\":\n head_lib.regression_head(\n weight_column_name=weight_column_name,\n label_dimension=label_dimension,\n enable_centered_bias=True)\n }\n\n super(DebugRegressor, self).__init__(\n model_fn=debug_model_fn,\n model_dir=model_dir,\n config=config,\n 
params=params,\n feature_engineering_fn=feature_engineering_fn)", "title": "" }, { "docid": "a644acaa11246c134ad4e6ea650dec41", "score": "0.57529885", "text": "def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):\n super(CustomSmoothL1Loss, self).__init__()\n self.beta = beta\n self.reduction = reduction\n self.loss_weight = loss_weight", "title": "" }, { "docid": "4784f60eb06f9647ac62fea635982beb", "score": "0.57472897", "text": "def __init__(self):\n\n\t\tself.training_inputs = np.array([[0,0,1],\n\t\t\t\t\t\t\t \t\t\t [1,1,1],\n\t\t\t\t\t\t\t \t\t \t [1,0,1],\n\t\t\t\t\t\t\t \t\t\t [0,1,1]])\n\n\t\tself.training_outputs = np.array([[0,1,1,0]]).T\n\n\t\tnp.random.seed(1)\n\t\tself.weights = 2 * np.random.random((3,1)) - 1\t# - 1 to 1 \n\n\t\tprint(\"Random Synaptic Weights:\")\n\t\tprint(self.weights)", "title": "" }, { "docid": "5aa0e5ed7945657fc469b7995ba11349", "score": "0.5741899", "text": "def __init__(self, data, labels, num_input):\n self.data = data\n self.labels = labels\n self.num_input = num_input\n\n # Weights and biases\n self.w = tfe.Variable(np.random.rand(1,\n self.num_input).astype(np.float32))\n self.b = tfe.Variable(np.random.rand(1, 1).astype(np.float32))", "title": "" }, { "docid": "5e798452937304585b6d5740cec1c9a2", "score": "0.5737281", "text": "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': None, 'bias': None}\n self.grads = {'weight': None, 'bias': None}\n #raise NotImplementedError\n\n #Mean 0, std = 0.0001 and W was dl x dl-1 dimension and bias dl dimension\n size_weight = (out_features,in_features)\n size_bias = (out_features,1)\n self.params['weight'] = np.random.normal(loc = 0,scale = 0.0001,size = size_weight)\n self.params['bias'] = np.zeros(size_bias)\n self.grads['weight'] = np.zeros(size_weight)\n self.grads['bias'] = np.zeros(size_bias)\n\n ########################\n # END OF YOUR CODE #\n #######################", "title": "" }, { "docid": "ed0cc07bb812ba1fd03eb6c019df0d92", "score": "0.57362074", "text": "def __init__(self, bias = 0):\n self.weights = []\n self.bias = bias", "title": "" }, { "docid": "02c82ef3c621bb1ae7a74f433f46d679", "score": "0.57294047", "text": "def __init__(self, cost, gradient, predict_func, \n alpha=0.01,\n num_iterations=10000):\n # Initialize coefficients in fit method once you know how many features\n # you have.\n self.coeffs = None\n self.cost = cost\n self.gradient = gradient\n self.predict_func = predict_func\n self.alpha = alpha\n self.num_iterations = num_iterations", "title": "" }, { "docid": "b88a21dd685baaff68b6fa1e6875252c", "score": "0.57290465", "text": "def __init__(self, X, y, lamb, rho):\n\n self.y = y\n self.X = np.insert(X, 0, 1., axis=1)\n self.lamb, self.rho = lamb, rho\n self.beta = np.array([self.y[0] / x for x in self.X[0]]).reshape(len(self.X[0]), 1)\n self.theta = self.beta[:]\n self.mu = np.zeros((len(self.X[0]), 1), dtype='float64')\n self.num_data = len(self.y)\n self.num_parameters = len(self.X[0])", "title": "" }, { "docid": "ee4311eb5906deb0d6092a65302ec556", "score": "0.57068825", "text": "def __init__(self, name, hparams, optimizer='RMS'):\n\n self.name = name\n self.hparams = hparams\n self.optimizer_n = optimizer\n\n self.training_freq = hparams.training_freq\n self.training_epochs = hparams.training_epochs\n self.t = 0\n\n self.q = hparams.q\n self.p = hparams.p\n\n self.datasets = [\n ContextualDataset(hparams.context_dim,\n hparams.num_actions,\n hparams.buffer_s)\n 
for _ in range(self.q)\n ]\n\n self.bnn_boot = [\n NeuralBanditModel(optimizer, hparams, '{}-{}-bnn'.format(name, i))\n for i in range(self.q)\n ]", "title": "" }, { "docid": "30039f3bcf6b586fa239e19b65cbc990", "score": "0.56955135", "text": "def __init__(self, epsilons, v_0, w_0, a_0, b_0, basis, RBF_func = None):\n\tself.alpha_const = 1.0\n\tself.delta = 0.05\n\tself.dim = shape(v_0)[0]\n\tself.size = size(epsilons)\n\tself.linreg_list = []\n\tfor epsilon in epsilons:\n\t self.linreg_list.append(BayesLinModel(v_0, w_0, a_0, b_0, epsilon, basis, RBF_func))", "title": "" }, { "docid": "5f8cc3e285469f79976284b26e5d1bc3", "score": "0.56916004", "text": "def __init__(self, input_units, output_units, learning_rate=0.1):\n self.learning_rate = learning_rate\n self.weights = np.random.normal(loc=0.0,\n scale=np.sqrt(2 / (input_units + output_units)),\n size=(input_units, output_units))\n self.biases = np.zeros(output_units)", "title": "" }, { "docid": "79359667c926c9d440e34f73ca912132", "score": "0.56850445", "text": "def __init__(self, num_features, mu, step=lambda x: 0.05):\n\n self.beta = zeros(num_features)\n self.mu = mu\n self.step = step\n self.last_update = {}\n self.last_error = 0\n\n assert self.mu >= 0, \"Regularization parameter must be non-negative\"", "title": "" }, { "docid": "68364b12d8247ae2d6b86053f215e4c1", "score": "0.56841844", "text": "def __init__(self):\n self.gnb = GaussianNB()", "title": "" }, { "docid": "27e0bef86773bddfd4793fb8dea3b65c", "score": "0.56838477", "text": "def __init__(self, lmb):\n\n self.x = None\n self.y = None\n self.coeff = None\n self.lmb = lmb", "title": "" }, { "docid": "cb6d4d50935358522d66a9b049acc3e3", "score": "0.5683454", "text": "def __init__(self,**kwargs):\n LGBMClassifier.__init__(self,**kwargs)\n \n pass", "title": "" }, { "docid": "b0b78350008408b63295555621950891", "score": "0.5668307", "text": "def initialize(self):\n \n ARCPY.SetProgressor(\"default\", (\"Starting to perform Spatial Error \"\n \"regression. 
Loading features...\"))\n\n #### Shorthand Attributes ####\n ssdo = self.ssdo\n\n #### MasterField Can Not Be The Dependent Variable ####\n if ssdo.masterField == self.depVarName:\n ARCPY.AddIDMessage(\"ERROR\", 945, ssdo.masterField, \n ARCPY.GetIDMessage(84112))\n raise SystemExit()\n\n #### Remove the MasterField from Independent Vars #### \n if ssdo.masterField in self.indVarNames:\n self.indVarNames.remove(ssdo.masterField)\n ARCPY.AddIDMessage(\"Warning\", 736, ssdo.masterField)\n\n #### Remove the Dependent Variable from Independent Vars ####\n if self.depVarName in self.indVarNames:\n self.indVarNames.remove(self.depVarName)\n ARCPY.AddIDMessage(\"Warning\", 850, self.depVarName)\n\n #### Raise Error If No Independent Vars ####\n if not len(self.indVarNames):\n ARCPY.AddIDMessage(\"Error\", 737)\n raise SystemExit()\n\n #### Create Dependent Variable ####\n self.allVars = [self.depVarName] + self.indVarNames\n self.y = ssdo.fields[self.depVarName].returnDouble()\n self.n = ssdo.numObs\n self.y.shape = (self.n, 1)\n\n #### Assure that Variance is Larger than Zero ####\n yVar = NUM.var(self.y)\n if NUM.isnan(yVar) or yVar <= 0.0:\n ARCPY.AddIDMessage(\"Error\", 906)\n raise SystemExit()\n\n #### Create Design Matrix ####\n self.k = len(self.indVarNames) + 1\n self.x = NUM.ones((self.n, self.k - 1), dtype = float)\n for column, variable in enumerate(self.indVarNames):\n self.x[:,column] = ssdo.fields[variable].data\n\n #### Set Weights Info ####\n self.w = self.patW.w\n self.wName = self.patW.wName", "title": "" }, { "docid": "5b7313b233f319800585c140d4b95ba2", "score": "0.56633425", "text": "def __init__(self, X, Y, lam=None):\n self.logger = logging.getLogger(__name__)\n #self.logger.setLevel(logging.INFO)\n\n self.X = X\n self.Y = Y\n self.lam = lam\n \n self.lin_clf = None", "title": "" }, { "docid": "3a8b500237648e26bc478fe143966112", "score": "0.56567293", "text": "def __init__(\n self,\n data: RegressionData,\n kernel: Kernel,\n inducing_variable: InducingPointsLike,\n *,\n mean_function: Optional[MeanFunction] = None,\n num_latent_gps: Optional[int] = None,\n noise_variance: Optional[TensorData] = None,\n likelihood: Optional[Gaussian] = None,\n ):\n assert (noise_variance is None) or (\n likelihood is None\n ), \"Cannot set both `noise_variance` and `likelihood`.\"\n if likelihood is None:\n if noise_variance is None:\n noise_variance = 1.0\n likelihood = Gaussian(noise_variance)\n X_data, Y_data = data_input_to_tensor(data)\n num_latent_gps = Y_data.shape[-1] if num_latent_gps is None else num_latent_gps\n super().__init__(kernel, likelihood, mean_function, num_latent_gps=num_latent_gps)\n\n self.data = X_data, Y_data\n self.num_data = X_data.shape[0]\n\n self.inducing_variable: InducingPoints = inducingpoint_wrapper(inducing_variable)", "title": "" }, { "docid": "fc78e6b28da3411e8ae2c96beb3c3567", "score": "0.56416947", "text": "def __init__(self, bias):\n self.bias = bias\n self.weights = []", "title": "" }, { "docid": "316fdd59862f2d6f8edd91e01049b0c9", "score": "0.5640264", "text": "def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(X_init, X_init)", "title": "" }, { "docid": "316fdd59862f2d6f8edd91e01049b0c9", "score": "0.5640264", "text": "def __init__(self, X_init, Y_init, l=1, sigma_f=1):\n self.X = X_init\n self.Y = Y_init\n self.l = l\n self.sigma_f = sigma_f\n self.K = self.kernel(X_init, X_init)", "title": "" }, { "docid": "8b8f6cc949c86f49ce277ffd55285fe7", "score": 
"0.56263447", "text": "def __init__(self, num_features, mu, step=lambda x: 0.05):\n \n self.beta = zeros(num_features)\n self.mu = mu\n self.step = step\n self.last_update = defaultdict(int)\n\n assert self.mu >= 0, \"Regularization parameter must be non-negative\"", "title": "" }, { "docid": "578b610ad8f8862701bf3f7a2140bd2f", "score": "0.5625727", "text": "def setUp(self):\n\n self.hyperp = {'regularization_strength': 1.,\n 'init_learning_rate': 0.1,\n 'decay': 0.,\n 'print_log': False,\n 'warm_start': False}\n self.init_param = 1.\n self.X = np.array([[1.]]).astype(np.float32)\n self.Y = np.array([0.]).astype(np.float32)", "title": "" }, { "docid": "f7ff361afab4c34f5282f4443c432b33", "score": "0.5623827", "text": "def __init__(self, kernel):\n # if the hard constrained SVM is feasible\n self.undefined = True\n # primal variables\n self.lambdas = None\n self.lambda_zero = None\n # dual variables\n self.multipliers = None\n # kernel function\n self.kernel = kernel", "title": "" }, { "docid": "a9cf8906894df09a898ed28dd4f2ac0c", "score": "0.56156856", "text": "def __init__(self, *args):\n this = _ndlml.new_biasKern(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "6d239cb713ca753143cd0fe878fc9392", "score": "0.56118894", "text": "def __init__(self, model_name=None, model_class=None, model_params=None, dataset=None):\n self.model_params = model_params or dict()\n\n # Instantiate a sklearn estimator\n if model_name:\n self.estimator = MODELS[model_name](**self.model_params)\n elif model_class:\n self.estimator = model_class(**self.model_params)\n\n # Assign dataset for reference\n self.dataset = dataset\n\n # Assign data attributes\n self.X_train, self.y_train, self.X_test, self.y_test = self.dataset.split_data()\n\n # Fit the training data to the model\n self.fit()", "title": "" }, { "docid": "c390f0e6236783bf9913d0bb48ae9fe2", "score": "0.5611632", "text": "def __init__(self, X, y, sample_weights, class_weights):\n self.name = \"SVM\"\n self.clf = self.train(X, y, sample_weights, class_weights)", "title": "" }, { "docid": "6d8bf97b4c4d7d325ef70cb7694996b6", "score": "0.56095713", "text": "def __init__(self):\n super(LogLinearModel, self).__init__()\n self.unary_features = dict()\n self.unary_feature_weights = dict()\n self.edge_features = dict()\n self.num_features = dict()\n self.num_edge_features = dict()\n\n # matrix mode placeholders\n self.weight_dim = None\n self.max_states = None\n self.max_unary_features = None\n self.max_edge_features = None\n self.unary_weight_mat = None\n self.edge_weight_mat = None\n self.unary_feature_mat = None\n self.edge_feature_mat = None", "title": "" }, { "docid": "8f7dca31e08bf21b3d4ab40e3b1c46a6", "score": "0.5603662", "text": "def __init__(self, use_model_flag=..., is_pqr=...) 
-> None:\n ...", "title": "" }, { "docid": "8463c44e84397572e3c47f2759a6e059", "score": "0.55896866", "text": "def __init__(self, num_features, lam, eta=lambda x: 0.1):\n \n self.w = zeros(num_features) #create a list of zeros w/the number of features\n self.lam = lam #regularization parameter \n self.eta = eta #set to 0.1\n self.last_update = defaultdict(int)\n\n assert self.lam>= 0, \"Regularization parameter must be non-negative\"", "title": "" }, { "docid": "72ec1380d26433008a4089d744d983cd", "score": "0.5589325", "text": "def __init__(self, data, dep_var=None, indep_vars=None):\n if not isinstance(data, pd.DataFrame):\n print('Parameter \\'data\\' must be a pandas Dataframe of input data.')\n return\n if dep_var is None or indep_vars is None:\n print('Provide parameters: dep_var and indep_vars.')\n return\n if not isinstance(dep_var, str):\n print('Parameter \\'dep_var\\' must be a string.')\n return\n indep_vars_valid = False # default if not validated\n if isinstance(indep_vars, str):\n indep_vars = list(indep_vars)\n indep_vars_valid = True\n if isinstance(indep_vars, list):\n if len(indep_vars) >= 1:\n if all([isinstance(var, str) for var in indep_vars]):\n indep_vars_valid = True\n if not indep_vars_valid:\n print('Parameter \\'indep_vars\\' must be a string or a list of strings.')\n return\n\n # Build inputs to regression fn and run it:\n y = data[dep_var]\n x = data[indep_vars].copy()\n x = sm_api.add_constant(x)\n model = sm_api.OLS(endog=y, exog=x)\n fit = model.fit()\n self.statsmodels_object = fit\n\n # Scalar and naming attributes:\n self.indep_vars = indep_vars # as passed in\n self.dep_var = dep_var # as passed in\n self.nobs = fit.nobs\n self.sigma = fit.mse_resid\n self.r2 = fit.rsquared_adj\n\n # Make solution (indep vars) dataframe:\n df = pd.DataFrame({'Value': fit.params})\n df = df.join(pd.DataFrame({'Stdev': fit.bse})) # use join to enforce consistency\n df = df.join(pd.DataFrame({'Tvalue': fit.tvalues})) # \"\n df = df.join(pd.DataFrame({'PValue': fit.pvalues})) # \"\n df.index = ['Intercept' if x.lower() == 'const' else x for x in df.index]\n df['Name'] = df.index\n self.df_indep_vars = df.copy()\n\n # Make observation dataframe (rows in same order as in input dataframe):\n df = pd.DataFrame({'FittedValue': fit.fittedvalues})\n df['Residual'] = fit.resid\n self.df_observations = df.copy()", "title": "" }, { "docid": "6f38baf94371254d34bf787ae1fa2adb", "score": "0.55888855", "text": "def init_full_model(cls):\n coefs = np.array(\n [\n 77.64331966,\n -3.38357566,\n 0.0,\n 0.0,\n 9.48943144,\n -0.0,\n -0.0,\n -0.0,\n -0.0,\n 0.0,\n -172.1890952,\n -61.6947121,\n 5.22729452,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n -31.84889315,\n -0.0,\n -0.0,\n ]\n )\n\n mu = np.array(\n [\n 1.06823975e00,\n 7.73847312e-01,\n 1.07329200e00,\n 1.13981637e06,\n 6.34888946e02,\n -1.36155073e00,\n -1.72133649e00,\n -3.47084255e00,\n -1.22713574e00,\n 2.63323211e-01,\n -1.47651181e00,\n 3.73629665e01,\n 2.95566653e01,\n -1.36504373e-05,\n 1.07594232e00,\n -6.87024033e-05,\n 1.08010237e00,\n 1.67499372e-02,\n 1.73228530e-02,\n -3.04463017e-04,\n ]\n )\n\n sigma = np.array(\n [\n 1.41520865e-02,\n 1.95217699e-01,\n 1.59416725e-02,\n 7.23228725e06,\n 5.65537087e01,\n 2.58001513e-01,\n 2.70289386e-01,\n 4.51079117e-01,\n 6.71019154e-01,\n 2.05683149e-01,\n 2.25143929e-01,\n 1.71008400e00,\n 5.69541083e-01,\n 5.98941337e-05,\n 1.44721412e-02,\n 6.14265444e-05,\n 1.48826794e-02,\n 6.54248192e-04,\n 7.19974520e-04,\n 5.86172005e-04,\n ]\n )\n\n model = {\"coef_\": coefs}\n name = \"full_model\"\n\n 
date_string = datetime.datetime.now()\n\n trained_model = {\n \"model_type\": \"linear\",\n \"model\": model,\n \"confidence_bounds\": 0.1,\n \"regularization_type\": \"elasticnet\",\n \"timestamp\": date_string.isoformat(),\n \"dataset_id\": None,\n \"hyperparameters\": {},\n \"featureset_name\": \"full_model\",\n \"predicted_quantity\": \"cycle\",\n \"mu\": np.array(mu),\n \"sigma\": np.array(sigma),\n }\n\n return cls(name, trained_model)", "title": "" }, { "docid": "adf21ef12ec36b7fa73167381e6a9c61", "score": "0.55862087", "text": "def __init__(self):\n self.p_value_columns = [\n \"1-spacing\",\n \"2-spacing\",\n \"3-spacing\",\n \"ad\",\n \"ad_transform\",\n \"shapiro\",\n \"jb\",\n \"ddst\",\n ]\n self.pwr = importr(\"PoweR\")\n self.deepmodel = None", "title": "" }, { "docid": "9e6adf2465842986ba324f16de478d17", "score": "0.5577907", "text": "def __init__(self, beta):\n\n self.beta = beta", "title": "" }, { "docid": "ad77416a34c2a9fa8cb702f68efcfd48", "score": "0.5574336", "text": "def __init__(self, slope: float, y_intercept: float):\n self.__slope = slope\n self.__y_intercept = y_intercept\n self._perceptron = Perceptron().built_with(weights_amount=2, learning_rate=0.05)", "title": "" }, { "docid": "23c9901dd3fd4dc8ef0b0ce17ac58c81", "score": "0.55735195", "text": "def __init__(self,\n kernel=None,\n normalize=False,\n verbose=False,\n n_jobs=None,\n random_seed=default_random_seed_value,\n Nystroem=False):\n self.kernel = kernel\n self.normalize = normalize\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.random_seed = random_seed\n self.Nystroem = Nystroem\n self.initialized_ = {\"kernel\": False,\n \"Nystroem\": False,\n \"n_jobs\": False}", "title": "" }, { "docid": "30a0a8cbcbe9642fe8ac919e75d6827c", "score": "0.5573398", "text": "def __init__(self, name, model_function, number_of_variables,\n low=-1, high=1):\n self.model_function = model_function\n self.number_of_variables = number_of_variables\n self.low = low\n self.high = high\n self.name = name", "title": "" }, { "docid": "31fd2ce218143bcf8355d9030205c1ff", "score": "0.5571936", "text": "def model_constructor(self):\n if self.model_name == 'chebyshev':\n self.model = models.Chebyshev1D(degree=self.degree)\n self.model_fit = fitting.LevMarLSQFitter()\n elif self.model_name == 'linear':\n self.model = models.Linear1D()\n self.model_fit = fitting.LinearLSQFitter()\n # return model", "title": "" }, { "docid": "ccd3248115a944c150e3cd6dc9edf09e", "score": "0.55612415", "text": "def __init__(self, methodname):\n super().__init__(methodname)\n\n self.data = jn.array([1.0, 2.0, 3.0, 4.0])\n\n self.W = objax.TrainVar(jn.array([[1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 0., 1., 2.]]))\n self.b = objax.TrainVar(jn.array([-1., 0., 1.]))\n\n # f_lin(x) = W*x + b\n @objax.Function.with_vars(objax.VarCollection({'w': self.W, 'b': self.b}))\n def f_lin(x):\n return jn.dot(self.W.value, x) + self.b.value\n\n self.f_lin = f_lin", "title": "" }, { "docid": "cb2a3340c123a6265116390f7fb477a6", "score": "0.5557222", "text": "def _initialize(self, X, resp):\n nk, xk, sk = _estimate_gaussian_parameters(\n X, resp, self.reg_covar, self.covariance_type\n )\n\n self._estimate_weights(nk)\n self._estimate_means(nk, xk)\n self._estimate_precisions(nk, xk, sk)", "title": "" }, { "docid": "e5d2060b666123eef7aee8bf9f79dbdf", "score": "0.5555457", "text": "def __init__(self, alphas, coefs):\n ##Vector of regularization parameters\n self.alphas = alphas\n ###Array of coefficients\n self.coefs = coefs", "title": "" }, { "docid": 
"8b33f6e31cb569bf4b9e50a485173b0b", "score": "0.55487466", "text": "def __init__(self, theta_dim, mu_scale, scale_scale):\n\n self.theta_dim = theta_dim\n self.prior_mu = stats.multivariate_normal(np.zeros(self.theta_dim), mu_scale * np.eye(self.theta_dim))\n self.prior_scale = stats.uniform(0, scale_scale)", "title": "" }, { "docid": "0510332e9b37ced1f51067b86438e8d2", "score": "0.5547534", "text": "def __init__(self, alpha=0.1, warm_start=True, verbose_training=False):\n\n # Arguments\n self.alpha = alpha\n self.verbose_training = verbose_training\n\n self.model = Lasso(alpha=alpha, fit_intercept=True, warm_start=warm_start)\n\n # set the default plotting color\n self.set_plotting_color()", "title": "" }, { "docid": "6ef54bf8ecccf56d18cb9fbf054fea05", "score": "0.55440843", "text": "def __init__(self, base_kernels):\n super().__init__()\n self.base_kernels = base_kernels", "title": "" }, { "docid": "74dccae96d6b0b62c65dfde8fc7543d9", "score": "0.5540759", "text": "def __init__(self, params, lr=0.001):\n super().__init__(params)\n self.lr = lr", "title": "" }, { "docid": "74dccae96d6b0b62c65dfde8fc7543d9", "score": "0.5540759", "text": "def __init__(self, params, lr=0.001):\n super().__init__(params)\n self.lr = lr", "title": "" }, { "docid": "3e98dbae757cac9dc3d2ad0ddc7c966a", "score": "0.5538926", "text": "def __init__(self, num_inputs, error_fn):\n self.num_inputs = num_inputs\n self.weights = np.random.uniform(-0.5, 0.5, (num_inputs))\n self.error = error_fn", "title": "" }, { "docid": "cea57c80e998654d658e1b861738f663", "score": "0.5537743", "text": "def _create_model(self, X, Y):\n self.X = X\n self.Y = Y\n self.model = ExtraTreesRegressor(bootstrap = self.bootstrap,\n criterion = self.criterion,\n max_depth = self.max_depth,\n max_features = self.max_features,\n max_leaf_nodes = self.max_leaf_nodes,\n min_samples_leaf = self.min_samples_leaf,\n min_samples_split = self.min_samples_split,\n min_weight_fraction_leaf = self.min_weight_fraction_leaf,\n n_estimators = self.n_estimators,\n n_jobs = self.n_jobs,\n oob_score = self.oob_score,\n random_state = self.random_state,\n verbose = self.verbose,\n warm_start = self.warm_start,\n min_variance = self.min_variance)\n\n self.model.fit(X,Y.flatten())", "title": "" }, { "docid": "329878adab047d5909b509ac7c3a8382", "score": "0.55309415", "text": "def __init__(self, active = False):\n # Define the standardscaler object and active atribute:\n self.standardscaler = StandardScaler()\n self.active = active", "title": "" }, { "docid": "7ffe4c804d195dcac4eec3279a22f844", "score": "0.55290276", "text": "def __init__(self, d_model=512):\n self.d_model = d_model", "title": "" }, { "docid": "ec150cf78c0774d0ac11ee44afa2d28d", "score": "0.5512845", "text": "def init_data(self):\n X_init, X_sens_init, y_init = self._data_generator.sample(None, None, None)\n\n self._fit_clf(X_init, X_sens_init, y_init)\n y_hat_init = self._clf.predict(X_init, X_sens_init)\n\n self.true_states.append(X_init, X_sens_init, y_init, y_hat_init)\n self.baseline_states.append(X_init, X_sens_init, y_init, y_hat_init)", "title": "" }, { "docid": "2caf76fa2ecb9b29fdfdfc256fd2c4a9", "score": "0.5511243", "text": "def __init__(self):\n self.M = np.zeros((6, 6))\n self.C = np.zeros((6, 6))\n self.D = np.zeros((6, 6))\n self.D_coeff = np.zeros((6, 6))\n self.g = np.zeros((6, 1))\n self.buoyancy = 0.0\n self.cob = np.zeros((3, 1))\n self.origin = np.zeros((3, 1))\n\n self.load_static_parameters()", "title": "" }, { "docid": "d2259c00abe0d8ec39811f14c0bb4ce5", "score": "0.55099046", 
"text": "def _init_fit(self, X, y):\n now = datetime.datetime.now()\n self.start_fit_ = time.time()\n\n print(\"\\n\")\n print(\"=\"*40)\n print(f\" Estimator: {self._estimator.__class__.__name__}\")\n print(\"-\"*40)\n print(\" \", end=\"\")\n print(now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"-\"*40)\n\n self.model_id_ = uuid.uuid4().time_low\n self.performance_ = pd.DataFrame()\n\n self.results_ = {}\n self.results_.update({\"Model Id\": self.model_id_})", "title": "" }, { "docid": "63bd53fa63c2a9f17109e7b6327beac2", "score": "0.5508129", "text": "def reinit(self):\n self.svmrbf = Svm('rbf', ['gamma', 'regularisation'], [1000, 0.0001])\n self.decision_tree = Forest([20, 3, 2, 200])\n self.adaboost = Adaboost([0.1])\n self.mlp = Mlp([10, 0.1, 0.1])\n self.logreg = Logistic([0.01, 0.001])\n self.svmlinear = Svm('linear', ['no index'], [])\n self.svmpoly = Svm('poly', ['degree'], [1])", "title": "" }, { "docid": "737653dcc4611f1c36529ffc5efb9b95", "score": "0.5506382", "text": "def __init__(self, num_features=10000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), *, normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):\n \n try: \n import sktime\n from sktime.transformations.panel.rocket._minirocket_multivariate import MiniRocketMultivariate\n except ImportError: \n raise ImportError(\"You need to install sktime to be able to use MiniRocketRegressor\")\n \n self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,\n max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state))]\n if normalize_features:\n self.steps += [('scalar', StandardScaler(with_mean=False))]\n \n self.steps += [('ridgecv', RidgeCV(alphas=alphas, scoring=scoring, **kwargs))]\n store_attr()\n self._validate_steps()", "title": "" }, { "docid": "f4e32353ea677d07db4abeaaccc1dcde", "score": "0.5502832", "text": "def _init_model(self):\n self.gp = GaussianProcess(corr='absolute_exponential', theta0=1e-1,\n thetaL=None, thetaU=None,\n nugget=(self.dy / self.y) ** 2,random_start=100, verbose=self.verbose)\n self.gp.fit(np.atleast_2d(self.X).T, self.y)", "title": "" }, { "docid": "0e762ad100b97a6e0f94dd87284f5c31", "score": "0.5501281", "text": "def __init__(self):\r\n # A dummy layer does nothing\r\n self.weights = np.zeros(shape=(input.shape[1], 10))\r\n self.bias = np.zeros(shape=(10,))\r\n pass", "title": "" }, { "docid": "9db09392815dcf9fa6119856a230576f", "score": "0.55010635", "text": "def __init__(self, learning_rate=1e-3, reg=1e-5, num_iters=100,\n batch_size=200, verbose=False, dtype=np.float32, is_regression=True):\n self.params = {}\n self.learning_rate = learning_rate\n self.reg = reg\n self.num_iters = num_iters\n self.batch_size = batch_size\n self.verbose = verbose\n self.dtype = dtype\n self.is_regression = is_regression\n\n self.loss_history = []", "title": "" }, { "docid": "6dff70d5acdda44f06a78f774fd0d62d", "score": "0.54977804", "text": "def __init__(self, drop_race=True, drop_sex=True):\n\n bunch = type(self).load_function()\n target = bunch[self._target_col].astype(int)\n bunch.drop(self._target_col, axis=1, inplace=True)\n bunch = bunch.astype(float)\n\n super().__init__(bunch, target, nrows=self._size[0], data_t=self._feature_type)\n\n self._features = list(bunch)\n\n if drop_race:\n self._race_train = recover_categorical_encoding_for_compas_race(self._X_train)\n self._race_test = recover_categorical_encoding_for_compas_race(self._X_test)\n\n # race is in columns 9-10 because the super class 
constructor removes the target\n self._X_train = np.delete(self._X_train, np.s_[9:11], axis=1)\n self._X_test = np.delete(self._X_test, np.s_[9:11], axis=1)\n del[self._features[9:11]]\n\n if drop_sex:\n self._sex_train = self._X_train[:, 0]\n self._sex_test = self._X_test[:, 0]\n\n self._X_train = np.delete(self._X_train, 0, axis=1)\n self._X_test = np.delete(self._X_test, 0, axis=1)\n del[self._features[0]]\n\n self._target_names = np.unique(target)", "title": "" }, { "docid": "4dbef6a039c77faa83f596413f525aa8", "score": "0.5495461", "text": "def __init__(self, driver, datadict, numberofrows=100, numfeatures = 200):\n seed(42)\n self.driver = driver\n self.numfeatures = self.driver.num_features\n featurelist = []\n # self.__clf = GradientBoostingRegressor(n_estimators=500, max_depth=4)\n gbr = GradientBoostingRegressor(n_estimators=500, max_depth=10, max_features=numfeatures, random_state=42)\n pca = PCA(whiten=True, n_components=numfeatures)\n estimators = [('polyf', PolynomialFeatures()), ('scale', MinMaxScaler()), ('pca', PCA()), ('gbr', gbr)]\n self.__clf = Pipeline(estimators)\n self.__indexlist = []\n for trace in self.driver.traces:\n self.__indexlist.append(trace.identifier)\n featurelist.append(trace.features)\n # Initialize train and test np arrays\n self.__traindata = np.asarray(featurelist)\n self.__testdata = np.asarray(featurelist)\n self.__trainlabels = np.ones((self.__traindata.shape[0],))\n data = np.empty((0, self.numfeatures), float)\n setkeys = datadict.keys()\n if driver.identifier in setkeys:\n setkeys.remove(driver.identifier)\n else:\n setkeys = sample(setkeys, len(setkeys) - 1)\n for key in setkeys:\n if key != driver.identifier:\n rand_smpl = [datadict[key][i] for i in sorted(sample(xrange(len(datadict[key])), numberofrows)) ]\n data = np.append(data, np.asarray(rand_smpl), axis=0)\n self.__traindata = np.append(self.__traindata, data, axis=0)\n self.__trainlabels = np.append(self.__trainlabels, np.zeros((data.shape[0],)), axis=0)\n self.__y = np.ones((self.__testdata.shape[0],))", "title": "" }, { "docid": "575488dc37fe6f8488e91f14da325d48", "score": "0.5494326", "text": "def __init__(self, param, kernel='linear', kernel_param=1):\n self.param = param\n self.kernel = kernel\n self.kernel_param=kernel_param", "title": "" } ]
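Several of the negatives above (__test_bootstrap_fit, BootstrapWrapper, SKLearnBootstrap) compute the same decomposition of the bootstrap test error, mse = bias² + variance, where bias² is the squared deviation of the mean prediction from the target. A minimal numpy sketch of that computation, assuming y_test is a column vector and y_pred_array stores one bootstrap replicate per column, mirroring the array shapes in SKLearnBootstrap; the random data is illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
n_test, N_bs = 40, 1000
y_test = rng.standard_normal((n_test, 1))                # (n_test, 1)
y_pred_array = y_test + 0.5 * rng.standard_normal((n_test, N_bs))

# mse: mean over bootstrap replicates, then over test points
mse = np.mean(np.mean((y_test - y_pred_array) ** 2, axis=1, keepdims=True))

# bias^2: squared deviation of the mean prediction from the target
y_pred_mean = np.mean(y_pred_array, axis=1, keepdims=True)
bias = np.mean((y_test - y_pred_mean) ** 2)

# variance: spread of the predictions around their own mean
var = np.mean(np.var(y_pred_array, axis=1, keepdims=True))

# With ddof=0 (numpy's default) the identity holds exactly per test
# point, so the difference is only floating-point noise.
print(mse, bias + var, abs(mse - bias - var))
```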
329cc8ceae202def4653bbdb4e20f1e9
Toplevel ``on_close`` logic that provides cleanup for realtime communication sessions.
[ { "docid": "c495918fcefa2583801aa3d0e3203d5d", "score": "0.0", "text": "def on_close(self, handler, socket): # pragma: no cover\n\n if not socket.state is socket.State.ERROR:\n socket.set_state(socket.State.CLOSED)\n handler.on_close(socket.state is not socket.State.ERROR)\n return self", "title": "" } ]
[ { "docid": "a4cd00493476fdb45cd6d87da097eb94", "score": "0.7646872", "text": "def on_close(self, *args):", "title": "" }, { "docid": "14fa41a093a45f741fdc1c28af7d6ed6", "score": "0.73240906", "text": "def on_close(_):\n logging.info('Websocket connection closed.')", "title": "" }, { "docid": "a040ffa7c1ec1bb31c499563415b5f31", "score": "0.730165", "text": "def on_quit(self, _):\n self.on_close(None)", "title": "" }, { "docid": "e80693a095ae5e770e5b088b580908f4", "score": "0.7188802", "text": "def _on_close(self, event):\n event.Skip()\n if self._registered_close is not None:\n self._registered_close()", "title": "" }, { "docid": "4247f62b1828b03d59174de6ad12566e", "score": "0.70931226", "text": "def OnClose(self):\n pass", "title": "" }, { "docid": "7c9f0adbba77e378e6f71426c92da92c", "score": "0.70782924", "text": "def on_close(self):\n logger.debug(\"Connection closed %s\", self)\n for resource in self.rpc_objects:\n if hasattr(resource, '_on_close'):\n resource._on_close()\n self._pinger.stop()", "title": "" }, { "docid": "4c0cd74df3b27ddd34f48df080212eb3", "score": "0.7070996", "text": "def on_close(self):\n self.event_logger.remove_listener(listener=self.event_listener)", "title": "" }, { "docid": "af68f7cc6bb093af18c2bded6f076366", "score": "0.7017511", "text": "def __closed_cb(self):\n\tlogging.debug( \"Server Closed\" )", "title": "" }, { "docid": "d87e3eda17221b41d75440ceb4a291c4", "score": "0.6953", "text": "def handle_close(self):\n logging.info(\"Server shutting down.\")\n self.close()", "title": "" }, { "docid": "024fc4f95fa5aba9da615e3215d33df1", "score": "0.69367737", "text": "def on_close(self):\n self.connected = False", "title": "" }, { "docid": "37de51e3d66599b6456af43692b4dbe5", "score": "0.69010556", "text": "def on_close(self):\n super(MyApp, self).on_close()", "title": "" }, { "docid": "2490d8d34ff0b598b4488930308e2cdb", "score": "0.68723917", "text": "def on_session_closing(self, event: Event) -> None:\n pass", "title": "" }, { "docid": "3cfa57a9cb28eaa08541358ef59b67f5", "score": "0.6870582", "text": "def on_close(self):\n self.msg_manager.remove_client(self)\n print \"Closed connection {0}\".format(self.current_user)", "title": "" }, { "docid": "d068bde378a1e87c6ec1aa481bfa8816", "score": "0.6870524", "text": "def on_disconnect():", "title": "" }, { "docid": "20a1b07913efbc6c49cf198cef4558e7", "score": "0.68480563", "text": "def notify_closebase(self):\n pass", "title": "" }, { "docid": "668f6aa00b8c49780b41fb5f1b2b2153", "score": "0.6819787", "text": "def __exit__(self, *args) -> None:\n if getattr(self, \"_local_session\", False):\n self.close()", "title": "" }, { "docid": "05037ed11a1ba668cd7d98becf01d42a", "score": "0.6812855", "text": "def on_closing(self):\n self.logger.info('Shutting down.\\n')\n self.master.destroy()\n self.bulb_interface.stopped.set()\n sys.exit(0)", "title": "" }, { "docid": "80b74070408456e8395edee26304aeb0", "score": "0.67681056", "text": "def on_close(self, ws, *args):\n self.callback.on_close()", "title": "" }, { "docid": "f4406865f930ee1adf04a1acbc737902", "score": "0.6761127", "text": "def on_close(self):\n\n # Remove this handler from the server's set of handlers\n WebSocketHandler.server.web_socket_handlers.remove(self)\n print('WebSocket connection closed')", "title": "" }, { "docid": "b9d06ea511e337ed715bb6b495fda7e3", "score": "0.67542225", "text": "def OnClose(self, event=None):\n\n if len(threading.enumerate())>1:\n if event and event.CanVeto(): \n event.Veto()\n echo(\"Closing...\", marker=\"OnClose\", 
icon=\"red_circle\")\n publish(\"app.stop\")\n self.app.reactor.stop() \n return", "title": "" }, { "docid": "df70b454f2f46ed60f12697fb9ddd0cf", "score": "0.67422265", "text": "async def close(self):\n self.nats_handler = None", "title": "" }, { "docid": "b5d196e02301e16d4f21efa7318253ec", "score": "0.67310077", "text": "def onClose(self):\n self.communicator.Close()\n self.rungzserver.terminate()\n self.rungzclient.terminate()\n call([\"pkill\", \"gzserver\"])\n call([\"pkill\", \"gzclient\"])", "title": "" }, { "docid": "e345786dfd62c657d99e356dade0d520", "score": "0.672561", "text": "async def on_closing_async(self, reason):", "title": "" }, { "docid": "213240db6febf846aa7806369fc2a2b0", "score": "0.6725078", "text": "def on_close(self):\n\n for subscription_id in list(self._subscriptions.keys()):\n self._dispose_subscription(subscription_id)", "title": "" }, { "docid": "6164cd1c3df7875a7b603929bdd25351", "score": "0.6720452", "text": "def on_close(event=None):\n msg_to_send.set(\"{quit}\")\n send()", "title": "" }, { "docid": "4a58cf5165022fa0acf2f49aba7f5b24", "score": "0.67146635", "text": "def handle_closing(self, msg):\n\n Log.info(f'[WS] Connection to server is closing: {msg}')", "title": "" }, { "docid": "63039de99d26784db10c29d5fd10e205", "score": "0.66853994", "text": "def on_shutdown(self):\n pass", "title": "" }, { "docid": "5c7cb8d9bc5abfdb68a460c9b7827228", "score": "0.66851646", "text": "def on_close(self):\n print(\"WebSocket closed\")\n if self.id in clients:\n del clients[self.id]", "title": "" }, { "docid": "5c7cb8d9bc5abfdb68a460c9b7827228", "score": "0.66851646", "text": "def on_close(self):\n print(\"WebSocket closed\")\n if self.id in clients:\n del clients[self.id]", "title": "" }, { "docid": "4ef91ba7c9f0c700129d9fdc3ed797ab", "score": "0.6653642", "text": "def _lowLevelClose(self):\n self.socket_reference.close()", "title": "" }, { "docid": "9aecd6a11d1658f4bf4e2134668ae1d3", "score": "0.66512436", "text": "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "title": "" }, { "docid": "72a2a9e8304a6952abb86a244bada4c6", "score": "0.66502124", "text": "def OnClose(self, e = None):\n log.info('OnClose: %r', self)\n self.UnbindObservers()", "title": "" }, { "docid": "e3c402632ba611bd6585396c21f1982b", "score": "0.66425556", "text": "def on_connection_closing(self, event: Event) -> None:\n pass", "title": "" }, { "docid": "253586c9db19a2ed3f719370d5615eb1", "score": "0.66318846", "text": "def __exit__(self, *args, **kwargs):\n self.disconnect()", "title": "" }, { "docid": "767d3467a5f4c98da3644215fbfcd97b", "score": "0.6629883", "text": "def close(self):\n self._state = \"exiting\"\n if self._on_close is not None:\n self._on_close()", "title": "" }, { "docid": "f0ba87af2e8eb50e997723e2eb652cbf", "score": "0.6627272", "text": "def on_close(self, ws):\n self.log(\"### Websocket connection closing ###\")", "title": "" }, { "docid": "a61b7cd70f9790d0fc9c803af9ef08df", "score": "0.66246724", "text": "async def close(self):\n for instance in self._socketInstances:\n if instance['connected']:\n instance['connected'] = False\n await instance['socket'].disconnect()\n for request_resolve in instance['requestResolves']:\n if not instance['requestResolves'][request_resolve].done():\n instance['requestResolves'][request_resolve]\\\n .set_exception(Exception('MetaApi connection closed'))\n instance['requestResolves'] = {}\n self._synchronizationListeners = {}\n self._latencyListeners = []\n self._socketInstancesByAccounts = {}\n self._socketInstances = []\n 
self._packetOrderer.stop()", "title": "" }, { "docid": "a6d4f134211cbeda9c58cb307deb8c11", "score": "0.662167", "text": "def on_close(self, _):\n self.dhcp_listener.shutdown = True\n self.dump_pickle()\n self.Hide()\n if type(self.ping_window) is multi_ping.MultiPing:\n self.ping_window.Hide()\n self.ping_model.shutdown()\n self.mse_active_list = []\n self.telnet_job_queue.join()\n self.Destroy()", "title": "" }, { "docid": "6ca8bf5631a7edf1cd33ae69fd2d5e3a", "score": "0.6620746", "text": "def on_close(self):\n\n LOGGER.info('[ChatWebsocketHandler] Websocket conneciton close event %s ' % self)\n\n msg = {\n 'name': self.rabbit_client._person,\n 'stage': 'stop',\n 'msg_type': 'public',\n 'msg': self.rabbit_client._person + ' left',\n 'clientid': self.rabbit_client._clientid,\n 'participants': len(websocketParticipants) - 1\n }\n\n routing_key = 'public.*'\n\n # publishing the close connection info to rest of the rabbitmq subscribers/clients\n self.rabbit_client.publish(msg, routing_key)\n\n # removing the connection of global list\n websocketParticipants.remove(self)\n\n LOGGER.info('[ChatWebsocketHandler] Websocket connection closed')", "title": "" }, { "docid": "a2b143e4249fcaec4e3b3f1cdd9995bb", "score": "0.65988946", "text": "def on_closed(self, callback):\n self.closed = callback", "title": "" }, { "docid": "94cd5b589603d8f4b9a3f7648c1784c5", "score": "0.65950215", "text": "def close(self):\n self._update_cond()\n pn_connection_close(self._impl)", "title": "" }, { "docid": "37a68307b7d08745c69c22bf51070021", "score": "0.6575879", "text": "def on_closing(event=None):\n global top, STOP_HEARTBEAT\n\n # when window is close send a quit message to the server as well\n my_msg.set(\"{quit}\")\n try:\n send()\n finally:\n STOP_HEARTBEAT = True\n top.quit()", "title": "" }, { "docid": "e76f49104a672c960ebe222a293cda3e", "score": "0.65738535", "text": "async def after_close(self, exc_type, exc, tb):", "title": "" }, { "docid": "bffb395fca131bfed2b5c2a9b77b77c1", "score": "0.65615684", "text": "def __exit__(self, *args):\n self._handle.close()", "title": "" }, { "docid": "78b87791addad583eb309af332f30adb", "score": "0.6548887", "text": "def _close(self):", "title": "" }, { "docid": "d495452719e0a674899b538d24920998", "score": "0.652888", "text": "def handle_close(self):\n self.close()", "title": "" }, { "docid": "5dd2e2ce5021a65d28dda642317daffd", "score": "0.6520886", "text": "def handle_shutdown(self):", "title": "" }, { "docid": "f85be75942d3581f95d76cc39a89b406", "score": "0.6513627", "text": "def on_session_closed(self, event: Event) -> None:\n pass", "title": "" }, { "docid": "f15dc13c649e83467bccb49954aab077", "score": "0.65052867", "text": "def shutdown():\n logger.debug('Call handler shutdown')", "title": "" }, { "docid": "5db705783811da426efc0df8b1d68eac", "score": "0.64983827", "text": "def on_closing(self,event=None):\n self.my_msg.set(\"{quit}\")\n self.send()", "title": "" }, { "docid": "beba7e40c26e7a45dce441700b18241a", "score": "0.6481447", "text": "def session_closed(self):\n self._detach()\n self.safe_finish()", "title": "" }, { "docid": "8613339e56d388d40c3cd9dc5f00a75c", "score": "0.6474794", "text": "def __del__(self):\n self.close_connection()\n log.info('Closed bloc_device')", "title": "" }, { "docid": "2374c4b0f8e71fbe85e334cea99408d9", "score": "0.6463767", "text": "def SSLv2_CLOSE_NOTIFY_FINAL(self):\n self.vprint()\n self.vprint(\"Trying to send 'goodbye' to the client...\")", "title": "" }, { "docid": "5d72bb559263f27d11bb9b487ba9f96a", "score": "0.64562905", "text": 
"def close(self):\n check_function_calls['close_is_called'] = True", "title": "" }, { "docid": "5d72bb559263f27d11bb9b487ba9f96a", "score": "0.64562905", "text": "def close(self):\n check_function_calls['close_is_called'] = True", "title": "" }, { "docid": "abb6810ebdea2967cd7113411e2383c1", "score": "0.6437603", "text": "def __exit__(self, type, value, traceback):\n self._close_polling_()", "title": "" }, { "docid": "1bf8be469fb1d68fc5af26f87a0dd4e8", "score": "0.64322764", "text": "async def handleClose(self):\n if self._connectionState == SocketConnectionState.DISCONNECTED:\n self._engineLogger.info(f\"{self._config['SenderCompID']} session -> DISCONNECTED\")\n self._writer.close()\n self._connectionState = SocketConnectionState.DISCONNECTED", "title": "" }, { "docid": "babee7c019cc67af7e77dadaac00a7cc", "score": "0.6426678", "text": "def _register_cleanup_on_exit(self):\n if conf.cleanup_all_sessions_on_exit():\n\n def cleanup_spark_sessions():\n try:\n self.clean_up_all()\n except Exception as e:\n self.logger.error(\n \"Error cleaning up sessions on exit: {}\".format(e)\n )\n pass\n\n atexit.register(cleanup_spark_sessions)\n self.ipython_display.writeln(\"Cleaning up livy sessions on exit is enabled\")", "title": "" }, { "docid": "754cd16194eadc36004029dd91c00b40", "score": "0.6424696", "text": "def on_close(self, event):\n self.Destroy()\n return True", "title": "" }, { "docid": "7f111e3487ff6f94b0dc4f96777297fe", "score": "0.64208597", "text": "def on_close(self, event):\n self._test_runner.kill_process()\n if self._process_timer:\n self._process_timer.Stop()\n self._test_runner.shutdown_server()\n event.Skip()", "title": "" }, { "docid": "0e3a701965eb5d2b3cc1505f88955b26", "score": "0.6416616", "text": "def closed(self, fn):\n self.on_closed = fn", "title": "" }, { "docid": "437d003f3930dd4e9bf90347a4d710ca", "score": "0.64159065", "text": "def on_close (self, event):\n\t\tself.StopAllJobs ()\n\t\tself.Destroy ()", "title": "" }, { "docid": "e00c6263778c0428a982cc9eeb535422", "score": "0.641541", "text": "def close():", "title": "" }, { "docid": "e00c6263778c0428a982cc9eeb535422", "score": "0.641541", "text": "def close():", "title": "" }, { "docid": "e00c6263778c0428a982cc9eeb535422", "score": "0.641541", "text": "def close():", "title": "" }, { "docid": "e00c6263778c0428a982cc9eeb535422", "score": "0.641541", "text": "def close():", "title": "" }, { "docid": "7ec3847ac4b59979e0d0fbefa5deb00e", "score": "0.64096254", "text": "def on_session_closing(self, event: Event) -> None:\n if self.delegate is not None:\n _dispatch(self.delegate, 'on_session_closing', event)\n elif self.peer_close_is_error:\n self.on_session_error(event)", "title": "" }, { "docid": "8605e3c334ba8537ad8b92ea66808d1b", "score": "0.63983357", "text": "def close(self):\n self.logMessages = {}\n Handler.close(self)", "title": "" }, { "docid": "e17a143ef5de5d7dca7242a777c5d4ae", "score": "0.63947916", "text": "def close(self):\n rpc_close = \"\"\"\n <rpc>\n <close-session/>\n </rpc>\n ]]>]]>\n \"\"\"\n try:\n self.rpc(rpc_close)\n except Exception as err:\n errmsg = \"RPC Close Error: %r\" % err\n self.log(errmsg)\n try:\n self.logfile.close()\n except Exception as err:\n errmsg = \"Error closing logfile: %r\" % err\n self.log(errmsg)", "title": "" }, { "docid": "04cd8f3e7d3209469c7dc56a977a4cc1", "score": "0.6385351", "text": "def close():\n logging.shutdown()", "title": "" }, { "docid": "df29979998191cac9199886a0547f5a7", "score": "0.637533", "text": "def on_close(self):\n logger.debug(\"Node websocket 
closed\")\n self.application.remove_ws(self)", "title": "" }, { "docid": "7c94155e707fa47ff0c7920aeec4e0c3", "score": "0.63744104", "text": "def _close(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "15e8611f2ab59dc7efff266a3853df35", "score": "0.6362838", "text": "def logging_close():\n logging.shutdown()", "title": "" }, { "docid": "8797e6cbdbc75b4454820a13801b6d14", "score": "0.6362396", "text": "def close(self):\n # TODO: maybe doing rabbitmq cleanups here!\n pass", "title": "" }, { "docid": "459501f34455b075e2f436aec36436e7", "score": "0.6362006", "text": "def __exit__(self, exc_type, exc_value, traceback):\n self._socket.close()", "title": "" }, { "docid": "459501f34455b075e2f436aec36436e7", "score": "0.6362006", "text": "def __exit__(self, exc_type, exc_value, traceback):\n self._socket.close()", "title": "" }, { "docid": "5edad5bd59cc1f668d5edc3398125589", "score": "0.6355299", "text": "def __del__(self) -> None:\n self._transport.close()\n while self._transport.is_closing():\n time.sleep(TIMER_RES)\n self._logger.debug('connection closed')", "title": "" }, { "docid": "fa1874d5e02077260df909a0170ed492", "score": "0.635033", "text": "def close(self) -> None:\n self.relay(\"close\")()", "title": "" }, { "docid": "f89f3571b2f36ef87a4b2b5b702fb363", "score": "0.63356924", "text": "async def close(self):", "title": "" }, { "docid": "f4a16ac722e04a7b20bfdb7651fc134c", "score": "0.6334904", "text": "async def close(self):\n\n # TODO: send a shutdown message to tracker\n await self.http_session.close()", "title": "" }, { "docid": "0862f9de6aabfd5eb2e5649785b70b33", "score": "0.6330051", "text": "def on_socket_close(socket):\n logger.info('Connection to Innoactive® Hub closed')", "title": "" }, { "docid": "6de76f940523e750a23267e039291374", "score": "0.63293535", "text": "def close(self):\n while self.check_waiting_messages() is True:\n pass\n for s in self.sockets:\n self.remove_connection(s)\n self.db.disconnect()\n self.closed = True\n self.db.flush_stats()\n sys.exit(0)", "title": "" }, { "docid": "a5ec2a5e7d97dd42045f6b72a98786ee", "score": "0.6322525", "text": "def close(self) -> None:\n self.received_message() # Flush remaining messages\n self.notifier.setEnabled(False)\n self.socket.close()\n self.context.destroy()", "title": "" }, { "docid": "91da3640f3884f346f7f9dcf927cc3cd", "score": "0.631444", "text": "def closing(self):\n ...", "title": "" }, { "docid": "99b37114bef39c48c15add518302f3c2", "score": "0.63141626", "text": "def close(self) -> None:\n asyncio.ensure_future(self._session_manager.close())\n for _, protocol_close, _ in self._protocol_handlers.values():\n protocol_close()", "title": "" }, { "docid": "fbfb15929cedd26d3ba80a074dd34e2f", "score": "0.63111264", "text": "def shutdown(self):", "title": "" }, { "docid": "fbfb15929cedd26d3ba80a074dd34e2f", "score": "0.63111264", "text": "def shutdown(self):", "title": "" }, { "docid": "fbfb15929cedd26d3ba80a074dd34e2f", "score": "0.63111264", "text": "def shutdown(self):", "title": "" }, { "docid": "9d3e2765fbbb4ecd48d09a3aabaf0886", "score": "0.6311119", "text": "def SSLv2_CLOSE_NOTIFY(self):\n self.vprint()\n self.vprint(\"Trying to send 'goodbye' to the client...\")", "title": "" }, { "docid": "a75fb2cf0ac39bd78a3a6f0091943f07", "score": "0.6304865", "text": "def _shutdown_hook(self):\n pass", "title": "" }, { "docid": "ac2c0c44c12b67c73ca837bb7e4a0c02", "score": "0.6295281", "text": "def handle_shutdown(self):\n self.communicator.unregister_connection(self)", "title": "" }, { "docid": 
"d2ca387c516c0330c4393afe6910e954", "score": "0.628372", "text": "def close_session(self):\n self._handlers = []\n self.cap.release()", "title": "" }, { "docid": "d881a13847731fd6ac8d7da67e4ebd8e", "score": "0.6281659", "text": "def log_out(self) -> None:\n try:\n self.mainsock.close()\n except socket.error as x:\n log.error(\"Error closing main socket: \" + str(x))\n try:\n self.immsock.close()\n except socket.error as x:\n log.error(\"Error closing immediate socket: \" + str(x))", "title": "" }, { "docid": "8b7f558d04c20443e84555b4c7844d38", "score": "0.6277276", "text": "def __del__(self) -> None:\n if getattr(self, \"_local_session\", False):\n self.close()", "title": "" }, { "docid": "a0fef295b03db0cfb14d6226b7e83474", "score": "0.62758446", "text": "def on_shutdown(self):\n LOG.debug('%r.on_shutdown()', self)\n for stream, state in self._state_by_stream.items():\n state.lock.acquire()\n try:\n for sender, fp in reversed(state.jobs):\n sender.close()\n fp.close()\n state.jobs.pop()\n finally:\n state.lock.release()", "title": "" }, { "docid": "c60741e3ad26d063e1909fb83ac52101", "score": "0.62748504", "text": "def teardown(close_frame=None):\n self.frame = None\n self.keep_running = False\n if self.sock:\n self.sock.close()\n self.sock = None\n close_args = self._get_close_args(\n close_frame.data if close_frame else None)\n self._callback(self.on_close, *close_args)", "title": "" }, { "docid": "f8beeee3f38f6b80b61402bfcb68f01e", "score": "0.6274792", "text": "def _at_exit(self):\n self.engine._at_exit()", "title": "" }, { "docid": "6020823fc8d0c62233cbc5deacdca1d7", "score": "0.62700856", "text": "def shutdown(self):\n\n for c in self.__chandles:\n c.close()\n\n self.__chandles = None\n self.__freehandles = None\n self.__mhandle.close()\n self.__mhandle = None\n self.__req_q = None\n self.__failures = None\n self.__success = None\n self.__orphans = None\n self.__active_handles = 0", "title": "" }, { "docid": "07377e531805238be0e973c98e361d14", "score": "0.6268365", "text": "def finalize():\n ioloop.stop()\n logging.info('{SERVER} Server successfully stopped.')", "title": "" }, { "docid": "2ba36ead6e6c1e8929baed2780a297a0", "score": "0.6264217", "text": "async def __aexit__(self, *exc_info) -> None:\n await self._close(Client.CLOSED, do_cbs=True)", "title": "" }, { "docid": "3d14ef7fa898832ef394a35efcd967b2", "score": "0.6263994", "text": "async def on_close(self):\n await self.session.close()\n for task in self.tasks:\n task.cancel()", "title": "" } ]
c55d4eeccc5a2094e0571823aa305396
Test aall for gt - Array code i. General test odd length array without SIMD.
[ { "docid": "3f2746b88d212b25fc12f6bb1dabfa96", "score": "0.0", "text": "def test_aall_basic_gt_b3(self):\n\t\t# One test value near the end of the array is equal to the test value.\n\t\ttestval = 842150449\n\t\tarrayval = 1052688062\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\t\ttestdata[-2] = testval\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" } ]
[ { "docid": "2f27d5e3b57c09c8c86baa613f177d15", "score": "0.69521815", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "2f27d5e3b57c09c8c86baa613f177d15", "score": "0.69520867", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "42b4b8f5355fee5edf9f835bdb458f36", "score": "0.6926313", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "42b4b8f5355fee5edf9f835bdb458f36", "score": "0.6926313", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "e79c9551c469fa8d93c8dcbac28392f4", "score": "0.691178", "text": "def test_aall_neginf_gt_b2(self):\n\t\t# All data and test values fail.\n\t\ttestval = 100.0\n\t\tarrayval = -math.inf\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "e79c9551c469fa8d93c8dcbac28392f4", "score": "0.691178", "text": "def test_aall_neginf_gt_b2(self):\n\t\t# All data and test values fail.\n\t\ttestval = 100.0\n\t\tarrayval = -math.inf\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "e703a22d0223e1f909eb8d106eab3222", "score": "0.6899496", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test 
value.\n\t\ttestval = 25700\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "e703a22d0223e1f909eb8d106eab3222", "score": "0.6899496", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25700\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "469e7772a2687bd518b5f06cc0eda177", "score": "0.68452233", "text": "def test_aall_neginf_gt_b2(self):\n\t\t# All data and test values fail.\n\t\ttestval = 100.0\n\t\tarrayval = -math.inf\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "469e7772a2687bd518b5f06cc0eda177", "score": "0.68452233", "text": "def test_aall_neginf_gt_b2(self):\n\t\t# All data and test values fail.\n\t\ttestval = 100.0\n\t\tarrayval = -math.inf\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "9eb73ba52ba65245bf8ab98b38d9b63a", "score": "0.68434054", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25000\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "9eb73ba52ba65245bf8ab98b38d9b63a", "score": "0.68434054", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25000\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "5410fd4b32d3e615c4af471a09f648a8", "score": "0.679382", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1684300900\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > 
testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "5410fd4b32d3e615c4af471a09f648a8", "score": "0.679382", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1684300900\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "42aa13ddf4f5463e0cf9ef8c8cfba317", "score": "0.67773706", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "42aa13ddf4f5463e0cf9ef8c8cfba317", "score": "0.67773706", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "4137c4a37ef8f9cd6765f2f2baad330f", "score": "0.67706996", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1684300900\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "4137c4a37ef8f9cd6765f2f2baad330f", "score": "0.67706996", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1684300900\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "0c7d9a3233b22db1eaa630d959d7fe90", "score": "0.67703956", "text": "def test_aall_neginf_gt_b1(self):\n\t\t# All data and test values match.\n\t\ttestval = -math.inf\n\t\tarrayval = 100.0\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , 
nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "0c7d9a3233b22db1eaa630d959d7fe90", "score": "0.67703956", "text": "def test_aall_neginf_gt_b1(self):\n\t\t# All data and test values match.\n\t\ttestval = -math.inf\n\t\tarrayval = 100.0\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "052f2fb1a112d374075ddf56c1abf2a5", "score": "0.67596835", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 842150449\n\t\tarrayval = testval\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "052f2fb1a112d374075ddf56c1abf2a5", "score": "0.67596835", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 842150449\n\t\tarrayval = testval\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "35ba0c6c228f77e292f4f8ca950c5813", "score": "0.67506313", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "35ba0c6c228f77e292f4f8ca950c5813", "score": "0.67506313", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "948d9b3584e83567151712d00a0b74e9", "score": "0.67409384", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 24931\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "948d9b3584e83567151712d00a0b74e9", 
"score": "0.67409384", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 24931\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "4efde85f7b5e5b5f21ac8a7fb245e2e1", "score": "0.6737059", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 842150449\n\t\tarrayval = testval\n\t\ttestdata = array.array('l', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "4efde85f7b5e5b5f21ac8a7fb245e2e1", "score": "0.6737059", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 842150449\n\t\tarrayval = testval\n\t\ttestdata = array.array('l', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "59180a764d8e468e2d69184066b630ba", "score": "0.67289627", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25700\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "59180a764d8e468e2d69184066b630ba", "score": "0.67289627", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25700\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "d0ea55eda65d88db741603f54c05b4e8", "score": "0.67261827", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25000.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "d0ea55eda65d88db741603f54c05b4e8", "score": "0.67261827", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test 
value.\n\t\ttestval = 25000.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "4627e4ef558879796d856050e5b52c2a", "score": "0.6721413", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -25850\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "4627e4ef558879796d856050e5b52c2a", "score": "0.6721413", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -25850\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "532a1d801ad0d83f418f7677eca49178", "score": "0.67177767", "text": "def test_aall_neginf_gt_b1(self):\n\t\t# All data and test values match.\n\t\ttestval = -math.inf\n\t\tarrayval = 100.0\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "532a1d801ad0d83f418f7677eca49178", "score": "0.67177767", "text": "def test_aall_neginf_gt_b1(self):\n\t\t# All data and test values match.\n\t\ttestval = -math.inf\n\t\tarrayval = 100.0\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "a8a81818c7b635f0b36f7fa438df018b", "score": "0.6707076", "text": "def test_aall_neginf_gt_b2(self):\n\t\t# All data and test values fail.\n\t\ttestval = 100.0\n\t\tarrayval = -math.inf\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "a8a81818c7b635f0b36f7fa438df018b", "score": "0.6707076", "text": "def test_aall_neginf_gt_b2(self):\n\t\t# All data and test values fail.\n\t\ttestval = 100.0\n\t\tarrayval = -math.inf\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = 
arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "911b8e4d5e0e1f758388ea80ef13913f", "score": "0.6694107", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 842150449.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "911b8e4d5e0e1f758388ea80ef13913f", "score": "0.6694107", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 842150449.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "94f5b1eec837033708b22bccfdee1880", "score": "0.6687951", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -1694498815\n\t\tarrayval = testval\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "94f5b1eec837033708b22bccfdee1880", "score": "0.6687951", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -1694498815\n\t\tarrayval = testval\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "8c7643fb182f35014d77e097004e9ad6", "score": "0.66854566", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 3508498382646718642\n\t\tarrayval = testval\n\t\ttestdata = array.array('Q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "8c7643fb182f35014d77e097004e9ad6", "score": "0.6684972", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 3508498382646718642\n\t\tarrayval = testval\n\t\ttestdata = array.array('Q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , 
nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "168de8d3ffe3f61759b8491862a63dc0", "score": "0.6676877", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "168de8d3ffe3f61759b8491862a63dc0", "score": "0.6676877", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "cdd6c978e56e934ebeaf6859fe13f8dd", "score": "0.6675503", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25000\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "cdd6c978e56e934ebeaf6859fe13f8dd", "score": "0.6675503", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25000\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "f9145b6ffb9a97812c10ff87df521431", "score": "0.6666108", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 7262497666814784100\n\t\tarrayval = testval\n\t\ttestdata = array.array('q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "f9145b6ffb9a97812c10ff87df521431", "score": "0.6666108", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 7262497666814784100\n\t\tarrayval = testval\n\t\ttestdata = array.array('q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { 
"docid": "756279dbd7cc290aab32b7501faf8394", "score": "0.6665809", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -1694498815\n\t\tarrayval = testval\n\t\ttestdata = array.array('l', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "756279dbd7cc290aab32b7501faf8394", "score": "0.6665809", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -1694498815\n\t\tarrayval = testval\n\t\ttestdata = array.array('l', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "cf823eebd8d1dda966ede8a871d33d6c", "score": "0.6664166", "text": "def test_aall_basic_lt_d2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25000\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x < testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('<', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "cf823eebd8d1dda966ede8a871d33d6c", "score": "0.6664166", "text": "def test_aall_basic_lt_d2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25000\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x < testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('<', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "d0fd9cdfdd336df2319c085c39ca9ef8", "score": "0.66631246", "text": "def test_aall_basic_lt_d2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x < testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('<', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "d0fd9cdfdd336df2319c085c39ca9ef8", "score": "0.6662836", "text": "def test_aall_basic_lt_d2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x < testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('<', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "2e5606bb549a80b7ba01b5ab454a3a7f", "score": "0.6655907", "text": "def 
test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -25850.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "2e5606bb549a80b7ba01b5ab454a3a7f", "score": "0.6655907", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -25850.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "51c513031f8f135ca9905ca1d495f4e7", "score": "0.6654366", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "51c513031f8f135ca9905ca1d495f4e7", "score": "0.6654366", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('h', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "8a9ef419a9b94b7f48c646b418754010", "score": "0.66505176", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -1694498815.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "8a9ef419a9b94b7f48c646b418754010", "score": "0.66505176", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -1694498815.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "52d22a4ef1db667227945c3c38f824eb", "score": "0.664853", "text": "def test_aall_basic_gt_b1(self):\n\t\t# All data values are greater than the test 
value.\n\t\ttestval = 1633771875\n\t\tarrayval = 1633771876\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertTrue(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "52d22a4ef1db667227945c3c38f824eb", "score": "0.6647227", "text": "def test_aall_basic_gt_b1(self):\n\t\t# All data values are greater than the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = 1633771876\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertTrue(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "a2aeb13da2200df944cc8a053471bf6a", "score": "0.66453093", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 7234017283807667300\n\t\tarrayval = testval\n\t\ttestdata = array.array('Q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "a2aeb13da2200df944cc8a053471bf6a", "score": "0.66438323", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 7234017283807667300\n\t\tarrayval = testval\n\t\ttestdata = array.array('Q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "9c4160ab7db5a4e750ae331bfec83148", "score": "0.66354287", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "9c4160ab7db5a4e750ae331bfec83148", "score": "0.66354287", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "ced67b6f261058dc6019631c61c34575", "score": "0.66295767", "text": "def test_aall_basic_gt_b1(self):\n\t\t# All data values are greater than the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = 
1633771876\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertTrue(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "ced67b6f261058dc6019631c61c34575", "score": "0.66295767", "text": "def test_aall_basic_gt_b1(self):\n\t\t# All data values are greater than the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = 1633771876\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertTrue(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "07433de0be88584e95ced6f50f713c1d", "score": "0.6627973", "text": "def test_aall_basic_lt_d2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25700\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x < testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('<', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "07433de0be88584e95ced6f50f713c1d", "score": "0.6627973", "text": "def test_aall_basic_lt_d2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 25700\n\t\tarrayval = testval\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x < testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('<', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "726a8dac64b93dbc623d9c1907946a69", "score": "0.6627895", "text": "def test_aall_neginf_gt_b2(self):\n\t\t# All data and test values fail.\n\t\ttestval = 100.0\n\t\tarrayval = -math.inf\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "726a8dac64b93dbc623d9c1907946a69", "score": "0.6627895", "text": "def test_aall_neginf_gt_b2(self):\n\t\t# All data and test values fail.\n\t\ttestval = 100.0\n\t\tarrayval = -math.inf\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "04558121311ab507675ff843d1f7beed", "score": "0.6622324", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1684300900\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in 
testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "04558121311ab507675ff843d1f7beed", "score": "0.6622324", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1684300900\n\t\tarrayval = testval\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "f27a29bc39b0b22bb7f8678802bac30d", "score": "0.6618444", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "f27a29bc39b0b22bb7f8678802bac30d", "score": "0.6618444", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "3d43d5a64f9dde1c5d82fd7f753a1652", "score": "0.66143984", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "3d43d5a64f9dde1c5d82fd7f753a1652", "score": "0.66143984", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "63d55276074c2347c936e37be028b02f", "score": "0.66035485", "text": "def test_aall_basic_gt_b1(self):\n\t\t# All data values are greater than the test value.\n\t\ttestval = 25700\n\t\tarrayval = 37265\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertTrue(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , 
nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "63d55276074c2347c936e37be028b02f", "score": "0.66035485", "text": "def test_aall_basic_gt_b1(self):\n\t\t# All data values are greater than the test value.\n\t\ttestval = 25700\n\t\tarrayval = 37265\n\t\ttestdata = array.array('H', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertTrue(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertTrue(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "579d066fc6f968dcf8cf46bc7dd4ec3f", "score": "0.66025686", "text": "def test_aall_basic_lt_d2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x < testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('<', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "579d066fc6f968dcf8cf46bc7dd4ec3f", "score": "0.66025686", "text": "def test_aall_basic_lt_d2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x < testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('<', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "4224f56309d64233f0a91ccf4c2960ac", "score": "0.6600486", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1684300900\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "4224f56309d64233f0a91ccf4c2960ac", "score": "0.6600486", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 1684300900\n\t\tarrayval = testval\n\t\ttestdata = array.array('L', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "620f93b704e5dfe89116dd61875d03c3", "score": "0.65989643", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -7393337902558193305\n\t\tarrayval = testval\n\t\ttestdata = array.array('q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", 
"title": "" }, { "docid": "620f93b704e5dfe89116dd61875d03c3", "score": "0.65989643", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = -7393337902558193305\n\t\tarrayval = testval\n\t\ttestdata = array.array('q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "161a1736ce7843d231858389210aa3f1", "score": "0.6597275", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('l', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "161a1736ce7843d231858389210aa3f1", "score": "0.6597275", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('l', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "baebec4c5d05392938fc135cdf08228c", "score": "0.65938777", "text": "def test_aall_basic_gt_b3(self):\n\t\t# One test value near the end of the array is equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = 1633771876\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\t\ttestdata[-2] = testval\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "1a2be001203b0e909ca7015aa36aaeea", "score": "0.6593876", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 842150449\n\t\tarrayval = testval\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "1a2be001203b0e909ca7015aa36aaeea", "score": "0.6593876", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 842150449\n\t\tarrayval = testval\n\t\ttestdata = array.array('i', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval )\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": 
"520ce3129c6d8f131dde4d2a8e4588f7", "score": "0.65933424", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "520ce3129c6d8f131dde4d2a8e4588f7", "score": "0.65933424", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('d', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "baebec4c5d05392938fc135cdf08228c", "score": "0.6592927", "text": "def test_aall_basic_gt_b3(self):\n\t\t# One test value near the end of the array is equal to the test value.\n\t\ttestval = 1633771875\n\t\tarrayval = 1633771876\n\t\ttestdata = array.array('I', [arrayval] * self.arraylength)\n\t\ttestdata[-2] = testval\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "b6bc945764f327274af7bd0e5976f809", "score": "0.659044", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('Q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "b6bc945764f327274af7bd0e5976f809", "score": "0.6589354", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('Q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "e976c5de4570f907f28cfc8b7050ede7", "score": "0.65815765", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "e976c5de4570f907f28cfc8b7050ede7", "score": 
"0.65815765", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100.0\n\t\tarrayval = testval\n\t\ttestdata = array.array('f', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" }, { "docid": "eeb2a8a936d7f309bb13b05768e6fdb4", "score": "0.65782756", "text": "def test_aall_basic_gt_b2(self):\n\t\t# All data values are equal to the test value.\n\t\ttestval = 100\n\t\tarrayval = testval\n\t\ttestdata = array.array('q', [arrayval] * self.arraylength)\n\n\t\t# Verify test compatibility.\n\t\texpected = all([(x > testval) for x in testdata])\n\t\tself.assertFalse(expected)\n\n\t\t# The actual test.\n\t\tresult = arrayfunc.aall('>', testdata, testval , nosimd=True)\n\t\tself.assertFalse(result)\n\t\tself.assertEqual(result, expected)", "title": "" } ]
66319ebc81fc61a9b8194f91f5fbe55b
Port format from javascript to python version of Overcooked
[ { "docid": "9327d0b064b33d66aafa8ae053ee0aa1", "score": "0.0", "text": "def json_joint_action_to_python_action(json_joint_action):\n if type(json_joint_action) is str:\n json_joint_action = eval(json_joint_action)\n return tuple(json_action_to_python_action(a) for a in json_joint_action)", "title": "" } ]
[ { "docid": "d1fce47ddabd33bf2fffe6e3e7c29fe9", "score": "0.5453539", "text": "def washJSONinput(jsontext):\n #jsontext = re.sub('\\n', '', jsontext)\n jsontext = re.sub(r\"(?<!\\\\)(\\n\\r|\\n|\\r)\", \" \", jsontext)\n jsontext = re.sub(',\\s*]', ']', jsontext)\n jsontext = re.sub(',\\s*}', '}', jsontext)\n jsontext = re.sub(',\\s*$', '', jsontext)\n return jsontext", "title": "" }, { "docid": "f420a3d21ea33a7da80a5991b8a1ee86", "score": "0.52498496", "text": "def convert(tool='copilot'):", "title": "" }, { "docid": "5c2e8bece258d7543fdd48e696abbe48", "score": "0.52439576", "text": "def to_javascript(self) -> str:\n chunks = []\n chunks.append(\"<script>\")\n chunks.append(\"var aliases = \")\n chunks.append(json.dumps(self, indent=4, sort_keys=True))\n\n chunks.append('''\n aliases.find = function(nodepath) {\n const keys = Object.keys(this);\n var matchingAliases = [];\n keys.forEach((pattern, index) => {\n if (nodepath.endsWith(pattern)) {\n matchingAliases.push(this[pattern]);\n }\n });\n return matchingAliases;\n };\n aliases.findLongest = function(nodepath) {\n var longest_length = 0;\n var best = null;\n matchingAliases = this.find(nodepath);\n for (alias of matchingAliases) {\n if (alias['pattern'].length > longest_length) {\n best = alias;\n longest_length = alias['pattern'].length;\n };\n }\n return best;\n };\n\n ''')\n # Try it out\n chunks.append(\"console.log('aliases.find(): ', aliases.find('pci(0x1002:0x5a19)/pci(0x1b21:0x1042)/usb(1d6b:0002)'));\") # noqa\n chunks.append(\"console.log('aliases.findLongest(', aliases.findLongest('pci(0x1002:0x5a19)/pci(0x1b21:0x1042)/usb(1d6b:0002)'));\") # noqa\n chunks.append(\"</script>\")\n return \"\\n\".join(chunks)", "title": "" }, { "docid": "24f94ec420a5012570c018c97b497d2d", "score": "0.51570064", "text": "def syllables():\n return json.loads('''\n [{\"aaa\": 2, \"aaae\": 1, \"aaaw\": 1, \"aaj\": 1, \"aar\": 361, \"aaro\": 361, \"aau\": 1, \"aba\": 149, \"abac\": 5, \"abad\": 1, \"abaf\": 2, \"abag\": 1, \"aban\": 89, \"abar\": 6, \"abas\": 26, \"abat\": 19, \"abb\": 100, \"abba\": 4, \"abbe\": 78, \"abbo\": 5, \"abbr\": 7, \"abd\": 38, \"abda\": 1, \"abde\": 1, \"abdi\": 8, \"abdo\": 21, \"abdu\": 1, \"abe\": 98, \"abea\": 1, \"abed\": 16, \"abel\": 12, \"aber\": 17, \"abet\": 4, \"abey\": 6, \"abh\": 71, \"abho\": 71, \"abi\": 562, \"abia\": 37, \"abid\": 167, \"abie\": 13, \"abig\": 19, \"abih\": 19, \"abij\": 25, \"abil\": 131, \"abim\": 68, \"abin\": 17, \"abir\": 11, \"abis\": 39, \"abit\": 3, \"abiu\": 3, \"abj\": 16, \"abje\": 15, \"abju\": 1, \"abl\": 634, \"abla\": 11, \"able\": 4, \"ablu\": 2, \"abn\": 76, \"abne\": 65, \"abno\": 11, \"abo\": 5759, \"aboa\": 54, \"abod\": 86, \"abol\": 42, \"abom\": 200, \"aboo\": 3, \"abor\": 43, \"abou\": 4477, \"abov\": 854, \"abr\": 661, \"abra\": 330, \"abre\": 13, \"abri\": 8, \"abro\": 177, \"abru\": 132, \"abs\": 861, \"absa\": 108, \"absc\": 4, \"abse\": 248, \"absi\": 3, \"abso\": 289, \"abst\": 134, \"absu\": 75, \"abu\": 227, \"abun\": 166, \"abus\": 59, \"abut\": 1, \"aby\": 36, \"abyd\": 1, \"abys\": 35, \"aca\": 107, \"acac\": 4, \"acad\": 96, \"acal\": 5, \"acan\": 1, \"acap\": 1, \"acc\": 3461, \"acca\": 8, \"acce\": 747, \"acch\": 1, \"acci\": 167, \"accl\": 20, \"acco\": 2096, \"accr\": 33, \"accu\": 389, \"ace\": 23, \"acel\": 1, \"acer\": 1, \"acet\": 14, \"ach\": 373, \"acha\": 23, \"achb\": 7, \"ache\": 19, \"achi\": 293, \"achm\": 1, \"acho\": 5, \"achs\": 8, \"achz\": 4, \"aci\": 24, \"acid\": 10, \"ack\": 179, \"acke\": 1, \"ackn\": 178, \"acm\": 1, \"aco\": 
27, \"acol\": 1, \"acom\": 2, \"acon\": 1, \"acor\": 16, \"acou\": 7, \"acq\": 543, \"acqu\": 543, \"acr\": 631, \"acre\": 62, \"acri\": 4, \"acro\": 535, \"acry\": 11, \"act\": 1695, \"acte\": 50, \"acti\": 998, \"acto\": 77, \"actr\": 19, \"actu\": 415, \"acu\": 33, \"acum\": 2, \"acus\": 1, \"acut\": 30, \"ada\": 371, \"adad\": 1, \"adag\": 8, \"adai\": 14, \"adal\": 1, \"adam\": 80, \"adap\": 53, \"adb\": 2, \"adbe\": 2, \"adc\": 1, \"adco\": 1, \"add\": 1150, \"adda\": 5, \"adde\": 462, \"addi\": 404, \"addl\": 1, \"addo\": 2, \"addr\": 258, \"addu\": 1, \"ade\": 122, \"adel\": 13, \"aden\": 6, \"adep\": 4, \"adeq\": 96, \"ader\": 1, \"adh\": 58, \"adhe\": 58, \"adi\": 51, \"adie\": 27, \"adin\": 2, \"adio\": 12, \"adip\": 1, \"adir\": 4, \"adit\": 1, \"adj\": 225, \"adja\": 15, \"adje\": 9, \"adjo\": 46, \"adju\": 155, \"adl\": 12, \"adla\": 8, \"adle\": 3, \"adli\": 1, \"adm\": 1011, \"adma\": 10, \"adme\": 3, \"admi\": 967, \"admo\": 31, \"adn\": 5, \"adna\": 3, \"ado\": 321, \"adob\": 5, \"adol\": 46, \"adon\": 46, \"adop\": 125, \"ador\": 98, \"adow\": 1, \"adr\": 38, \"adra\": 6, \"adre\": 2, \"adri\": 24, \"adro\": 6, \"ads\": 4, \"adso\": 4, \"adu\": 178, \"adua\": 9, \"adue\": 1, \"adui\": 8, \"adul\": 151, \"adum\": 2, \"adus\": 2, \"adv\": 1313, \"adva\": 617, \"adve\": 337, \"advi\": 321, \"advo\": 38, \"adz\": 1, \"aec\": 1, \"aeg\": 17, \"aega\": 1, \"aege\": 15, \"aegi\": 1, \"aen\": 6, \"aene\": 5, \"aeno\": 1, \"aeo\": 1, \"aer\": 77, \"aera\": 30, \"aere\": 4, \"aeri\": 10, \"aero\": 24, \"aes\": 42, \"aesc\": 6, \"aeso\": 1, \"aest\": 35, \"aet\": 2, \"aete\": 1, \"aetn\": 1, \"afa\": 74, \"afar\": 1, \"afe\": 3, \"afea\": 2, \"aff\": 1357, \"affa\": 271, \"affe\": 520, \"affg\": 1, \"affi\": 103, \"affl\": 247, \"affo\": 165, \"affr\": 50, \"afg\": 5, \"afgh\": 5, \"afi\": 6, \"afic\": 1, \"afie\": 1, \"afin\": 1, \"afir\": 3, \"afl\": 31, \"afla\": 4, \"aflc\": 5, \"aflo\": 22, \"afo\": 51, \"afoa\": 1, \"afoo\": 6, \"afor\": 43, \"afou\": 1, \"afr\": 679, \"afra\": 556, \"afre\": 10, \"afri\": 108, \"afro\": 5, \"afs\": 4, \"afst\": 4, \"aft\": 4333, \"afte\": 4330, \"afth\": 2, \"aftu\": 1, \"afv\": 1, \"afva\": 1, \"aga\": 5749, \"agab\": 2, \"agag\": 5, \"agai\": 5718, \"agam\": 3, \"agap\": 2, \"agas\": 1, \"agat\": 7, \"agay\": 1, \"age\": 450, \"agea\": 2, \"aged\": 1, \"agel\": 2, \"agen\": 264, \"ageo\": 5, \"agg\": 108, \"aggi\": 4, \"aggl\": 18, \"aggr\": 86, \"agh\": 7, \"agha\": 7, \"agi\": 115, \"agil\": 10, \"agin\": 4, \"agit\": 98, \"agl\": 3, \"agle\": 1, \"aglo\": 2, \"agn\": 15, \"agne\": 7, \"agno\": 7, \"agnu\": 1, \"ago\": 83, \"agoe\": 1, \"agon\": 82, \"agr\": 672, \"agra\": 10, \"agre\": 575, \"agri\": 83, \"agro\": 3, \"agu\": 7, \"aha\": 729, \"ahab\": 1, \"ahar\": 2, \"ahas\": 35, \"ahav\": 3, \"ahaz\": 37, \"ahb\": 1, \"ahba\": 1, \"ahe\": 171, \"ahea\": 168, \"ahi\": 157, \"ahia\": 7, \"ahie\": 6, \"ahih\": 2, \"ahij\": 20, \"ahik\": 20, \"ahil\": 5, \"ahim\": 36, \"ahin\": 8, \"ahir\": 7, \"ahis\": 5, \"ahit\": 35, \"ahl\": 3, \"ahla\": 3, \"ahm\": 7, \"ahma\": 3, \"ahme\": 3, \"ahmi\": 1, \"aho\": 41, \"ahoa\": 1, \"ahoh\": 5, \"ahol\": 26, \"ahr\": 1, \"ahre\": 1, \"ahu\": 3, \"ahum\": 1, \"ahuz\": 2, \"aia\": 7, \"aial\": 1, \"aiat\": 1, \"aic\": 1, \"aich\": 1, \"aid\": 70, \"aide\": 22, \"aidi\": 7, \"aidt\": 1, \"aie\": 1, \"aij\": 8, \"aija\": 7, \"aik\": 2, \"aike\": 1, \"aiki\": 1, \"ail\": 34, \"aile\": 14, \"aili\": 2, \"ailm\": 16, \"aim\": 82, \"aima\": 1, \"aime\": 37, \"aimi\": 6, \"aiml\": 11, \"ain\": 55, \"ains\": 
2, \"ainu\": 1, \"air\": 244, \"airb\": 7, \"airc\": 75, \"aird\": 2, \"aire\": 5, \"airf\": 14, \"airi\": 9, \"airl\": 19, \"airm\": 6, \"airp\": 45, \"airs\": 4, \"airt\": 3, \"airw\": 6, \"ais\": 12, \"aisl\": 12, \"aja\": 10, \"ajal\": 3, \"aka\": 1, \"ake\": 1, \"aker\": 1, \"aki\": 18, \"akim\": 1, \"akit\": 1, \"akk\": 8, \"akku\": 8, \"akr\": 3, \"akra\": 2, \"akro\": 1, \"ala\": 421, \"alaa\": 1, \"alab\": 43, \"alac\": 21, \"alad\": 4, \"alai\": 1, \"alam\": 9, \"alan\": 2, \"alar\": 200, \"alas\": 28, \"alb\": 105, \"alba\": 31, \"albe\": 41, \"albi\": 7, \"albo\": 1, \"albr\": 6, \"albu\": 18, \"alc\": 44, \"alca\": 1, \"alch\": 4, \"alci\": 5, \"alco\": 34, \"ald\": 14, \"alde\": 10, \"aldr\": 3, \"ale\": 243, \"alea\": 1, \"alec\": 7, \"aleh\": 12, \"alei\": 1, \"alem\": 5, \"alep\": 2, \"aler\": 67, \"ales\": 1, \"alew\": 1, \"alex\": 90, \"alf\": 64, \"alfo\": 1, \"alfr\": 62, \"alg\": 35, \"alga\": 8, \"alge\": 16, \"algi\": 4, \"algo\": 4, \"algu\": 3, \"alh\": 2, \"alha\": 2, \"ali\": 922, \"alia\": 6, \"alib\": 9, \"alic\": 421, \"alie\": 89, \"alig\": 30, \"alij\": 1, \"alik\": 100, \"alim\": 7, \"aliq\": 1, \"alis\": 1, \"aliu\": 8, \"aliv\": 244, \"alix\": 1, \"aliz\": 1, \"alk\": 12, \"alka\": 10, \"alky\": 2, \"all\": 1139, \"alla\": 24, \"allc\": 3, \"alle\": 161, \"allf\": 1, \"allg\": 1, \"alli\": 156, \"allk\": 1, \"alll\": 1, \"allm\": 1, \"alln\": 7, \"allo\": 654, \"allp\": 4, \"allr\": 1, \"alls\": 9, \"allt\": 4, \"allu\": 84, \"allv\": 1, \"allw\": 8, \"alm\": 1377, \"alma\": 11, \"almi\": 106, \"almo\": 1234, \"alms\": 1, \"almu\": 3, \"alo\": 1761, \"aloe\": 7, \"alof\": 87, \"alok\": 1, \"alon\": 1548, \"aloo\": 17, \"alor\": 2, \"alot\": 1, \"alou\": 94, \"alow\": 1, \"alp\": 52, \"alpa\": 1, \"alpe\": 8, \"alph\": 34, \"alpi\": 2, \"alr\": 587, \"alra\": 1, \"alre\": 586, \"als\": 3263, \"alsa\": 4, \"alsi\": 1, \"also\": 3, \"alt\": 1311, \"alta\": 471, \"alte\": 283, \"alth\": 367, \"alti\": 12, \"alto\": 172, \"altr\": 2, \"alu\": 35, \"alum\": 28, \"alun\": 1, \"alus\": 5, \"alv\": 30, \"alva\": 5, \"alve\": 15, \"alvi\": 8, \"alw\": 1502, \"alwa\": 1500, \"alwi\": 2, \"ama\": 371, \"amad\": 7, \"amai\": 6, \"amal\": 54, \"aman\": 3, \"amar\": 21, \"amas\": 26, \"amat\": 41, \"amaz\": 206, \"amb\": 324, \"amba\": 58, \"ambe\": 28, \"ambi\": 184, \"ambl\": 5, \"ambr\": 12, \"ambu\": 37, \"ame\": 1289, \"amea\": 2, \"amel\": 6, \"amen\": 91, \"amer\": 1078, \"amet\": 7, \"ami\": 281, \"amia\": 89, \"amic\": 10, \"amid\": 26, \"amie\": 1, \"amig\": 2, \"amin\": 7, \"amis\": 27, \"amit\": 11, \"amm\": 194, \"amma\": 1, \"ammi\": 37, \"ammo\": 132, \"ammu\": 19, \"amn\": 28, \"amno\": 28, \"amo\": 2313, \"amon\": 1874, \"amor\": 125, \"amou\": 267, \"amp\": 142, \"amph\": 10, \"ampl\": 124, \"amps\": 1, \"ampu\": 7, \"amr\": 21, \"amra\": 20, \"ams\": 6, \"amst\": 6, \"amu\": 175, \"amuc\": 1, \"amul\": 2, \"amus\": 172, \"amz\": 2, \"ana\": 540, \"anab\": 3, \"anac\": 30, \"anae\": 2, \"anag\": 1, \"anah\": 2, \"anai\": 2, \"anak\": 9, \"anal\": 258, \"anam\": 3, \"anan\": 15, \"anap\": 2, \"anar\": 123, \"anas\": 8, \"anat\": 57, \"anc\": 350, \"ance\": 50, \"anch\": 97, \"anci\": 202, \"anck\": 1, \"and\": 184, \"ande\": 35, \"andi\": 2, \"ando\": 20, \"andr\": 90, \"andy\": 4, \"ane\": 53, \"anec\": 20, \"anem\": 7, \"anes\": 4, \"anet\": 2, \"ang\": 1514, \"ange\": 990, \"angi\": 17, \"angl\": 161, \"ango\": 4, \"angr\": 256, \"angs\": 2, \"angu\": 84, \"anh\": 5, \"anha\": 1, \"anhe\": 1, \"anhw\": 1, \"anhy\": 2, \"ani\": 339, \"ania\": 1, 
\"anil\": 1, \"anim\": 315, \"anio\": 13, \"anis\": 6, \"anit\": 2, \"ank\": 48, \"anka\": 1, \"anke\": 2, \"ankl\": 45, \"ann\": 1153, \"anna\": 24, \"anne\": 13, \"anni\": 64, \"anno\": 297, \"annu\": 145, \"ano\": 2278, \"anod\": 78, \"anoi\": 180, \"anom\": 11, \"anon\": 24, \"anor\": 2, \"anot\": 1946, \"anou\": 1, \"anoy\": 1, \"ans\": 1696, \"anse\": 2, \"ansl\": 1, \"anso\": 1, \"ansu\": 1, \"answ\": 1691, \"ant\": 726, \"anta\": 37, \"ante\": 50, \"anth\": 44, \"anti\": 461, \"antl\": 5, \"anto\": 103, \"antw\": 1, \"anu\": 2, \"anv\": 13, \"anvi\": 13, \"anx\": 297, \"anxi\": 297, \"any\": 1289, \"anyb\": 159, \"anyh\": 53, \"anyl\": 1, \"anym\": 6, \"anyo\": 221, \"anyp\": 1, \"anyr\": 6, \"anyt\": 676, \"anyw\": 165, \"anz\": 1, \"anzi\": 1, \"aon\": 1, \"aoni\": 1, \"aor\": 4, \"aort\": 4, \"aou\": 1, \"aoue\": 1, \"apa\": 279, \"apac\": 13, \"apal\": 2, \"apar\": 255, \"apat\": 9, \"ape\": 32, \"apea\": 1, \"apel\": 1, \"apen\": 1, \"aper\": 15, \"aph\": 18, \"apha\": 4, \"aphe\": 9, \"aphi\": 2, \"aphr\": 2, \"aphs\": 1, \"api\": 16, \"apic\": 1, \"apie\": 13, \"apis\": 2, \"apl\": 4, \"apla\": 1, \"aplo\": 3, \"apo\": 301, \"apoc\": 14, \"apog\": 2, \"apol\": 138, \"apop\": 6, \"apos\": 119, \"apot\": 22, \"app\": 4080, \"appa\": 419, \"appe\": 1570, \"apph\": 1, \"appi\": 2, \"appl\": 664, \"appo\": 349, \"appr\": 1074, \"appu\": 1, \"apr\": 121, \"apri\": 90, \"apro\": 31, \"aps\": 1, \"apse\": 1, \"apt\": 14, \"apte\": 1, \"apti\": 7, \"aptl\": 5, \"aptn\": 1, \"aqu\": 38, \"aqua\": 6, \"aque\": 15, \"aqui\": 17, \"ara\": 95, \"arab\": 49, \"arai\": 1, \"aram\": 1, \"aran\": 1, \"arap\": 2, \"arar\": 3, \"arat\": 1, \"arau\": 9, \"arb\": 90, \"arba\": 3, \"arbe\": 1, \"arbi\": 55, \"arbo\": 21, \"arbu\": 7, \"arc\": 609, \"arca\": 8, \"arch\": 511, \"arci\": 1, \"arcl\": 1, \"arct\": 24, \"arcu\": 1, \"ard\": 55, \"arde\": 27, \"ardi\": 1, \"ardm\": 1, \"ardo\": 21, \"ardu\": 5, \"are\": 655, \"area\": 243, \"arel\": 3, \"aren\": 53, \"areo\": 3, \"areq\": 1, \"aret\": 2, \"arg\": 319, \"arga\": 3, \"arge\": 9, \"argi\": 14, \"argo\": 18, \"argu\": 272, \"argy\": 1, \"arh\": 2, \"arha\": 2, \"ari\": 472, \"aria\": 6, \"aric\": 5, \"arid\": 4, \"arie\": 12, \"arig\": 17, \"arim\": 7, \"ario\": 9, \"aris\": 369, \"arit\": 22, \"ariu\": 1, \"ariz\": 9, \"ark\": 33, \"arka\": 30, \"arki\": 3, \"arl\": 46, \"arle\": 42, \"arli\": 4, \"arm\": 1117, \"arma\": 27, \"armb\": 1, \"armc\": 8, \"arme\": 186, \"armf\": 2, \"armh\": 5, \"armi\": 124, \"arml\": 3, \"armo\": 89, \"armp\": 4, \"armr\": 1, \"arms\": 7, \"army\": 5, \"arn\": 62, \"arna\": 1, \"arnh\": 4, \"arni\": 1, \"arno\": 55, \"aro\": 1110, \"aroa\": 1, \"arod\": 2, \"aroe\": 17, \"arom\": 19, \"aroo\": 1, \"aros\": 230, \"arou\": 838, \"aroy\": 1, \"arp\": 18, \"arpa\": 4, \"arpe\": 2, \"arph\": 12, \"arr\": 988, \"arra\": 351, \"arre\": 86, \"arri\": 373, \"arro\": 176, \"arru\": 1, \"ars\": 24, \"arsa\": 7, \"arse\": 11, \"arsh\": 1, \"arsi\": 2, \"arso\": 2, \"art\": 987, \"arta\": 15, \"arte\": 92, \"artf\": 15, \"arth\": 227, \"arti\": 521, \"artk\": 1, \"artl\": 14, \"arts\": 1, \"artu\": 3, \"aru\": 7, \"arub\": 1, \"arum\": 1, \"arun\": 5, \"arv\": 5, \"arva\": 4, \"arve\": 1, \"ary\": 5, \"aryl\": 4, \"arz\": 1, \"asa\": 62, \"asah\": 20, \"asai\": 6, \"asap\": 34, \"asar\": 2, \"asb\": 2, \"asbe\": 2, \"asc\": 223, \"asca\": 2, \"asce\": 193, \"asch\": 1, \"asci\": 2, \"ascr\": 24, \"asd\": 1, \"asdi\": 1, \"ase\": 6, \"asen\": 3, \"asep\": 1, \"ash\": 501, \"asha\": 234, \"ashb\": 6, \"ashc\": 3, \"ashd\": 
26, \"ashe\": 138, \"ashi\": 2, \"ashk\": 10, \"ashl\": 1, \"ashm\": 2, \"ashn\": 2, \"asho\": 47, \"ashp\": 1, \"ashr\": 1, \"asht\": 22, \"ashu\": 4, \"ashv\": 1, \"asi\": 390, \"asia\": 25, \"asid\": 269, \"asie\": 1, \"asil\": 1, \"asim\": 1, \"asin\": 2, \"asit\": 1, \"ask\": 1195, \"aska\": 7, \"aske\": 1014, \"aski\": 141, \"asl\": 114, \"asla\": 8, \"asle\": 104, \"aslo\": 2, \"asm\": 3, \"asma\": 1, \"asmo\": 1, \"asn\": 2, \"asna\": 2, \"aso\": 1, \"asoc\": 1, \"asp\": 275, \"aspa\": 5, \"aspe\": 183, \"asph\": 10, \"aspi\": 70, \"aspr\": 2, \"asr\": 3, \"asri\": 3, \"ass\": 2508, \"assa\": 143, \"asse\": 597, \"assh\": 9, \"assi\": 544, \"assn\": 1, \"asso\": 320, \"assu\": 726, \"assw\": 3, \"assy\": 155, \"ast\": 441, \"asta\": 4, \"aste\": 39, \"asth\": 3, \"asti\": 5, \"astl\": 4, \"asto\": 275, \"astr\": 108, \"astu\": 2, \"astw\": 1, \"asu\": 35, \"asun\": 33, \"asup\": 2, \"asw\": 1, \"aswe\": 1, \"asy\": 50, \"asyl\": 40, \"asym\": 7, \"asyn\": 3, \"ata\": 13, \"atab\": 1, \"atal\": 1, \"atar\": 8, \"atav\": 1, \"atb\": 1, \"atba\": 1, \"ate\": 7, \"atel\": 1, \"ater\": 1, \"ath\": 201, \"atha\": 34, \"athe\": 90, \"athi\": 8, \"athl\": 61, \"athw\": 8, \"atk\": 3, \"atki\": 3, \"atl\": 140, \"atla\": 139, \"atle\": 1, \"atm\": 142, \"atmo\": 142, \"ato\": 246, \"atom\": 91, \"aton\": 100, \"atr\": 22, \"atre\": 2, \"atro\": 20, \"att\": 2582, \"atta\": 618, \"atte\": 1262, \"atti\": 240, \"attl\": 1, \"atto\": 175, \"attr\": 271, \"attu\": 6, \"atty\": 2, \"atu\": 1, \"atun\": 1, \"atw\": 2, \"atwe\": 1, \"atwi\": 1, \"aty\": 1, \"atyp\": 1, \"aua\": 4, \"auan\": 1, \"auar\": 3, \"aub\": 9, \"aube\": 1, \"aubr\": 1, \"aubu\": 6, \"auc\": 11, \"auct\": 11, \"aud\": 273, \"auda\": 20, \"audi\": 239, \"audr\": 9, \"audu\": 5, \"aue\": 2, \"auen\": 1, \"auer\": 1, \"aug\": 194, \"auge\": 5, \"augh\": 48, \"augm\": 31, \"augu\": 110, \"auj\": 1, \"aujo\": 1, \"aul\": 1, \"aun\": 134, \"aunt\": 20, \"auo\": 10, \"auoi\": 2, \"auou\": 3, \"auoy\": 5, \"aur\": 29, \"aura\": 4, \"aure\": 9, \"auro\": 14, \"auru\": 1, \"aus\": 107, \"ausc\": 1, \"auso\": 1, \"ausp\": 13, \"aust\": 92, \"aut\": 879, \"auth\": 494, \"auti\": 15, \"auto\": 269, \"autr\": 2, \"autu\": 76, \"aux\": 13, \"auxi\": 13, \"ava\": 363, \"avai\": 311, \"aval\": 3, \"avan\": 8, \"avar\": 16, \"avas\": 22, \"avat\": 3, \"ave\": 363, \"aven\": 134, \"aver\": 224, \"avi\": 24, \"avia\": 10, \"avid\": 2, \"avig\": 2, \"avil\": 1, \"avim\": 1, \"avit\": 4, \"avo\": 243, \"avoc\": 19, \"avoi\": 209, \"avou\": 2, \"avow\": 9, \"awa\": 3025, \"awai\": 54, \"awak\": 195, \"awar\": 289, \"awas\": 1, \"away\": 1, \"awe\": 18, \"awei\": 1, \"awes\": 5, \"awf\": 107, \"awfu\": 107, \"awh\": 27, \"awhe\": 1, \"awhi\": 26, \"awi\": 1, \"awin\": 1, \"awk\": 58, \"awkw\": 58, \"awl\": 1, \"awn\": 3, \"awni\": 3, \"awo\": 34, \"awok\": 29, \"awr\": 5, \"axe\": 24, \"axem\": 1, \"axi\": 58, \"axia\": 3, \"axio\": 7, \"axl\": 13, \"axle\": 4, \"ayd\": 2, \"aye\": 3, \"ayea\": 1, \"ayey\": 1, \"ayg\": 1, \"aygr\": 1, \"ayl\": 2, \"ayle\": 2, \"aym\": 4, \"ayr\": 36, \"ayre\": 2, \"ayri\": 1, \"ays\": 1, \"ayss\": 1, \"ayu\": 1, \"aza\": 69, \"azal\": 7, \"azan\": 1, \"azar\": 55, \"azaz\": 4, \"azb\": 1, \"azbu\": 1, \"aze\": 16, \"azek\": 7, \"azer\": 1, \"azg\": 4, \"azga\": 4, \"azi\": 3, \"azie\": 1, \"azim\": 1, \"aziz\": 1, \"azm\": 11, \"azma\": 8, \"azmo\": 3, \"azn\": 1, \"azno\": 1, \"azo\": 10, \"azor\": 6, \"azot\": 2, \"azr\": 9, \"azri\": 9, \"azu\": 16, \"azub\": 4, \"azur\": 9, \"azus\": 1, \"azz\": 5, \"azza\": 
4, \"azzu\": 1, \"baa\": 175, \"baal\": 67, \"baan\": 12, \"baar\": 1, \"baas\": 29, \"bab\": 575, \"baba\": 1, \"babb\": 14, \"babc\": 1, \"babe\": 41, \"babi\": 26, \"babo\": 3, \"baby\": 324, \"bac\": 2564, \"bacc\": 6, \"bach\": 26, \"baci\": 2, \"back\": 396, \"baco\": 15, \"bact\": 23, \"bad\": 135, \"bade\": 4, \"badf\": 1, \"badg\": 28, \"badi\": 3, \"badl\": 49, \"badm\": 2, \"badn\": 11, \"badr\": 1, \"badu\": 1, \"bae\": 5, \"baf\": 49, \"baff\": 49, \"bag\": 46, \"baga\": 1, \"bagd\": 1, \"bagg\": 19, \"bagl\": 1, \"bagp\": 1, \"bah\": 9, \"baha\": 2, \"bahi\": 1, \"bahu\": 5, \"bai\": 44, \"bail\": 20, \"bain\": 1, \"bair\": 4, \"bait\": 7, \"baj\": 2, \"baja\": 1, \"baji\": 1, \"bak\": 167, \"bakb\": 6, \"bake\": 118, \"bakh\": 1, \"baki\": 14, \"bakl\": 1, \"bal\": 1028, \"bala\": 298, \"balc\": 34, \"bald\": 25, \"bale\": 27, \"bali\": 6, \"balk\": 17, \"ball\": 256, \"balm\": 12, \"bals\": 2, \"balt\": 36, \"balu\": 6, \"balz\": 2, \"bam\": 11, \"bama\": 1, \"bamb\": 6, \"bamo\": 3, \"ban\": 942, \"bana\": 7, \"banb\": 1, \"banc\": 6, \"band\": 146, \"bane\": 1, \"banf\": 1, \"bang\": 54, \"bani\": 54, \"banj\": 4, \"bank\": 149, \"bann\": 64, \"banq\": 85, \"bans\": 4, \"bant\": 16, \"bany\": 4, \"bap\": 173, \"bapt\": 172, \"bar\": 1750, \"bara\": 30, \"barb\": 245, \"barc\": 33, \"bard\": 24, \"bare\": 74, \"barf\": 1, \"barg\": 82, \"barh\": 1, \"bari\": 12, \"barj\": 2, \"bark\": 28, \"barl\": 54, \"barm\": 1, \"barn\": 114, \"baro\": 64, \"barq\": 1, \"barr\": 256, \"bars\": 6, \"bart\": 158, \"baru\": 26, \"barz\": 12, \"bas\": 1286, \"basa\": 1, \"basc\": 3, \"base\": 289, \"bash\": 91, \"basi\": 428, \"bask\": 202, \"basl\": 3, \"basm\": 1, \"baso\": 25, \"basr\": 5, \"bass\": 8, \"bast\": 43, \"bat\": 1141, \"bata\": 3, \"batc\": 10, \"bate\": 165, \"bath\": 152, \"bati\": 10, \"bato\": 13, \"batt\": 607, \"batw\": 1, \"bau\": 13, \"baub\": 2, \"baud\": 2, \"baue\": 2, \"bauh\": 1, \"baul\": 4, \"bav\": 5, \"bava\": 5, \"baw\": 23, \"bawd\": 4, \"bawh\": 1, \"bawl\": 12, \"bay\": 58, \"baya\": 3, \"baye\": 7, \"bayf\": 1, \"bayi\": 1, \"bayl\": 5, \"bayo\": 27, \"bayr\": 4, \"baz\": 11, \"baza\": 9, \"bazl\": 2, \"bbc\": 1, \"bde\": 2, \"bdel\": 2, \"bdf\": 1, \"bdi\": 22, \"bdik\": 22, \"bea\": 3233, \"beac\": 157, \"bead\": 22, \"beak\": 10, \"beal\": 14, \"beam\": 99, \"bean\": 19, \"bear\": 513, \"beas\": 473, \"beat\": 181, \"beau\": 776, \"beav\": 16, \"beb\": 7, \"beba\": 6, \"bebo\": 1, \"bec\": 4332, \"beca\": 3319, \"becc\": 2, \"bech\": 9, \"beck\": 60, \"beco\": 941, \"bed\": 242, \"beda\": 7, \"bedb\": 1, \"bedc\": 12, \"bedd\": 8, \"bede\": 3, \"bedf\": 32, \"bedg\": 1, \"bedh\": 1, \"bedl\": 3, \"bedo\": 1, \"bedp\": 1, \"bedr\": 74, \"beds\": 22, \"bedt\": 7, \"bedw\": 2, \"bee\": 6262, \"beeb\": 12, \"beec\": 12, \"beef\": 12, \"beeh\": 10, \"beel\": 14, \"been\": 31, \"beep\": 3, \"beer\": 60, \"bees\": 5, \"beet\": 39, \"beev\": 8, \"bef\": 4588, \"befa\": 49, \"befe\": 18, \"befi\": 8, \"befo\": 4501, \"befr\": 8, \"befu\": 4, \"beg\": 2558, \"bega\": 1302, \"bege\": 37, \"begg\": 155, \"begi\": 826, \"begl\": 2, \"bego\": 47, \"begr\": 4, \"begu\": 182, \"beh\": 2794, \"beha\": 360, \"behe\": 149, \"behi\": 741, \"beho\": 1540, \"behr\": 3, \"behy\": 1, \"bei\": 2436, \"beid\": 1, \"beig\": 2, \"bein\": 2428, \"beir\": 1, \"beis\": 1, \"bej\": 1, \"beju\": 1, \"bek\": 2, \"beka\": 1, \"bekk\": 1, \"bel\": 2933, \"bela\": 21, \"belc\": 15, \"beld\": 1, \"bele\": 32, \"belf\": 5, \"belg\": 40, \"beli\": 1521, \"bell\": 225, \"belm\": 5, 
\"belo\": 807, \"bels\": 14, \"belt\": 27, \"belu\": 3, \"belv\": 4, \"belz\": 2, \"bem\": 11, \"bema\": 2, \"bemo\": 9, \"ben\": 1308, \"bena\": 44, \"benc\": 114, \"bend\": 61, \"bene\": 479, \"beng\": 10, \"benh\": 28, \"beni\": 25, \"benj\": 199, \"benn\": 13, \"beno\": 2, \"bens\": 18, \"bent\": 8, \"benu\": 6, \"benw\": 70, \"beny\": 1, \"benz\": 5, \"beo\": 26, \"beow\": 15, \"bep\": 1, \"bepa\": 1, \"beq\": 32, \"bequ\": 32, \"ber\": 419, \"bera\": 8, \"berb\": 1, \"berc\": 1, \"bere\": 42, \"berg\": 24, \"beri\": 18, \"berk\": 24, \"berl\": 89, \"berm\": 18, \"bern\": 33, \"bero\": 5, \"berr\": 82, \"bert\": 40, \"bery\": 12, \"bes\": 1911, \"besa\": 2, \"bese\": 113, \"besh\": 1, \"besi\": 603, \"besm\": 8, \"beso\": 56, \"besp\": 28, \"bess\": 4, \"best\": 151, \"bet\": 3401, \"beta\": 9, \"bete\": 3, \"beth\": 290, \"beti\": 21, \"beto\": 19, \"betr\": 147, \"bets\": 8, \"bett\": 1347, \"betw\": 1544, \"beu\": 1, \"beul\": 1, \"bev\": 41, \"beve\": 33, \"bevo\": 1, \"bew\": 156, \"bewa\": 75, \"bewe\": 1, \"bewh\": 1, \"bewi\": 75, \"bewr\": 4, \"bex\": 1, \"bexa\": 1, \"bey\": 534, \"beye\": 2, \"beyo\": 532, \"bez\": 20, \"beza\": 12, \"beze\": 8, \"bho\": 1, \"bia\": 19, \"bian\": 2, \"bias\": 3, \"bib\": 126, \"bibb\": 1, \"bibl\": 124, \"bic\": 34, \"bica\": 2, \"bice\": 4, \"bich\": 9, \"bick\": 8, \"bico\": 1, \"bicy\": 9, \"bid\": 79, \"bidd\": 52, \"bide\": 1, \"bidk\": 1, \"bids\": 1, \"bie\": 17, \"bien\": 11, \"bier\": 1, \"biet\": 1, \"bif\": 5, \"biff\": 1, \"bifo\": 3, \"bifu\": 1, \"big\": 135, \"biga\": 4, \"bigb\": 2, \"bigc\": 1, \"bigd\": 1, \"bigg\": 99, \"bigl\": 2, \"bign\": 5, \"bigo\": 8, \"bigs\": 2, \"bigt\": 5, \"bigv\": 6, \"bij\": 1, \"bijo\": 1, \"bik\": 1, \"biki\": 1, \"bil\": 609, \"bila\": 2, \"bilb\": 1, \"bild\": 81, \"bile\": 2, \"bilg\": 6, \"bilh\": 16, \"bili\": 9, \"bilk\": 2, \"bill\": 277, \"bilo\": 1, \"bils\": 2, \"bim\": 6, \"bime\": 1, \"bimh\": 1, \"bimi\": 1, \"bimo\": 3, \"bin\": 194, \"bina\": 1, \"bind\": 49, \"bine\": 2, \"bing\": 2, \"binn\": 23, \"bino\": 38, \"binu\": 1, \"bio\": 74, \"bioa\": 2, \"bioc\": 3, \"biod\": 1, \"biog\": 23, \"biol\": 35, \"biom\": 3, \"biop\": 6, \"bios\": 1, \"bip\": 7, \"bipa\": 2, \"bipe\": 3, \"bipl\": 2, \"bir\": 684, \"bira\": 1, \"birc\": 11, \"bird\": 197, \"bire\": 3, \"birg\": 2, \"birk\": 3, \"birm\": 17, \"birn\": 4, \"birs\": 1, \"birt\": 261, \"birz\": 1, \"bis\": 96, \"bisc\": 26, \"bise\": 1, \"bish\": 52, \"bism\": 3, \"biso\": 7, \"bisq\": 6, \"biss\": 1, \"bit\": 370, \"bitc\": 7, \"bite\": 15, \"bith\": 4, \"biti\": 20, \"bitl\": 1, \"bitt\": 263, \"bitu\": 2, \"biv\": 13, \"bivo\": 13, \"biw\": 9, \"biz\": 14, \"biza\": 8, \"bize\": 4, \"bizj\": 1, \"bizt\": 1, \"bje\": 1, \"bjer\": 1, \"bla\": 1716, \"blab\": 5, \"blac\": 936, \"blad\": 96, \"blai\": 7, \"blak\": 13, \"blam\": 133, \"blan\": 214, \"blar\": 3, \"blas\": 209, \"blat\": 14, \"blau\": 3, \"blaz\": 81, \"bld\": 3, \"ble\": 1099, \"blea\": 71, \"bleb\": 1, \"blec\": 1, \"blee\": 64, \"blem\": 69, \"blen\": 56, \"bles\": 764, \"blev\": 3, \"bli\": 483, \"blig\": 12, \"blim\": 1, \"blin\": 357, \"blip\": 1, \"blis\": 85, \"blit\": 19, \"bliz\": 8, \"bll\": 4, \"bllc\": 4, \"blo\": 1878, \"bloa\": 13, \"bloc\": 205, \"bloi\": 1, \"blok\": 2, \"blom\": 2, \"blon\": 39, \"bloo\": 1102, \"blos\": 74, \"blot\": 26, \"blou\": 9, \"blow\": 180, \"blu\": 763, \"blub\": 36, \"bluc\": 1, \"blud\": 5, \"blue\": 59, \"bluf\": 20, \"blui\": 5, \"blum\": 5, \"blun\": 83, \"blur\": 17, \"blus\": 95, \"blut\": 2, \"blv\": 4, 
\"bly\": 1, \"blyt\": 1, \"bme\": 2, \"bmew\": 2, \"boa\": 1395, \"boad\": 3, \"boan\": 1, \"boar\": 517, \"boas\": 102, \"boat\": 268, \"bob\": 78, \"bobb\": 75, \"boc\": 6, \"boch\": 4, \"bock\": 1, \"bod\": 1286, \"bodd\": 3, \"bode\": 3, \"bodh\": 1, \"bodi\": 219, \"bodk\": 2, \"bodl\": 1, \"body\": 18, \"boe\": 5, \"boeh\": 1, \"boei\": 3, \"boeo\": 1, \"bog\": 32, \"boga\": 1, \"bogb\": 5, \"boge\": 11, \"bogg\": 6, \"bogi\": 1, \"bogu\": 3, \"boh\": 10, \"boha\": 3, \"bohe\": 5, \"bohl\": 1, \"boi\": 138, \"boie\": 1, \"boil\": 83, \"bois\": 16, \"boit\": 2, \"bok\": 1, \"bol\": 301, \"bola\": 1, \"bold\": 77, \"bole\": 2, \"bolg\": 2, \"boli\": 10, \"bolk\": 2, \"boll\": 1, \"bolo\": 3, \"bols\": 17, \"bolt\": 42, \"bom\": 157, \"bomb\": 105, \"bon\": 709, \"bona\": 7, \"bond\": 184, \"bone\": 209, \"bonf\": 8, \"bong\": 1, \"bonh\": 4, \"boni\": 6, \"bonj\": 1, \"bonn\": 41, \"bono\": 1, \"bont\": 2, \"bonu\": 2, \"bonz\": 3, \"boo\": 1162, \"boob\": 14, \"boog\": 2, \"booi\": 1, \"book\": 263, \"boom\": 16, \"boon\": 4, \"boor\": 5, \"boos\": 23, \"boot\": 122, \"booz\": 5, \"bor\": 1110, \"bora\": 3, \"bord\": 308, \"bore\": 37, \"borg\": 3, \"bori\": 19, \"borl\": 1, \"born\": 132, \"boro\": 9, \"borr\": 83, \"bos\": 242, \"bosc\": 4, \"bosi\": 7, \"bosk\": 1, \"bosl\": 2, \"boso\": 103, \"bosp\": 6, \"boss\": 13, \"bost\": 77, \"bosu\": 1, \"bot\": 2337, \"bota\": 8, \"botc\": 5, \"both\": 58, \"boto\": 1, \"bott\": 460, \"botu\": 2, \"bou\": 847, \"boua\": 1, \"bouc\": 3, \"boud\": 1, \"bouf\": 3, \"boug\": 213, \"boul\": 81, \"boun\": 475, \"bouq\": 9, \"bour\": 28, \"bout\": 13, \"bouv\": 3, \"bov\": 3, \"bovi\": 3, \"bow\": 481, \"bowa\": 1, \"bowd\": 6, \"bowe\": 242, \"bowi\": 26, \"bowl\": 46, \"bowm\": 4, \"bows\": 22, \"bowt\": 1, \"box\": 53, \"boxc\": 8, \"boxe\": 38, \"boxf\": 1, \"boxi\": 2, \"boxs\": 1, \"boxw\": 1, \"boy\": 370, \"boya\": 1, \"boyc\": 10, \"boye\": 3, \"boyf\": 1, \"boyh\": 10, \"boyi\": 14, \"boyl\": 5, \"boym\": 3, \"boyn\": 1, \"boys\": 1, \"boz\": 11, \"boze\": 1, \"bozk\": 1, \"bozr\": 9, \"bpl\": 1, \"bpla\": 1, \"bra\": 1604, \"brac\": 110, \"brad\": 23, \"brag\": 20, \"brah\": 67, \"brai\": 227, \"brak\": 92, \"bram\": 9, \"bran\": 570, \"braq\": 14, \"bras\": 220, \"brat\": 3, \"brau\": 16, \"brav\": 145, \"braw\": 30, \"bray\": 3, \"braz\": 36, \"bre\": 3011, \"brea\": 2115, \"brec\": 1, \"bree\": 179, \"breg\": 1, \"brel\": 1, \"brem\": 5, \"bren\": 17, \"bres\": 12, \"bret\": 591, \"breu\": 2, \"brev\": 17, \"brew\": 16, \"bri\": 3206, \"bria\": 31, \"brib\": 28, \"bric\": 66, \"brid\": 351, \"brie\": 201, \"brig\": 530, \"bril\": 103, \"brim\": 34, \"brin\": 1493, \"bris\": 94, \"brit\": 234, \"bro\": 5052, \"broa\": 455, \"brob\": 10, \"broc\": 14, \"brod\": 11, \"broe\": 1, \"brog\": 4, \"broi\": 29, \"brok\": 716, \"brom\": 6, \"bron\": 99, \"broo\": 273, \"brot\": 1069, \"brou\": 1500, \"brow\": 772, \"brox\": 2, \"broy\": 1, \"bru\": 611, \"bruc\": 14, \"brue\": 1, \"bruh\": 1, \"brui\": 86, \"brum\": 20, \"brun\": 37, \"brus\": 130, \"brut\": 274, \"brux\": 1, \"bry\": 22, \"brya\": 16, \"bryc\": 2, \"bryn\": 1, \"brys\": 1, \"btu\": 2, \"btus\": 1, \"bua\": 1, \"buaf\": 1, \"bub\": 80, \"bubb\": 77, \"bube\": 3, \"buc\": 176, \"bucc\": 1, \"buce\": 1, \"buch\": 10, \"buck\": 134, \"buco\": 1, \"bud\": 181, \"buda\": 8, \"budd\": 57, \"budg\": 88, \"budi\": 1, \"budl\": 1, \"budw\": 1, \"budz\": 1, \"bue\": 5, \"buel\": 2, \"buen\": 3, \"buf\": 92, \"buff\": 83, \"bug\": 47, \"buga\": 1, \"bugb\": 1, \"buge\": 1, \"bugg\": 
14, \"bugl\": 22, \"bui\": 1182, \"buic\": 3, \"buil\": 1179, \"buk\": 7, \"bukk\": 7, \"bul\": 701, \"bulb\": 9, \"bulg\": 20, \"bulk\": 36, \"bull\": 335, \"bulr\": 6, \"bult\": 11, \"bulw\": 51, \"bum\": 57, \"bumb\": 14, \"bumm\": 1, \"bump\": 32, \"bun\": 212, \"buna\": 1, \"bunc\": 54, \"bund\": 57, \"bung\": 18, \"bunk\": 24, \"bunn\": 4, \"bunt\": 7, \"buny\": 4, \"buo\": 50, \"buoy\": 32, \"bur\": 2147, \"bura\": 1, \"burb\": 2, \"burc\": 4, \"burd\": 181, \"bure\": 64, \"burf\": 1, \"burg\": 60, \"buri\": 237, \"burk\": 17, \"burl\": 24, \"burm\": 25, \"burn\": 853, \"burr\": 14, \"burs\": 319, \"burt\": 24, \"bury\": 19, \"bus\": 1457, \"busb\": 2, \"busc\": 2, \"buse\": 7, \"bush\": 63, \"busi\": 873, \"busk\": 2, \"buss\": 4, \"bust\": 297, \"busy\": 6, \"but\": 363, \"buta\": 1, \"butc\": 61, \"butl\": 65, \"butt\": 209, \"buty\": 10, \"bux\": 5, \"buxo\": 3, \"buxt\": 2, \"buy\": 75, \"buye\": 23, \"buyi\": 40, \"buz\": 46, \"buzi\": 2, \"buzz\": 25, \"bye\": 1, \"byer\": 1, \"byf\": 1, \"byg\": 4, \"bygo\": 4, \"byi\": 1, \"byin\": 1, \"byl\": 4, \"byla\": 2, \"byli\": 1, \"bylo\": 1, \"byp\": 19, \"bypa\": 9, \"bypr\": 10, \"byr\": 54, \"byrd\": 3, \"byrl\": 2, \"byrn\": 9, \"byro\": 30, \"byrt\": 1, \"bys\": 5, \"byst\": 5, \"byt\": 2, \"byth\": 1, \"byw\": 11, \"bywa\": 3, \"bywo\": 8, \"byz\": 9, \"byza\": 9, \"cab\": 249, \"caba\": 16, \"cabb\": 10, \"cabd\": 1, \"cabi\": 164, \"cabl\": 25, \"cabm\": 13, \"cabo\": 4, \"cabr\": 4, \"cabu\": 2, \"cac\": 17, \"caca\": 1, \"cach\": 7, \"cack\": 6, \"caco\": 2, \"cact\": 1, \"cad\": 75, \"cada\": 5, \"cadd\": 2, \"cade\": 16, \"cadi\": 16, \"cadm\": 3, \"cadr\": 3, \"cadw\": 3, \"cae\": 338, \"caec\": 1, \"caes\": 296, \"caet\": 2, \"caf\": 59, \"cafe\": 24, \"caff\": 1, \"cafr\": 1, \"cag\": 46, \"caga\": 1, \"cage\": 9, \"cah\": 2, \"cahi\": 1, \"caho\": 1, \"cai\": 66, \"caia\": 9, \"cain\": 9, \"cair\": 9, \"caiu\": 14, \"caj\": 1, \"cajo\": 1, \"cak\": 105, \"cake\": 46, \"caki\": 1, \"cal\": 3723, \"cala\": 57, \"calc\": 151, \"cald\": 25, \"cale\": 89, \"calf\": 3, \"calh\": 20, \"cali\": 157, \"calk\": 3, \"call\": 1917, \"calm\": 137, \"caln\": 4, \"calo\": 17, \"calp\": 10, \"calt\": 2, \"calu\": 10, \"calv\": 46, \"caly\": 1, \"cam\": 4823, \"cama\": 6, \"camb\": 31, \"camd\": 32, \"came\": 180, \"cami\": 22, \"camo\": 7, \"camp\": 340, \"camu\": 4, \"can\": 2389, \"cana\": 270, \"canc\": 52, \"cand\": 335, \"cane\": 7, \"cani\": 5, \"cank\": 13, \"cann\": 1200, \"cano\": 67, \"cans\": 86, \"cant\": 39, \"canu\": 1, \"canv\": 62, \"cany\": 19, \"cao\": 1, \"caou\": 1, \"cap\": 2263, \"capa\": 273, \"cape\": 49, \"caph\": 6, \"capi\": 239, \"capo\": 9, \"capp\": 19, \"capr\": 29, \"caps\": 33, \"capt\": 1495, \"capu\": 1, \"car\": 3820, \"cara\": 26, \"carb\": 83, \"carc\": 82, \"card\": 109, \"care\": 661, \"carg\": 16, \"cari\": 35, \"cark\": 2, \"carl\": 126, \"carm\": 67, \"carn\": 62, \"caro\": 109, \"carp\": 183, \"carr\": 1288, \"cars\": 21, \"cart\": 79, \"caru\": 4, \"carv\": 93, \"carw\": 3, \"cary\": 2, \"cas\": 2365, \"casa\": 5, \"casb\": 4, \"casc\": 11, \"case\": 263, \"cash\": 5, \"casi\": 8, \"cask\": 61, \"casl\": 2, \"casp\": 2, \"cass\": 203, \"cast\": 274, \"casu\": 70, \"cat\": 1138, \"cata\": 103, \"catc\": 277, \"cate\": 115, \"catf\": 2, \"cath\": 277, \"cati\": 1, \"catk\": 7, \"catl\": 4, \"cato\": 1, \"cats\": 11, \"catt\": 282, \"cau\": 1537, \"cauc\": 11, \"caud\": 1, \"caue\": 1, \"cauf\": 1, \"caug\": 342, \"caui\": 1, \"caul\": 18, \"caus\": 1009, \"caut\": 141, \"cav\": 183, 
\"cava\": 52, \"cave\": 43, \"cavi\": 29, \"cavo\": 4, \"caw\": 24, \"cawd\": 22, \"cawi\": 2, \"cay\": 2, \"caye\": 2, \"ccn\": 1, \"cdc\": 2, \"cea\": 318, \"cear\": 1, \"ceas\": 314, \"cec\": 183, \"ceci\": 183, \"ced\": 106, \"ceda\": 101, \"cedr\": 4, \"cedv\": 1, \"cee\": 2, \"ceec\": 2, \"cei\": 58, \"ceil\": 57, \"cel\": 441, \"cele\": 161, \"celi\": 6, \"cell\": 163, \"cels\": 1, \"celt\": 14, \"cem\": 39, \"cema\": 1, \"ceme\": 38, \"cen\": 1318, \"cenc\": 3, \"ceni\": 1, \"cenn\": 2, \"ceno\": 1, \"cens\": 84, \"cent\": 1066, \"cep\": 9, \"ceph\": 7, \"cept\": 1, \"cer\": 1682, \"cera\": 13, \"cerb\": 1, \"cere\": 132, \"ceri\": 3, \"cerm\": 1, \"cert\": 1515, \"ceru\": 5, \"cerv\": 5, \"ces\": 13, \"cesa\": 1, \"cesi\": 3, \"cess\": 4, \"cest\": 3, \"cet\": 28, \"ceta\": 6, \"cete\": 5, \"ceto\": 11, \"cetu\": 3, \"cey\": 7, \"ceyl\": 7, \"cez\": 4, \"ceza\": 4, \"cha\": 6261, \"chab\": 6, \"chac\": 8, \"chad\": 3, \"chaf\": 31, \"chag\": 6, \"chah\": 1, \"chai\": 553, \"chal\": 208, \"cham\": 409, \"chan\": 1677, \"chao\": 64, \"chap\": 550, \"char\": 2307, \"chas\": 252, \"chat\": 87, \"chau\": 32, \"chav\": 7, \"chaw\": 1, \"chay\": 1, \"che\": 1475, \"chea\": 115, \"cheb\": 8, \"chec\": 211, \"ched\": 6, \"chee\": 488, \"chef\": 1, \"cheh\": 1, \"chek\": 2, \"chel\": 7, \"chem\": 125, \"chen\": 14, \"chep\": 5, \"cher\": 249, \"ches\": 171, \"chev\": 9, \"chew\": 35, \"chey\": 5, \"chez\": 1, \"chi\": 5075, \"chia\": 7, \"chib\": 1, \"chic\": 230, \"chid\": 14, \"chie\": 632, \"chig\": 2, \"chil\": 3651, \"chim\": 110, \"chin\": 218, \"chio\": 2, \"chip\": 30, \"chir\": 9, \"chis\": 28, \"chit\": 6, \"chiu\": 2, \"chiv\": 19, \"chl\": 45, \"chlo\": 45, \"chm\": 1, \"cho\": 1183, \"choa\": 3, \"choc\": 38, \"chod\": 2, \"choi\": 251, \"chok\": 65, \"chol\": 36, \"chom\": 2, \"choo\": 227, \"chop\": 30, \"chor\": 138, \"chos\": 351, \"chou\": 3, \"chow\": 16, \"choy\": 4, \"choz\": 1, \"chr\": 1300, \"chre\": 1, \"chri\": 1167, \"chro\": 121, \"chry\": 11, \"chu\": 1176, \"chub\": 2, \"chuc\": 54, \"chuf\": 2, \"chug\": 2, \"chum\": 3, \"chun\": 9, \"chur\": 1048, \"chus\": 44, \"chut\": 5, \"chuz\": 1, \"cia\": 2, \"ciar\": 1, \"cib\": 2, \"cibi\": 1, \"cibu\": 1, \"cic\": 26, \"cica\": 3, \"cice\": 21, \"cici\": 1, \"cico\": 1, \"cid\": 11, \"cide\": 11, \"cie\": 6, \"ciec\": 1, \"ciel\": 5, \"cig\": 98, \"ciga\": 98, \"cil\": 13, \"cili\": 13, \"cim\": 4, \"cima\": 2, \"cimb\": 1, \"cimo\": 1, \"cin\": 74, \"cinc\": 15, \"cind\": 12, \"cine\": 10, \"cinn\": 32, \"cinq\": 3, \"cip\": 13, \"ciph\": 11, \"cipo\": 1, \"cipr\": 1, \"cir\": 893, \"circ\": 893, \"cis\": 13, \"cist\": 13, \"cit\": 2330, \"cita\": 13, \"cite\": 38, \"citi\": 784, \"cito\": 2, \"citr\": 7, \"city\": 31, \"ciu\": 5, \"ciud\": 3, \"ciui\": 2, \"civ\": 386, \"civi\": 386, \"cla\": 1714, \"clac\": 1, \"clad\": 1, \"clai\": 377, \"clam\": 80, \"clan\": 37, \"clap\": 62, \"clar\": 131, \"clas\": 561, \"clat\": 22, \"clau\": 58, \"clav\": 14, \"claw\": 20, \"clay\": 36, \"cle\": 1714, \"clea\": 1323, \"cleb\": 1, \"clee\": 4, \"clef\": 20, \"clem\": 23, \"clen\": 17, \"cleo\": 6, \"cler\": 152, \"clev\": 162, \"clew\": 2, \"cli\": 556, \"clib\": 3, \"clic\": 32, \"clie\": 37, \"clif\": 80, \"clim\": 231, \"clin\": 125, \"clip\": 25, \"cliq\": 3, \"clit\": 5, \"cliv\": 1, \"clo\": 2475, \"cloa\": 37, \"clob\": 3, \"cloc\": 149, \"clod\": 18, \"clog\": 9, \"cloi\": 5, \"clok\": 3, \"clom\": 2, \"clon\": 1, \"cloo\": 1, \"clos\": 1047, \"clot\": 576, \"clou\": 515, \"clov\": 54, \"clow\": 34, \"cloy\": 4, 
\"clu\": 457, \"club\": 48, \"cluc\": 10, \"clue\": 10, \"clum\": 45, \"clun\": 29, \"clur\": 2, \"clus\": 74, \"clut\": 48, \"cly\": 4, \"clyd\": 2, \"clyf\": 1, \"clym\": 1, \"cmd\": 1, \"cmo\": 4, \"cni\": 1, \"cnid\": 1, \"coa\": 894, \"coac\": 98, \"coag\": 2, \"coah\": 6, \"coal\": 68, \"coar\": 48, \"coas\": 297, \"coat\": 106, \"coau\": 1, \"coax\": 10, \"cob\": 63, \"coba\": 3, \"cobb\": 11, \"cobh\": 3, \"cobl\": 3, \"cobr\": 3, \"cobw\": 5, \"coc\": 189, \"coca\": 3, \"cocc\": 2, \"coch\": 8, \"cock\": 100, \"coco\": 29, \"coct\": 2, \"cocy\": 1, \"cod\": 80, \"codd\": 5, \"code\": 21, \"codf\": 2, \"codi\": 7, \"coe\": 40, \"coed\": 3, \"coef\": 6, \"coen\": 1, \"coer\": 11, \"coet\": 1, \"coeu\": 1, \"coev\": 1, \"coex\": 15, \"cof\": 208, \"cofa\": 1, \"coff\": 207, \"cog\": 32, \"coge\": 7, \"cogg\": 1, \"cogi\": 6, \"cogn\": 17, \"coh\": 45, \"cohe\": 37, \"cohn\": 1, \"coho\": 3, \"coi\": 192, \"coif\": 1, \"coig\": 1, \"coil\": 35, \"coin\": 102, \"cok\": 6, \"coke\": 1, \"col\": 2929, \"cola\": 2, \"colc\": 5, \"cold\": 68, \"cole\": 31, \"colf\": 1, \"colh\": 2, \"coli\": 6, \"coll\": 924, \"colm\": 13, \"coln\": 5, \"colo\": 1106, \"colq\": 2, \"colt\": 16, \"colu\": 182, \"colv\": 1, \"colz\": 1, \"com\": 15515, \"coma\": 3, \"comb\": 325, \"come\": 891, \"comf\": 617, \"comi\": 748, \"comm\": 4086, \"comp\": 4413, \"comr\": 141, \"coms\": 2, \"comt\": 1, \"comu\": 3, \"con\": 15518, \"cona\": 13, \"conc\": 2249, \"cond\": 937, \"cone\": 15, \"conf\": 1659, \"cong\": 884, \"coni\": 22, \"conj\": 135, \"conl\": 1, \"conn\": 377, \"cono\": 2, \"conp\": 1, \"conq\": 115, \"conr\": 12, \"cons\": 3900, \"cont\": 3850, \"conu\": 20, \"conv\": 1299, \"conw\": 1, \"cony\": 3, \"coo\": 881, \"cooc\": 3, \"cooe\": 1, \"cooi\": 1, \"cook\": 94, \"cool\": 201, \"coom\": 9, \"coon\": 3, \"coop\": 163, \"coor\": 61, \"coos\": 4, \"cooz\": 1, \"cop\": 301, \"copa\": 1, \"cope\": 42, \"copi\": 67, \"copl\": 3, \"copo\": 2, \"copp\": 41, \"copr\": 2, \"cops\": 1, \"copt\": 1, \"copu\": 4, \"copy\": 17, \"coq\": 2, \"coqu\": 2, \"cor\": 1996, \"cora\": 21, \"corb\": 2, \"corc\": 2, \"cord\": 100, \"core\": 6, \"cori\": 25, \"cork\": 58, \"corl\": 2, \"corm\": 5, \"corn\": 458, \"coro\": 38, \"corp\": 386, \"corr\": 554, \"cors\": 12, \"cort\": 24, \"cory\": 1, \"cos\": 666, \"cosa\": 1, \"cose\": 1, \"cosi\": 11, \"cosm\": 58, \"cosp\": 3, \"coss\": 8, \"cost\": 305, \"cot\": 253, \"cote\": 4, \"coti\": 3, \"cotm\": 1, \"cott\": 241, \"coty\": 1, \"cou\": 10382, \"couc\": 50, \"coud\": 1, \"coue\": 4, \"coug\": 31, \"coul\": 5479, \"coun\": 2458, \"coup\": 286, \"cour\": 1860, \"cous\": 206, \"cout\": 1, \"couv\": 1, \"cov\": 1116, \"cove\": 1111, \"covi\": 2, \"cow\": 181, \"cowa\": 73, \"cowb\": 26, \"cowd\": 6, \"cowe\": 6, \"cowh\": 8, \"cowl\": 5, \"cowm\": 3, \"cowo\": 4, \"cowp\": 11, \"cowr\": 1, \"cows\": 8, \"cox\": 15, \"coxc\": 8, \"coxe\": 5, \"coy\": 10, \"coyi\": 1, \"coyl\": 1, \"coyn\": 3, \"coyo\": 5, \"coz\": 9, \"cozb\": 2, \"coze\": 5, \"cozi\": 1, \"cpl\": 8, \"cpla\": 8, \"cra\": 812, \"crab\": 20, \"crac\": 149, \"crad\": 31, \"craf\": 141, \"crag\": 13, \"crai\": 4, \"cram\": 18, \"cran\": 51, \"crap\": 8, \"cras\": 88, \"crat\": 21, \"crau\": 4, \"crav\": 24, \"craw\": 100, \"cray\": 6, \"craz\": 82, \"cre\": 1713, \"crea\": 961, \"crec\": 1, \"cred\": 226, \"cree\": 165, \"crei\": 3, \"crem\": 2, \"creo\": 2, \"crep\": 38, \"cres\": 73, \"cret\": 12, \"crev\": 4, \"crew\": 27, \"cri\": 1975, \"crib\": 4, \"cric\": 21, \"crid\": 1, \"crie\": 1225, \"crik\": 1, 
\"crim\": 247, \"crin\": 14, \"crip\": 24, \"cris\": 155, \"crit\": 268, \"cro\": 1595, \"croa\": 12, \"croc\": 17, \"crof\": 83, \"croi\": 1, \"crok\": 5, \"crom\": 38, \"cron\": 10, \"croo\": 75, \"crop\": 49, \"croq\": 11, \"cros\": 516, \"crot\": 13, \"crou\": 56, \"crow\": 619, \"croy\": 2, \"croz\": 4, \"crs\": 1, \"crsp\": 1, \"cru\": 601, \"cruc\": 115, \"crud\": 30, \"crue\": 178, \"crui\": 71, \"crum\": 44, \"crun\": 11, \"crup\": 3, \"crus\": 119, \"crut\": 15, \"cry\": 195, \"crye\": 8, \"cryi\": 92, \"cryo\": 1, \"cryp\": 7, \"crys\": 86, \"ctc\": 5, \"cub\": 398, \"cuba\": 28, \"cubb\": 1, \"cube\": 5, \"cubi\": 302, \"cuc\": 19, \"cuck\": 6, \"cucu\": 13, \"cud\": 9, \"cudd\": 1, \"cudg\": 6, \"cudk\": 1, \"cudm\": 1, \"cue\": 4, \"cuep\": 1, \"cuf\": 16, \"cuff\": 13, \"cui\": 2, \"cuir\": 1, \"cuis\": 1, \"cuk\": 1, \"cuky\": 1, \"cul\": 238, \"culb\": 1, \"cull\": 6, \"culm\": 21, \"culp\": 7, \"cult\": 187, \"culv\": 3, \"cum\": 61, \"cuma\": 1, \"cumb\": 34, \"cumh\": 1, \"cumi\": 1, \"cumm\": 5, \"cumn\": 1, \"cumu\": 17, \"cun\": 112, \"cuna\": 6, \"cunn\": 106, \"cup\": 71, \"cupb\": 19, \"cupf\": 3, \"cupi\": 2, \"cupl\": 1, \"cupo\": 2, \"cupp\": 7, \"cur\": 1650, \"cura\": 28, \"curb\": 14, \"curd\": 6, \"cure\": 29, \"curi\": 456, \"curl\": 80, \"curr\": 245, \"curs\": 349, \"curt\": 153, \"curv\": 135, \"curz\": 8, \"cus\": 341, \"cusc\": 1, \"cush\": 44, \"cuss\": 1, \"cust\": 285, \"cut\": 273, \"cuta\": 1, \"cutb\": 1, \"cutd\": 1, \"cute\": 1, \"cutg\": 1, \"cuth\": 1, \"cutl\": 45, \"cuto\": 3, \"cutp\": 1, \"cutt\": 167, \"cuv\": 11, \"cuvi\": 11, \"cyc\": 59, \"cycl\": 59, \"cyg\": 1, \"cygn\": 1, \"cyl\": 46, \"cyli\": 46, \"cym\": 31, \"cymb\": 30, \"cyn\": 34, \"cyne\": 3, \"cyni\": 25, \"cynn\": 4, \"cynt\": 2, \"cyp\": 24, \"cyph\": 3, \"cypr\": 21, \"cyr\": 37, \"cyre\": 9, \"cyri\": 1, \"cyru\": 27, \"cys\": 3, \"cyst\": 3, \"cyt\": 6, \"cyth\": 1, \"cyto\": 5, \"cza\": 18, \"czar\": 7, \"cze\": 6, \"czec\": 5, \"czer\": 1, \"c\\u00e6s\": 2, \"c\\u00e6sa\": 2, \"dab\": 23, \"daba\": 1, \"dabb\": 14, \"dabe\": 2, \"dabh\": 1, \"dabo\": 2, \"dac\": 5, \"dach\": 1, \"dack\": 1, \"dacr\": 1, \"dact\": 2, \"dad\": 13, \"dada\": 2, \"dadd\": 7, \"dade\": 1, \"dae\": 5, \"daf\": 6, \"daff\": 3, \"dag\": 115, \"dagd\": 15, \"dage\": 1, \"dagg\": 84, \"dago\": 15, \"dah\": 1, \"daho\": 1, \"dai\": 343, \"dail\": 253, \"dain\": 30, \"dair\": 31, \"dais\": 27, \"dak\": 12, \"dako\": 12, \"dal\": 166, \"dala\": 1, \"dalb\": 4, \"dale\": 5, \"dali\": 1, \"dall\": 84, \"dalm\": 2, \"dalp\": 1, \"dalr\": 42, \"dalt\": 5, \"dalz\": 1, \"dam\": 463, \"dama\": 143, \"dame\": 7, \"dami\": 1, \"damm\": 10, \"damn\": 99, \"damo\": 4, \"damp\": 14, \"dams\": 54, \"dan\": 1160, \"dana\": 6, \"danb\": 1, \"danc\": 528, \"dand\": 37, \"dane\": 7, \"dang\": 398, \"dani\": 127, \"danj\": 1, \"dank\": 1, \"dann\": 10, \"dans\": 2, \"dant\": 9, \"danu\": 6, \"danv\": 1, \"danz\": 1, \"dap\": 14, \"daph\": 2, \"dapp\": 12, \"dar\": 1757, \"darb\": 2, \"dard\": 7, \"dare\": 113, \"darg\": 1, \"dari\": 79, \"dark\": 480, \"darl\": 45, \"darm\": 1, \"darn\": 7, \"darr\": 2, \"dart\": 140, \"darw\": 10, \"das\": 417, \"dash\": 367, \"dat\": 374, \"data\": 3, \"date\": 61, \"dath\": 10, \"dati\": 5, \"datu\": 1, \"dau\": 1016, \"daub\": 10, \"daug\": 986, \"daum\": 1, \"daun\": 15, \"daup\": 3, \"dav\": 1256, \"dava\": 1, \"dave\": 7, \"davi\": 1212, \"davy\": 1, \"daw\": 114, \"dawd\": 5, \"dawl\": 5, \"dawn\": 23, \"daws\": 2, \"day\": 1808, \"daya\": 2, \"dayb\": 24, \"dayd\": 3, 
\"daye\": 20, \"dayl\": 54, \"days\": 5, \"dayt\": 26, \"dayw\": 3, \"daz\": 68, \"daze\": 17, \"dazz\": 50, \"dea\": 4170, \"deac\": 21, \"dead\": 111, \"deae\": 10, \"deaf\": 19, \"deal\": 274, \"dean\": 4, \"dear\": 138, \"deat\": 1312, \"deau\": 2, \"deb\": 255, \"deba\": 82, \"debe\": 8, \"debi\": 21, \"debo\": 23, \"debr\": 19, \"debt\": 37, \"debu\": 25, \"dec\": 2476, \"deca\": 167, \"decc\": 3, \"dece\": 463, \"deci\": 657, \"deck\": 48, \"decl\": 634, \"deco\": 102, \"decr\": 152, \"ded\": 183, \"deda\": 12, \"dedi\": 99, \"dedu\": 72, \"dee\": 1059, \"deed\": 108, \"deeg\": 24, \"deem\": 52, \"deep\": 261, \"deer\": 46, \"def\": 1226, \"defa\": 12, \"defe\": 611, \"defi\": 499, \"defl\": 2, \"defo\": 31, \"defr\": 20, \"deft\": 3, \"defu\": 4, \"defy\": 8, \"deg\": 448, \"dega\": 2, \"dege\": 8, \"degl\": 1, \"degr\": 437, \"deh\": 10, \"deha\": 2, \"dehu\": 6, \"dehy\": 2, \"dei\": 60, \"deie\": 2, \"deif\": 12, \"deig\": 10, \"deio\": 6, \"deis\": 2, \"deit\": 28, \"dej\": 21, \"deje\": 14, \"dek\": 9, \"deka\": 8, \"deko\": 1, \"del\": 2120, \"dela\": 219, \"dele\": 56, \"delh\": 6, \"deli\": 1732, \"dell\": 13, \"delm\": 1, \"delo\": 6, \"delp\": 17, \"delr\": 1, \"delt\": 13, \"delu\": 44, \"delv\": 5, \"dem\": 840, \"dema\": 329, \"deme\": 25, \"demi\": 12, \"demo\": 438, \"dems\": 1, \"demu\": 17, \"demy\": 11, \"den\": 677, \"dend\": 1, \"deni\": 155, \"denm\": 32, \"denn\": 7, \"deno\": 123, \"dens\": 84, \"dent\": 60, \"denu\": 8, \"denv\": 19, \"deny\": 34, \"deo\": 3, \"deod\": 2, \"dep\": 1679, \"depa\": 802, \"depe\": 373, \"depi\": 26, \"depl\": 40, \"depo\": 75, \"depp\": 2, \"depr\": 166, \"dept\": 139, \"depu\": 51, \"deq\": 2, \"dequ\": 2, \"der\": 198, \"dera\": 8, \"derb\": 12, \"dere\": 4, \"deri\": 154, \"derm\": 1, \"dern\": 1, \"dero\": 3, \"derr\": 6, \"derv\": 5, \"derw\": 1, \"des\": 4227, \"desa\": 10, \"desc\": 683, \"dese\": 419, \"desh\": 1, \"desi\": 1107, \"desk\": 7, \"desl\": 2, \"desm\": 4, \"deso\": 253, \"desp\": 618, \"dess\": 15, \"dest\": 1012, \"desu\": 4, \"desy\": 1, \"det\": 1227, \"deta\": 317, \"dete\": 855, \"deto\": 17, \"detr\": 38, \"deu\": 37, \"deuc\": 6, \"deue\": 4, \"deui\": 10, \"deuo\": 4, \"deut\": 8, \"dev\": 1751, \"deva\": 12, \"deve\": 750, \"devi\": 582, \"devo\": 407, \"dew\": 40, \"dewa\": 1, \"dewd\": 10, \"dewe\": 5, \"dewi\": 1, \"dewy\": 1, \"dex\": 33, \"dexa\": 2, \"dexe\": 1, \"dext\": 30, \"dha\": 1, \"dhar\": 1, \"dho\": 1, \"dhot\": 1, \"dia\": 419, \"diab\": 17, \"diac\": 1, \"diad\": 10, \"diag\": 84, \"dial\": 69, \"diam\": 117, \"dian\": 62, \"diap\": 18, \"diar\": 23, \"dias\": 1, \"diat\": 5, \"dib\": 14, \"dibl\": 2, \"dibo\": 11, \"dibr\": 1, \"dic\": 313, \"dice\": 2, \"dich\": 2, \"dick\": 34, \"dico\": 6, \"dict\": 146, \"did\": 756, \"dida\": 1, \"didd\": 6, \"dide\": 1, \"didn\": 401, \"dids\": 160, \"didy\": 3, \"die\": 531, \"died\": 1, \"dieg\": 5, \"dieh\": 2, \"dien\": 1, \"dies\": 3, \"diet\": 56, \"dieu\": 2, \"dif\": 1627, \"diff\": 1627, \"dig\": 262, \"digb\": 8, \"dige\": 27, \"digg\": 73, \"digi\": 10, \"dign\": 134, \"digr\": 7, \"dii\": 6, \"diio\": 5, \"diis\": 1, \"dij\": 1, \"dijo\": 1, \"dik\": 2, \"dikl\": 2, \"dil\": 204, \"dila\": 38, \"dile\": 32, \"dili\": 83, \"dill\": 15, \"dilt\": 1, \"dilu\": 21, \"dilw\": 1, \"dily\": 1, \"dim\": 213, \"dima\": 8, \"dime\": 85, \"dimi\": 67, \"diml\": 27, \"dimm\": 8, \"dimn\": 6, \"dimo\": 3, \"dimp\": 3, \"din\": 527, \"dina\": 23, \"dine\": 32, \"ding\": 14, \"dinh\": 2, \"dini\": 70, \"dinn\": 317, \"dino\": 2, \"dins\": 1, 
\"dint\": 1, \"dio\": 30, \"dioc\": 14, \"diod\": 1, \"dion\": 7, \"dior\": 2, \"diot\": 1, \"diox\": 3, \"dip\": 118, \"diph\": 1, \"dipl\": 68, \"dipo\": 6, \"dipp\": 35, \"dips\": 1, \"dipy\": 3, \"dir\": 1294, \"dire\": 1106, \"dirg\": 15, \"diri\": 1, \"dirk\": 3, \"diro\": 1, \"dirt\": 77, \"dis\": 6401, \"disa\": 615, \"disb\": 32, \"disc\": 1686, \"disd\": 51, \"dise\": 185, \"disf\": 16, \"disg\": 151, \"dish\": 113, \"disi\": 62, \"disj\": 7, \"disk\": 6, \"disl\": 116, \"dism\": 220, \"disn\": 1, \"diso\": 126, \"disp\": 834, \"disq\": 19, \"disr\": 66, \"diss\": 233, \"dist\": 1774, \"disu\": 8, \"dit\": 48, \"ditc\": 36, \"dite\": 1, \"ditm\": 3, \"ditt\": 8, \"diu\": 33, \"diue\": 15, \"diui\": 8, \"diur\": 10, \"div\": 1100, \"diva\": 10, \"dive\": 220, \"divi\": 774, \"divo\": 54, \"divu\": 8, \"dix\": 57, \"dixi\": 5, \"dixo\": 52, \"diz\": 17, \"diza\": 1, \"dizz\": 16, \"dja\": 8, \"djak\": 1, \"djan\": 7, \"dli\": 3, \"dlin\": 3, \"dni\": 3, \"dnie\": 2, \"dnig\": 1, \"doa\": 23, \"doat\": 20, \"dob\": 4, \"dobb\": 2, \"dobe\": 1, \"dobl\": 1, \"doc\": 571, \"doch\": 9, \"doci\": 8, \"dock\": 18, \"doct\": 440, \"docu\": 61, \"dod\": 65, \"doda\": 4, \"dodd\": 1, \"dodg\": 39, \"dodi\": 1, \"dodo\": 1, \"doe\": 1432, \"doer\": 14, \"does\": 190, \"doet\": 96, \"dof\": 3, \"doff\": 2, \"dofr\": 1, \"dog\": 188, \"dogb\": 1, \"doge\": 1, \"dogg\": 18, \"dogh\": 1, \"dogl\": 1, \"dogm\": 25, \"dogo\": 3, \"dogp\": 1, \"dogt\": 10, \"dogu\": 1, \"dogw\": 1, \"doh\": 1, \"dohn\": 1, \"doi\": 506, \"doin\": 498, \"doit\": 5, \"dol\": 271, \"dola\": 2, \"dolc\": 6, \"dold\": 1, \"dole\": 11, \"doll\": 188, \"dolm\": 1, \"dolo\": 25, \"dolp\": 17, \"dolt\": 2, \"dom\": 478, \"doma\": 21, \"dome\": 137, \"domi\": 278, \"domo\": 2, \"don\": 2317, \"dona\": 69, \"dond\": 1, \"doni\": 1, \"donk\": 20, \"donn\": 22, \"dono\": 18, \"dont\": 1, \"donw\": 49, \"doo\": 1469, \"dood\": 1, \"dook\": 3, \"dool\": 13, \"doom\": 32, \"door\": 297, \"dop\": 7, \"dope\": 2, \"doph\": 2, \"dopp\": 1, \"dor\": 108, \"dora\": 7, \"dorc\": 5, \"dore\": 2, \"dori\": 15, \"dork\": 1, \"dorm\": 51, \"doro\": 3, \"dors\": 18, \"dos\": 144, \"dosa\": 8, \"dose\": 17, \"dosi\": 1, \"dost\": 3, \"dot\": 343, \"dote\": 11, \"doth\": 4, \"doti\": 6, \"dott\": 18, \"dou\": 1094, \"doub\": 1006, \"douc\": 4, \"doug\": 74, \"dour\": 1, \"dous\": 4, \"dov\": 54, \"dove\": 25, \"dow\": 4079, \"dowa\": 5, \"dowe\": 12, \"dowg\": 1, \"dowj\": 1, \"dowl\": 1, \"down\": 338, \"dowr\": 7, \"dox\": 3, \"doxi\": 1, \"doxo\": 2, \"doy\": 6, \"doyl\": 6, \"doz\": 102, \"doze\": 92, \"dozi\": 4, \"dra\": 1502, \"drab\": 3, \"drac\": 3, \"draf\": 46, \"drag\": 161, \"drah\": 1, \"drai\": 66, \"drak\": 10, \"dram\": 159, \"dran\": 58, \"drap\": 42, \"dras\": 21, \"drau\": 31, \"drav\": 13, \"draw\": 593, \"dray\": 1, \"drd\": 3, \"dre\": 1660, \"drea\": 886, \"dred\": 1, \"dreg\": 8, \"drei\": 4, \"dren\": 13, \"dres\": 428, \"drew\": 2, \"drex\": 11, \"drey\": 9, \"dri\": 1744, \"drib\": 4, \"drie\": 108, \"drif\": 91, \"dril\": 60, \"drin\": 768, \"drip\": 37, \"driu\": 8, \"driv\": 647, \"driz\": 12, \"dro\": 902, \"droi\": 1, \"drol\": 12, \"drom\": 14, \"dron\": 11, \"droo\": 45, \"drop\": 381, \"dros\": 16, \"drou\": 27, \"drov\": 131, \"drow\": 112, \"dru\": 459, \"drub\": 1, \"drud\": 5, \"drug\": 90, \"drui\": 4, \"drum\": 72, \"drun\": 211, \"drur\": 5, \"drus\": 1, \"drut\": 1, \"dry\": 65, \"drya\": 3, \"dryd\": 3, \"drye\": 5, \"dryf\": 3, \"dryg\": 1, \"dryi\": 31, \"dryl\": 8, \"dryn\": 6, \"drys\": 1, 
\"dryw\": 4, \"dua\": 17, \"dual\": 6, \"duan\": 1, \"dub\": 61, \"dubb\": 4, \"dubi\": 24, \"dubl\": 17, \"dubo\": 16, \"duc\": 130, \"duca\": 3, \"duce\": 1, \"duch\": 43, \"duck\": 31, \"ducl\": 7, \"ducr\": 13, \"duct\": 8, \"dud\": 5, \"dudg\": 1, \"dudl\": 1, \"duds\": 1, \"due\": 79, \"duel\": 23, \"dues\": 4, \"duet\": 2, \"duf\": 13, \"duff\": 9, \"dufr\": 2, \"dug\": 12, \"duga\": 2, \"dugd\": 1, \"dugg\": 1, \"dugo\": 8, \"duh\": 1, \"duha\": 1, \"duk\": 127, \"duke\": 23, \"dul\": 185, \"dulc\": 9, \"dull\": 28, \"duln\": 5, \"dum\": 142, \"duma\": 5, \"dumb\": 15, \"dumf\": 1, \"dumm\": 6, \"dumo\": 10, \"dump\": 26, \"dun\": 164, \"dunb\": 4, \"dunc\": 28, \"dund\": 2, \"dune\": 10, \"dunf\": 1, \"dung\": 38, \"dunk\": 6, \"dunl\": 1, \"dunn\": 7, \"duns\": 23, \"duo\": 11, \"duod\": 11, \"dup\": 50, \"dupe\": 7, \"dupl\": 29, \"dupo\": 6, \"duq\": 1, \"duqu\": 1, \"dur\": 813, \"dura\": 58, \"dure\": 7, \"duri\": 707, \"durk\": 5, \"durl\": 1, \"duro\": 1, \"durr\": 1, \"durs\": 31, \"durw\": 1, \"dus\": 338, \"dusk\": 19, \"duss\": 4, \"dust\": 50, \"dut\": 306, \"dutc\": 49, \"dute\": 1, \"duti\": 75, \"dutt\": 2, \"duv\": 2, \"duve\": 1, \"duvo\": 1, \"duy\": 1, \"duyv\": 1, \"dvo\": 1, \"dvor\": 1, \"dwa\": 22, \"dwar\": 22, \"dwe\": 896, \"dwel\": 896, \"dwi\": 25, \"dwig\": 12, \"dwin\": 13, \"dwo\": 1, \"dwy\": 4, \"dwye\": 4, \"dye\": 32, \"dyei\": 1, \"dyer\": 1, \"dyes\": 2, \"dyi\": 114, \"dyin\": 114, \"dyk\": 1, \"dyl\": 11, \"dyla\": 11, \"dyn\": 79, \"dyna\": 78, \"dyno\": 1, \"dyo\": 1, \"dyq\": 1, \"dyqu\": 1, \"dyr\": 1, \"dys\": 29, \"dyse\": 1, \"dysp\": 9, \"dyst\": 19, \"eac\": 1688, \"ead\": 1, \"eade\": 1, \"eag\": 377, \"eage\": 299, \"eagl\": 78, \"eal\": 1, \"eali\": 1, \"ear\": 3577, \"eard\": 1, \"eare\": 18, \"eari\": 2, \"earl\": 873, \"earm\": 2, \"earn\": 288, \"earp\": 1, \"earr\": 20, \"ears\": 4, \"eart\": 1987, \"eas\": 1460, \"ease\": 21, \"easi\": 361, \"east\": 168, \"easy\": 5, \"eat\": 385, \"eata\": 5, \"eate\": 220, \"eati\": 134, \"eato\": 2, \"eav\": 4, \"eave\": 2, \"eba\": 8, \"ebb\": 9, \"ebbe\": 2, \"ebbi\": 4, \"ebc\": 2, \"ebcd\": 2, \"ebe\": 33, \"ebed\": 6, \"eben\": 3, \"ebi\": 3, \"ebia\": 3, \"ebo\": 56, \"ebon\": 9, \"eboo\": 46, \"ebr\": 2, \"ebro\": 2, \"ebu\": 4, \"ebul\": 4, \"ecb\": 1, \"ecba\": 1, \"ecc\": 57, \"ecce\": 41, \"ecch\": 1, \"eccl\": 15, \"ech\": 92, \"eche\": 3, \"echo\": 62, \"eck\": 5, \"ecka\": 1, \"ecke\": 4, \"ecl\": 30, \"ecla\": 7, \"ecle\": 4, \"ecli\": 19, \"eco\": 416, \"ecol\": 3, \"econ\": 413, \"ecs\": 52, \"ecst\": 52, \"ecu\": 34, \"ecua\": 2, \"ecum\": 32, \"eda\": 1, \"edd\": 58, \"eddi\": 44, \"eddy\": 8, \"ede\": 88, \"edem\": 3, \"eden\": 2, \"edg\": 319, \"edga\": 4, \"edge\": 77, \"edgi\": 8, \"edgw\": 1, \"edi\": 316, \"edib\": 5, \"edic\": 6, \"edif\": 37, \"edin\": 1, \"edis\": 4, \"edit\": 261, \"edm\": 41, \"edmo\": 2, \"edmu\": 39, \"edn\": 3, \"edo\": 105, \"edom\": 19, \"edr\": 8, \"edre\": 8, \"edt\": 1, \"edti\": 1, \"edu\": 415, \"edua\": 1, \"educ\": 414, \"edw\": 342, \"edwa\": 328, \"edwi\": 14, \"edy\": 16, \"edyt\": 16, \"eea\": 1, \"eeae\": 1, \"eef\": 1, \"eefa\": 1, \"eel\": 6, \"een\": 1, \"eer\": 7, \"eeri\": 7, \"eff\": 1244, \"effa\": 5, \"effe\": 746, \"effi\": 119, \"effl\": 26, \"effo\": 316, \"effr\": 3, \"effu\": 29, \"ega\": 2, \"egal\": 2, \"ege\": 1, \"eger\": 1, \"egg\": 69, \"egge\": 1, \"eggh\": 2, \"eggs\": 2, \"egi\": 2, \"egil\": 1, \"egin\": 1, \"egl\": 16, \"egla\": 3, \"eglo\": 13, \"ego\": 18, \"egoa\": 1, \"egoc\": 1, \"egot\": 14, 
\"egr\": 6, \"egre\": 6, \"egy\": 799, \"egyp\": 799, \"ehl\": 1, \"ehle\": 1, \"ehr\": 1, \"ehre\": 1, \"ehu\": 10, \"eic\": 20, \"eich\": 20, \"eid\": 30, \"eide\": 2, \"eido\": 28, \"eie\": 3, \"eiel\": 1, \"eif\": 1, \"eiff\": 1, \"eig\": 525, \"eigh\": 525, \"eil\": 29, \"eile\": 29, \"ein\": 10, \"eins\": 8, \"eir\": 2, \"eis\": 63, \"eise\": 62, \"eisl\": 1, \"eit\": 665, \"eith\": 665, \"eja\": 16, \"ejac\": 16, \"eje\": 7, \"ejec\": 7, \"eka\": 1, \"ekat\": 1, \"ekb\": 1, \"ekbe\": 1, \"eke\": 3, \"ekr\": 24, \"ekro\": 24, \"eks\": 27, \"ekst\": 27, \"ekw\": 1, \"ekwa\": 1, \"ela\": 204, \"elab\": 66, \"elad\": 1, \"elai\": 23, \"elam\": 2, \"elap\": 13, \"elas\": 31, \"elat\": 22, \"elb\": 70, \"elbe\": 1, \"elbo\": 63, \"elbr\": 1, \"elbu\": 1, \"eld\": 367, \"elda\": 4, \"elde\": 361, \"eldo\": 2, \"ele\": 1434, \"elea\": 97, \"elec\": 643, \"elee\": 1, \"eleg\": 148, \"elel\": 1, \"elem\": 259, \"elen\": 4, \"elep\": 97, \"eleu\": 4, \"elev\": 171, \"elf\": 3, \"elfi\": 3, \"elg\": 1, \"elgi\": 1, \"elh\": 4, \"elha\": 4, \"eli\": 1364, \"elia\": 101, \"elic\": 10, \"elid\": 1, \"elie\": 26, \"elig\": 43, \"elih\": 13, \"elij\": 86, \"elik\": 1, \"elim\": 83, \"elin\": 688, \"elio\": 14, \"elip\": 26, \"elis\": 97, \"elit\": 13, \"eliu\": 2, \"elix\": 1, \"eliz\": 151, \"elk\": 25, \"elka\": 21, \"elkh\": 1, \"elko\": 1, \"ell\": 351, \"ella\": 3, \"elle\": 13, \"elli\": 328, \"ells\": 1, \"ellw\": 1, \"elm\": 18, \"elma\": 5, \"elme\": 6, \"elmi\": 1, \"elmo\": 1, \"eln\": 8, \"elna\": 8, \"elo\": 75, \"eloc\": 1, \"eloi\": 1, \"elon\": 11, \"elop\": 6, \"eloq\": 43, \"elot\": 3, \"elp\": 5, \"elpa\": 5, \"els\": 851, \"else\": 94, \"elsi\": 7, \"elso\": 3, \"elt\": 414, \"elte\": 3, \"elto\": 411, \"elu\": 59, \"elua\": 4, \"eluc\": 8, \"elud\": 30, \"elue\": 1, \"elus\": 9, \"elut\": 5, \"eluz\": 1, \"elv\": 12, \"elve\": 9, \"elvi\": 3, \"ely\": 8, \"elym\": 1, \"elys\": 7, \"elz\": 4, \"elza\": 4, \"ema\": 41, \"emac\": 5, \"emai\": 3, \"eman\": 31, \"emas\": 2, \"emb\": 408, \"emba\": 170, \"embe\": 22, \"embi\": 2, \"embl\": 23, \"embo\": 60, \"embr\": 131, \"emc\": 1, \"emce\": 1, \"eme\": 223, \"emen\": 1, \"emer\": 217, \"emet\": 5, \"emi\": 122, \"emig\": 12, \"emil\": 9, \"emim\": 2, \"emin\": 50, \"emis\": 36, \"emit\": 6, \"emm\": 894, \"emma\": 5, \"emme\": 12, \"emmo\": 1, \"emo\": 252, \"emol\": 1, \"emor\": 14, \"emot\": 237, \"emp\": 1083, \"empa\": 1, \"empe\": 100, \"emph\": 145, \"empi\": 106, \"empl\": 439, \"empo\": 5, \"empr\": 6, \"empt\": 263, \"empy\": 18, \"ems\": 1, \"emse\": 1, \"emu\": 25, \"emul\": 25, \"ena\": 137, \"enab\": 80, \"enac\": 36, \"enam\": 15, \"enc\": 638, \"enca\": 66, \"ence\": 2, \"ench\": 56, \"enci\": 13, \"encl\": 62, \"enco\": 400, \"encr\": 27, \"encu\": 8, \"ency\": 4, \"end\": 962, \"enda\": 16, \"ende\": 308, \"endg\": 1, \"endi\": 57, \"endl\": 101, \"endo\": 52, \"endp\": 3, \"endt\": 1, \"endu\": 252, \"endw\": 2, \"ene\": 854, \"enea\": 1, \"eneg\": 1, \"enem\": 666, \"ener\": 186, \"enf\": 84, \"enfa\": 1, \"enfe\": 4, \"enfi\": 1, \"enfl\": 1, \"enfo\": 74, \"enfr\": 3, \"eng\": 1546, \"enga\": 462, \"enge\": 23, \"engi\": 244, \"engl\": 739, \"engr\": 67, \"engu\": 10, \"enh\": 22, \"enha\": 20, \"enhu\": 2, \"eni\": 15, \"enig\": 14, \"enio\": 1, \"enj\": 383, \"enjo\": 383, \"enk\": 2, \"enki\": 2, \"enl\": 148, \"enla\": 72, \"enli\": 76, \"enm\": 23, \"enme\": 2, \"enmi\": 21, \"enn\": 14, \"enna\": 1, \"enni\": 1, \"enno\": 2, \"ennu\": 8, \"eno\": 1384, \"enoc\": 13, \"enor\": 121, \"enos\": 1, \"enou\": 
1238, \"enq\": 135, \"enqu\": 135, \"enr\": 83, \"enra\": 21, \"enri\": 31, \"enro\": 31, \"ens\": 189, \"ensa\": 7, \"ensc\": 39, \"ense\": 22, \"ensh\": 6, \"ensi\": 29, \"ensl\": 12, \"ensn\": 2, \"enso\": 2, \"ensu\": 70, \"ent\": 2512, \"enta\": 50, \"ente\": 1296, \"enth\": 119, \"enti\": 668, \"ento\": 7, \"entr\": 366, \"entw\": 5, \"enu\": 19, \"enui\": 4, \"enum\": 8, \"enun\": 3, \"enur\": 1, \"env\": 292, \"enve\": 78, \"envi\": 131, \"envo\": 4, \"envy\": 11, \"enw\": 3, \"enwi\": 2, \"enwr\": 1, \"enz\": 22, \"enzy\": 21, \"eos\": 1, \"eosi\": 1, \"epa\": 51, \"epae\": 1, \"epam\": 33, \"epap\": 5, \"epau\": 12, \"eph\": 350, \"epha\": 42, \"ephe\": 41, \"ephl\": 2, \"epho\": 52, \"ephp\": 1, \"ephr\": 212, \"epi\": 199, \"epic\": 35, \"epid\": 20, \"epig\": 9, \"epil\": 7, \"epip\": 10, \"epis\": 70, \"epit\": 26, \"epo\": 10, \"epoc\": 8, \"epox\": 2, \"epp\": 2, \"eppi\": 1, \"eppl\": 1, \"eps\": 8, \"epsi\": 1, \"epso\": 6, \"epst\": 1, \"eqn\": 1, \"equ\": 1042, \"equa\": 674, \"eque\": 2, \"equi\": 366, \"era\": 34, \"erad\": 5, \"eran\": 1, \"erar\": 2, \"eras\": 15, \"erb\": 1, \"erbl\": 1, \"erc\": 1, \"erco\": 1, \"erd\": 4, \"erdm\": 1, \"erdo\": 2, \"ere\": 113, \"ereb\": 2, \"erec\": 101, \"erel\": 2, \"erem\": 1, \"erew\": 7, \"erf\": 1, \"erfl\": 1, \"erg\": 3, \"ergo\": 1, \"erh\": 3, \"erha\": 3, \"eri\": 29, \"eric\": 5, \"erik\": 9, \"erit\": 1, \"erl\": 1, \"erle\": 1, \"erm\": 5, \"erma\": 2, \"ermi\": 3, \"ern\": 36, \"erna\": 2, \"erne\": 10, \"erni\": 19, \"erns\": 5, \"ero\": 32, \"erod\": 4, \"erom\": 10, \"eros\": 6, \"erot\": 10, \"erp\": 1, \"erpo\": 1, \"err\": 241, \"erra\": 56, \"erre\": 23, \"erri\": 5, \"erro\": 153, \"erru\": 1, \"ers\": 17, \"ersa\": 2, \"ersh\": 1, \"ersk\": 5, \"ersp\": 1, \"ert\": 1, \"erth\": 1, \"eru\": 37, \"erud\": 8, \"erup\": 29, \"erv\": 2, \"ervi\": 2, \"erw\": 5, \"erwa\": 1, \"erwe\": 1, \"erwh\": 2, \"erwi\": 1, \"ery\": 3, \"erys\": 2, \"eryt\": 1, \"esa\": 126, \"esai\": 21, \"esar\": 3, \"esc\": 470, \"esca\": 423, \"esch\": 13, \"esco\": 27, \"escr\": 2, \"escu\": 5, \"ese\": 1, \"esh\": 30, \"eshb\": 4, \"eshc\": 6, \"eshe\": 2, \"eshk\": 1, \"eshl\": 1, \"esht\": 16, \"esi\": 1, \"esil\": 1, \"esk\": 5, \"eski\": 5, \"esl\": 1, \"esm\": 2, \"esma\": 2, \"esn\": 1, \"esna\": 1, \"eso\": 4, \"esot\": 4, \"esp\": 380, \"espa\": 3, \"espe\": 336, \"espi\": 14, \"espl\": 3, \"espo\": 14, \"espr\": 6, \"espy\": 1, \"esq\": 5, \"esqu\": 5, \"esr\": 3, \"esro\": 3, \"ess\": 262, \"essa\": 39, \"esse\": 221, \"est\": 971, \"esta\": 575, \"este\": 102, \"esth\": 73, \"esti\": 202, \"esto\": 1, \"estr\": 16, \"estu\": 2, \"eta\": 6, \"etai\": 1, \"etc\": 12, \"etce\": 9, \"etch\": 3, \"ete\": 241, \"eter\": 237, \"etex\": 3, \"eth\": 208, \"etha\": 17, \"ethb\": 1, \"ethe\": 53, \"ethi\": 113, \"ethn\": 16, \"etho\": 4, \"ethy\": 4, \"eti\": 6, \"etiq\": 6, \"etn\": 2, \"eto\": 31, \"eton\": 1, \"etr\": 3, \"etru\": 2, \"ett\": 3, \"etto\": 1, \"etu\": 2, \"etud\": 2, \"ety\": 2, \"etym\": 2, \"eub\": 2, \"eubo\": 1, \"eubu\": 1, \"euc\": 6, \"euca\": 1, \"eucl\": 5, \"eue\": 156, \"euen\": 4, \"euer\": 58, \"eug\": 44, \"euge\": 44, \"eui\": 11, \"euid\": 1, \"euil\": 10, \"eul\": 6, \"eula\": 1, \"eulo\": 5, \"eum\": 1, \"eumm\": 1, \"eun\": 28, \"euni\": 1, \"eunu\": 27, \"euo\": 1, \"euod\": 1, \"eup\": 31, \"euph\": 31, \"eur\": 264, \"eura\": 2, \"euri\": 3, \"euro\": 255, \"euru\": 1, \"eury\": 3, \"eus\": 10, \"eust\": 10, \"eut\": 2, \"eute\": 1, \"euty\": 1, \"eva\": 303, \"evac\": 9, \"evad\": 14, 
\"eval\": 68, \"evan\": 51, \"evap\": 9, \"evas\": 11, \"eve\": 11267, \"eveg\": 1, \"evel\": 4, \"even\": 1098, \"ever\": 4513, \"evi\": 1432, \"evic\": 2, \"evid\": 450, \"evil\": 66, \"evin\": 31, \"evo\": 82, \"evoc\": 5, \"evok\": 22, \"evol\": 55, \"evz\": 1, \"evzo\": 1, \"ewe\": 7, \"exa\": 1323, \"exac\": 439, \"exag\": 47, \"exal\": 141, \"exam\": 668, \"exas\": 28, \"exb\": 2, \"exba\": 1, \"exbo\": 1, \"exc\": 2396, \"exca\": 7, \"exce\": 1421, \"exch\": 124, \"exci\": 245, \"excl\": 393, \"exco\": 8, \"excr\": 9, \"excu\": 189, \"exe\": 671, \"exea\": 1, \"exec\": 235, \"exeg\": 3, \"exem\": 34, \"exer\": 298, \"exet\": 16, \"exeu\": 82, \"exf\": 1, \"exfi\": 1, \"exg\": 2, \"exga\": 1, \"exgo\": 1, \"exh\": 261, \"exha\": 90, \"exhi\": 129, \"exho\": 39, \"exhu\": 3, \"exi\": 551, \"exig\": 5, \"exil\": 22, \"exis\": 438, \"exit\": 5, \"exj\": 2, \"exja\": 1, \"exju\": 1, \"exl\": 2, \"exli\": 2, \"exm\": 20, \"exma\": 2, \"exmo\": 15, \"exmr\": 2, \"exmu\": 1, \"exn\": 1, \"exna\": 1, \"exo\": 32, \"exod\": 5, \"exog\": 3, \"exon\": 5, \"exor\": 9, \"exot\": 10, \"exp\": 3797, \"expa\": 193, \"expe\": 1997, \"expi\": 35, \"expl\": 733, \"expo\": 227, \"expr\": 602, \"expu\": 9, \"exq\": 55, \"exqu\": 55, \"exs\": 2, \"exsc\": 1, \"exsi\": 1, \"ext\": 1317, \"exta\": 16, \"exte\": 551, \"exti\": 38, \"exto\": 33, \"extr\": 679, \"exu\": 66, \"exub\": 14, \"exud\": 8, \"exul\": 43, \"exur\": 1, \"exy\": 1, \"exya\": 1, \"eye\": 2026, \"eyeb\": 46, \"eyed\": 1, \"eyef\": 2, \"eyeg\": 4, \"eyei\": 40, \"eyel\": 35, \"eyem\": 1, \"eyep\": 1, \"eyes\": 18, \"eyet\": 2, \"eyeu\": 2, \"eyew\": 4, \"eyi\": 2, \"eyin\": 2, \"eyk\": 1, \"eyl\": 1, \"eyr\": 12, \"eyre\": 7, \"eyri\": 4, \"eyt\": 2, \"eyth\": 2, \"eza\": 1, \"ezb\": 3, \"ezba\": 1, \"ezbo\": 2, \"eze\": 19, \"ezek\": 8, \"ezi\": 7, \"ezio\": 7, \"ezn\": 1, \"ezni\": 1, \"ezr\": 31, \"ezra\": 1, \"ezz\": 16, \"fab\": 134, \"fabe\": 7, \"fabi\": 4, \"fabl\": 47, \"fabr\": 64, \"fabu\": 12, \"fac\": 3457, \"faca\": 23, \"face\": 418, \"faci\": 181, \"facs\": 3, \"fact\": 420, \"facu\": 91, \"fad\": 111, \"fadd\": 1, \"fade\": 70, \"fadi\": 20, \"fae\": 3, \"faer\": 3, \"fag\": 21, \"faga\": 1, \"fage\": 6, \"fagg\": 12, \"fagi\": 1, \"fago\": 1, \"fah\": 2, \"fahe\": 1, \"fahr\": 1, \"fai\": 2375, \"faie\": 2, \"fail\": 313, \"fain\": 219, \"fair\": 526, \"fait\": 718, \"fak\": 17, \"fake\": 5, \"faki\": 1, \"fal\": 1484, \"falc\": 24, \"fale\": 1, \"fall\": 538, \"falm\": 1, \"faln\": 3, \"fals\": 256, \"falt\": 23, \"fam\": 1663, \"fame\": 12, \"fami\": 1393, \"famo\": 182, \"fan\": 606, \"fana\": 23, \"fanc\": 358, \"fane\": 1, \"fanf\": 1, \"fang\": 6, \"fann\": 76, \"fans\": 37, \"fant\": 75, \"far\": 1057, \"fara\": 1, \"farc\": 14, \"fard\": 2, \"fare\": 86, \"farf\": 5, \"farg\": 2, \"fari\": 5, \"farl\": 1, \"farm\": 376, \"farn\": 5, \"faro\": 7, \"farr\": 40, \"fars\": 1, \"fart\": 253, \"farv\": 1, \"farw\": 5, \"fas\": 954, \"fasc\": 69, \"fash\": 269, \"fast\": 207, \"fat\": 3602, \"fata\": 86, \"fatb\": 1, \"fate\": 24, \"fatf\": 2, \"fath\": 3245, \"fati\": 48, \"fatl\": 6, \"fatn\": 19, \"fats\": 2, \"fatt\": 27, \"fatu\": 3, \"fau\": 253, \"fauc\": 2, \"faul\": 223, \"faun\": 3, \"fauo\": 13, \"faus\": 11, \"faut\": 1, \"fav\": 615, \"favo\": 609, \"favr\": 6, \"faw\": 20, \"fawc\": 1, \"fawk\": 4, \"fawn\": 12, \"fay\": 7, \"faye\": 4, \"fayl\": 1, \"faz\": 2, \"fazi\": 1, \"fda\": 1, \"fea\": 1935, \"feal\": 4, \"fear\": 547, \"feas\": 245, \"feat\": 315, \"feau\": 1, \"feb\": 68, \"febr\": 68, \"fec\": 
3, \"fecu\": 3, \"fed\": 304, \"feda\": 27, \"fede\": 274, \"fedo\": 2, \"fee\": 2836, \"feeb\": 68, \"feed\": 115, \"feeg\": 3, \"feej\": 1, \"feel\": 841, \"feen\": 1, \"feep\": 2, \"feet\": 2, \"feg\": 1, \"fege\": 1, \"fei\": 31, \"feig\": 27, \"fein\": 3, \"fej\": 3, \"feje\": 3, \"fel\": 2311, \"fele\": 1, \"feli\": 152, \"fell\": 546, \"felo\": 14, \"fels\": 1, \"fem\": 190, \"fema\": 154, \"femi\": 30, \"femm\": 6, \"fen\": 164, \"fenc\": 145, \"fend\": 6, \"fenn\": 5, \"fens\": 1, \"fenu\": 1, \"fenw\": 2, \"fer\": 325, \"ferb\": 1, \"ferd\": 4, \"ferg\": 11, \"feri\": 1, \"ferl\": 1, \"ferm\": 15, \"fern\": 8, \"fero\": 16, \"ferr\": 186, \"fert\": 43, \"feru\": 2, \"ferv\": 35, \"fes\": 95, \"feso\": 1, \"fest\": 93, \"fet\": 179, \"fetc\": 140, \"fete\": 5, \"feti\": 10, \"feto\": 2, \"fett\": 19, \"feu\": 34, \"feuc\": 2, \"feud\": 21, \"feue\": 2, \"feui\": 5, \"feuo\": 1, \"fev\": 91, \"feve\": 91, \"few\": 45, \"fewe\": 44, \"fewn\": 1, \"fey\": 1, \"feye\": 1, \"ffo\": 1, \"ffor\": 1, \"ffr\": 1, \"ffre\": 1, \"fia\": 22, \"fian\": 2, \"fias\": 4, \"fiat\": 2, \"fib\": 90, \"fibe\": 60, \"fibr\": 30, \"fic\": 81, \"fich\": 3, \"fick\": 8, \"fict\": 70, \"fid\": 38, \"fidd\": 10, \"fide\": 22, \"fidg\": 5, \"fie\": 1359, \"fied\": 10, \"fief\": 1, \"fiel\": 992, \"fien\": 55, \"fier\": 300, \"fies\": 1, \"fif\": 628, \"fife\": 1, \"fiff\": 1, \"fift\": 616, \"fig\": 1285, \"figa\": 1, \"figg\": 4, \"figh\": 596, \"figm\": 2, \"figo\": 2, \"figu\": 620, \"fik\": 2, \"fil\": 1184, \"fila\": 9, \"filb\": 3, \"filc\": 1, \"fild\": 1, \"file\": 61, \"fili\": 55, \"fill\": 494, \"film\": 41, \"filt\": 90, \"fin\": 3751, \"fina\": 750, \"finb\": 2, \"finc\": 1, \"find\": 360, \"fine\": 103, \"fing\": 343, \"fini\": 376, \"fink\": 1, \"finl\": 4, \"finn\": 13, \"fino\": 1, \"fins\": 32, \"fio\": 5, \"fior\": 5, \"fir\": 5243, \"fire\": 218, \"firi\": 27, \"firk\": 2, \"firm\": 213, \"firs\": 3500, \"firt\": 1, \"firz\": 1, \"fis\": 956, \"fisc\": 120, \"fish\": 386, \"fisk\": 5, \"fiss\": 12, \"fist\": 28, \"fit\": 192, \"fitc\": 5, \"fitf\": 18, \"fitl\": 7, \"fitn\": 12, \"fitt\": 105, \"fitz\": 8, \"fiu\": 2, \"fiv\": 901, \"five\": 34, \"fix\": 321, \"fixa\": 1, \"fixe\": 281, \"fixi\": 27, \"fixt\": 7, \"fiz\": 2, \"fizz\": 2, \"fjo\": 2, \"fjor\": 2, \"fla\": 1437, \"flab\": 2, \"flac\": 1, \"flag\": 55, \"flai\": 19, \"flak\": 23, \"flam\": 403, \"flan\": 75, \"flap\": 39, \"flar\": 26, \"flas\": 276, \"flat\": 194, \"flau\": 24, \"flav\": 34, \"flaw\": 6, \"flax\": 8, \"flay\": 1, \"fle\": 1294, \"flea\": 18, \"flec\": 4, \"fled\": 10, \"flee\": 125, \"flei\": 7, \"flem\": 8, \"fles\": 581, \"flet\": 7, \"flex\": 58, \"fli\": 344, \"flic\": 24, \"flie\": 57, \"flig\": 147, \"flim\": 3, \"flin\": 56, \"flip\": 13, \"flir\": 13, \"flit\": 17, \"fln\": 1, \"flo\": 1825, \"floa\": 160, \"floc\": 245, \"floe\": 4, \"flog\": 15, \"flon\": 1, \"floo\": 463, \"flop\": 9, \"flor\": 95, \"flot\": 5, \"flou\": 174, \"flow\": 510, \"floy\": 4, \"flu\": 417, \"flub\": 1, \"fluc\": 8, \"flue\": 14, \"fluf\": 5, \"flug\": 1, \"flui\": 65, \"fluk\": 49, \"flum\": 5, \"flun\": 87, \"fluo\": 23, \"flur\": 11, \"flus\": 53, \"flut\": 60, \"flux\": 4, \"fly\": 215, \"flya\": 1, \"flyb\": 1, \"flyd\": 1, \"flye\": 11, \"flyi\": 172, \"flyn\": 3, \"flyw\": 1, \"fma\": 1, \"fmaj\": 1, \"foa\": 151, \"foal\": 3, \"foam\": 64, \"fob\": 2, \"fobb\": 2, \"foc\": 75, \"foca\": 9, \"focu\": 65, \"fod\": 2, \"fodd\": 2, \"foe\": 48, \"foet\": 2, \"fog\": 42, \"foge\": 2, \"fogg\": 13, \"foi\": 58, 
\"foib\": 12, \"foil\": 11, \"foin\": 1, \"fois\": 1, \"fok\": 1, \"foki\": 1, \"fol\": 1863, \"fold\": 109, \"fole\": 1, \"folg\": 2, \"foli\": 38, \"folk\": 85, \"foll\": 1536, \"fols\": 1, \"fom\": 11, \"fome\": 3, \"fomo\": 8, \"fon\": 213, \"fond\": 37, \"fonn\": 1, \"font\": 5, \"foo\": 1424, \"food\": 59, \"fool\": 291, \"foor\": 6, \"foot\": 220, \"fop\": 4, \"fopp\": 4, \"for\": 8294, \"fora\": 55, \"forb\": 241, \"forc\": 813, \"ford\": 6, \"fore\": 1172, \"forf\": 17, \"forg\": 984, \"fori\": 1, \"fork\": 32, \"forl\": 24, \"form\": 1257, \"forn\": 45, \"forr\": 8, \"fors\": 210, \"fort\": 2181, \"foru\": 12, \"forw\": 459, \"fos\": 68, \"fosd\": 13, \"foss\": 21, \"fost\": 31, \"fou\": 3866, \"foug\": 161, \"foul\": 31, \"foun\": 2231, \"four\": 397, \"fov\": 1, \"fovu\": 1, \"fow\": 139, \"fowl\": 81, \"fox\": 15, \"foxe\": 9, \"foxh\": 3, \"foxt\": 1, \"foy\": 10, \"foye\": 3, \"foyl\": 6, \"foys\": 1, \"fpl\": 6, \"fpla\": 6, \"fra\": 1508, \"frac\": 59, \"frag\": 106, \"frai\": 29, \"fram\": 207, \"fran\": 1008, \"frao\": 1, \"frat\": 16, \"frau\": 47, \"fray\": 10, \"fraz\": 1, \"fre\": 2347, \"frea\": 13, \"frec\": 16, \"fred\": 177, \"free\": 447, \"frei\": 58, \"frel\": 5, \"fren\": 308, \"freq\": 296, \"fres\": 353, \"fret\": 21, \"freu\": 14, \"frey\": 3, \"fri\": 2362, \"fria\": 8, \"fric\": 26, \"frid\": 73, \"frie\": 1871, \"frig\": 282, \"fril\": 5, \"frin\": 48, \"fris\": 8, \"frit\": 16, \"friv\": 22, \"friz\": 3, \"fro\": 14314, \"frob\": 1, \"froc\": 33, \"frod\": 12, \"frog\": 27, \"froh\": 1, \"froi\": 3, \"frol\": 14, \"from\": 30, \"fron\": 495, \"fror\": 1, \"fros\": 62, \"frot\": 12, \"frow\": 85, \"froz\": 66, \"fru\": 600, \"fruc\": 3, \"frug\": 9, \"frui\": 545, \"frus\": 43, \"fry\": 4, \"fryi\": 4, \"fsu\": 1, \"fsup\": 1, \"fuc\": 16, \"fuch\": 5, \"fuck\": 7, \"fud\": 13, \"fudd\": 1, \"fudo\": 5, \"fue\": 34, \"fueg\": 1, \"fuel\": 6, \"fug\": 31, \"fuga\": 1, \"fugi\": 28, \"fugu\": 2, \"fuh\": 3, \"fuhr\": 3, \"fui\": 1, \"fuj\": 3, \"fuji\": 2, \"ful\": 1568, \"fulb\": 2, \"fulf\": 186, \"fulg\": 1, \"fulh\": 3, \"fulk\": 3, \"full\": 273, \"fulm\": 2, \"fuln\": 32, \"fult\": 17, \"fum\": 42, \"fumb\": 20, \"fume\": 14, \"fumi\": 5, \"fun\": 642, \"funa\": 1, \"func\": 216, \"fund\": 188, \"fune\": 64, \"funf\": 1, \"fung\": 6, \"funl\": 2, \"funn\": 89, \"funs\": 2, \"fur\": 1065, \"furb\": 8, \"furh\": 1, \"furi\": 102, \"furl\": 17, \"furn\": 285, \"furo\": 4, \"furp\": 1, \"furr\": 27, \"furt\": 475, \"fus\": 70, \"fuse\": 13, \"fusi\": 22, \"fuss\": 7, \"fust\": 2, \"fut\": 412, \"futh\": 1, \"futi\": 17, \"futo\": 1, \"futu\": 393, \"fuz\": 13, \"fuzz\": 10, \"fyo\": 1, \"fyod\": 1, \"gaa\": 14, \"gaaf\": 1, \"gaas\": 4, \"gab\": 88, \"gaba\": 1, \"gabb\": 6, \"gabl\": 14, \"gabr\": 64, \"gad\": 39, \"gada\": 3, \"gadd\": 4, \"gadf\": 3, \"gadg\": 12, \"gadi\": 15, \"gae\": 8, \"gaei\": 1, \"gael\": 3, \"gaet\": 1, \"gaf\": 5, \"gaff\": 3, \"gag\": 21, \"gaga\": 2, \"gage\": 3, \"gagg\": 7, \"gagi\": 1, \"gagl\": 1, \"gagw\": 1, \"gah\": 4, \"gaha\": 3, \"gai\": 448, \"gaie\": 39, \"gail\": 16, \"gain\": 202, \"gait\": 5, \"gaiu\": 5, \"gal\": 483, \"gala\": 32, \"galb\": 1, \"gale\": 11, \"gali\": 86, \"gall\": 257, \"galo\": 2, \"galt\": 2, \"galv\": 12, \"galw\": 4, \"gam\": 369, \"gama\": 7, \"gamb\": 45, \"game\": 82, \"gami\": 9, \"gamm\": 9, \"gamu\": 6, \"gan\": 99, \"gana\": 1, \"gand\": 3, \"gane\": 1, \"gang\": 34, \"gann\": 9, \"gans\": 6, \"gant\": 7, \"gao\": 7, \"gaol\": 1, \"gap\": 44, \"gape\": 14, \"gapi\": 13, \"gar\": 
1144, \"gara\": 31, \"garb\": 12, \"garc\": 3, \"gard\": 651, \"gare\": 3, \"garg\": 7, \"gari\": 25, \"garl\": 33, \"garm\": 222, \"garn\": 24, \"garr\": 80, \"gars\": 10, \"gart\": 27, \"garv\": 2, \"garz\": 1, \"gas\": 129, \"gasc\": 3, \"gase\": 13, \"gasf\": 1, \"gasg\": 1, \"gash\": 8, \"gask\": 6, \"gasl\": 4, \"gaso\": 13, \"gasp\": 39, \"gass\": 8, \"gast\": 12, \"gat\": 1386, \"gata\": 3, \"gate\": 232, \"gath\": 701, \"gatl\": 2, \"gato\": 2, \"gats\": 1, \"gau\": 86, \"gauc\": 4, \"gaud\": 11, \"gaue\": 2, \"gaug\": 19, \"gaul\": 7, \"gaun\": 20, \"gaus\": 4, \"gaut\": 1, \"gauz\": 3, \"gav\": 1263, \"gave\": 38, \"gavi\": 25, \"gavo\": 1, \"gaw\": 4, \"gawd\": 2, \"gawk\": 1, \"gay\": 26, \"gaye\": 14, \"gayh\": 2, \"gayl\": 7, \"gayn\": 3, \"gaz\": 207, \"gaza\": 1, \"gaze\": 63, \"gazi\": 57, \"gazz\": 2, \"gea\": 34, \"gear\": 7, \"geat\": 1, \"geb\": 17, \"geba\": 2, \"gebe\": 2, \"gebi\": 1, \"ged\": 48, \"geda\": 32, \"gedd\": 1, \"gede\": 8, \"gedo\": 7, \"gee\": 18, \"geei\": 1, \"geel\": 8, \"geer\": 1, \"gees\": 7, \"geg\": 1, \"gege\": 1, \"geh\": 18, \"geha\": 12, \"gehe\": 1, \"gehr\": 5, \"gei\": 2, \"geig\": 1, \"geis\": 1, \"gel\": 10, \"gela\": 1, \"geld\": 7, \"geli\": 1, \"gell\": 1, \"gem\": 23, \"gema\": 6, \"geme\": 1, \"gemi\": 4, \"geml\": 1, \"gemm\": 1, \"gen\": 2986, \"gend\": 20, \"gene\": 1732, \"geni\": 85, \"genl\": 1, \"genn\": 5, \"geno\": 1, \"genr\": 3, \"gent\": 1047, \"genu\": 70, \"geo\": 360, \"geoc\": 5, \"geod\": 1, \"geog\": 46, \"geol\": 30, \"geom\": 31, \"geop\": 2, \"geor\": 245, \"ger\": 423, \"gera\": 31, \"gerb\": 1, \"gere\": 1, \"gerg\": 1, \"gerh\": 1, \"geri\": 5, \"germ\": 291, \"gero\": 6, \"gerr\": 4, \"gers\": 48, \"gert\": 18, \"geru\": 2, \"gery\": 1, \"ges\": 141, \"gesa\": 2, \"gesh\": 19, \"gesn\": 1, \"gest\": 118, \"gesu\": 1, \"get\": 403, \"geta\": 1, \"geth\": 4, \"gett\": 309, \"getz\": 1, \"geu\": 2, \"geue\": 1, \"geul\": 1, \"gev\": 1, \"gevu\": 1, \"gey\": 4, \"geys\": 4, \"gez\": 14, \"geze\": 13, \"gezr\": 1, \"gha\": 32, \"ghad\": 1, \"ghan\": 4, \"ghas\": 24, \"ghau\": 1, \"ghaz\": 2, \"ghe\": 19, \"ghen\": 2, \"gher\": 1, \"ghet\": 16, \"ghi\": 1, \"ghib\": 1, \"gho\": 217, \"ghor\": 3, \"ghos\": 211, \"ghou\": 3, \"gia\": 141, \"giac\": 2, \"gian\": 135, \"giao\": 3, \"gib\": 158, \"giba\": 2, \"gibb\": 34, \"gibe\": 101, \"gibl\": 2, \"gibr\": 7, \"gibs\": 11, \"gid\": 67, \"gidd\": 20, \"gide\": 44, \"gidg\": 1, \"gido\": 1, \"gie\": 2, \"gif\": 260, \"giff\": 19, \"gift\": 116, \"gig\": 67, \"giga\": 52, \"gige\": 1, \"gigg\": 13, \"gih\": 6, \"giho\": 6, \"gil\": 270, \"gila\": 1, \"gilb\": 47, \"gild\": 16, \"gile\": 128, \"gilg\": 41, \"gilk\": 1, \"gill\": 9, \"gilm\": 4, \"gilo\": 4, \"gilr\": 1, \"gim\": 7, \"gimb\": 3, \"gimc\": 1, \"gimm\": 1, \"gimp\": 1, \"gimz\": 1, \"gin\": 98, \"gina\": 2, \"ging\": 74, \"gink\": 1, \"ginm\": 2, \"ginn\": 11, \"gins\": 1, \"gio\": 7, \"gioc\": 1, \"gior\": 3, \"giov\": 3, \"gip\": 43, \"gips\": 43, \"gir\": 928, \"gira\": 2, \"gird\": 102, \"girg\": 7, \"girl\": 261, \"girt\": 7, \"gis\": 9, \"gise\": 5, \"giso\": 1, \"gisp\": 1, \"git\": 13, \"gitt\": 13, \"giu\": 170, \"giub\": 1, \"giue\": 40, \"giui\": 6, \"giul\": 1, \"gius\": 4, \"giv\": 4121, \"give\": 1583, \"givi\": 319, \"giz\": 4, \"gize\": 2, \"gizo\": 1, \"gizz\": 1, \"gla\": 1293, \"glac\": 7, \"glad\": 122, \"glam\": 25, \"glan\": 257, \"glar\": 63, \"glas\": 378, \"glau\": 1, \"glay\": 1, \"glaz\": 35, \"gle\": 143, \"glea\": 82, \"gleb\": 1, \"gled\": 1, \"glee\": 7, \"glei\": 1, 
\"glen\": 28, \"gli\": 297, \"glib\": 5, \"glid\": 62, \"glim\": 97, \"glin\": 17, \"glis\": 56, \"glit\": 57, \"glo\": 1259, \"gloa\": 7, \"glob\": 127, \"gloc\": 1, \"glom\": 3, \"gloo\": 109, \"glor\": 798, \"glos\": 18, \"glot\": 5, \"glou\": 8, \"glov\": 69, \"glow\": 62, \"gloz\": 2, \"glu\": 71, \"glue\": 26, \"glum\": 3, \"glut\": 23, \"gly\": 15, \"glyc\": 15, \"gna\": 47, \"gnar\": 5, \"gnas\": 18, \"gnaw\": 16, \"gne\": 1, \"gnei\": 1, \"gno\": 5, \"gnom\": 5, \"goa\": 291, \"goad\": 8, \"goal\": 43, \"goar\": 1, \"goat\": 109, \"gob\": 29, \"gobb\": 12, \"gobe\": 3, \"gobl\": 14, \"god\": 638, \"goda\": 1, \"godb\": 1, \"godc\": 1, \"godd\": 100, \"godf\": 3, \"godg\": 3, \"godh\": 13, \"godk\": 4, \"godl\": 49, \"godm\": 12, \"godo\": 5, \"gods\": 2, \"godu\": 1, \"godw\": 5, \"goe\": 470, \"goer\": 2, \"goes\": 49, \"goet\": 141, \"gog\": 52, \"gogg\": 9, \"gogo\": 40, \"goi\": 1249, \"goin\": 1224, \"goit\": 8, \"gol\": 1050, \"gola\": 4, \"golc\": 1, \"gold\": 267, \"golf\": 11, \"golg\": 5, \"goli\": 8, \"goll\": 3, \"golo\": 2, \"gom\": 37, \"gomb\": 1, \"gome\": 8, \"gomo\": 27, \"gomp\": 1, \"gon\": 956, \"gonc\": 1, \"gond\": 1, \"gone\": 5, \"gonf\": 1, \"gong\": 4, \"gonn\": 18, \"gont\": 2, \"gonz\": 16, \"goo\": 4134, \"good\": 395, \"gooe\": 1, \"goof\": 1, \"gool\": 1, \"gooo\": 1, \"goos\": 30, \"gop\": 1, \"goph\": 1, \"gor\": 161, \"gorb\": 7, \"gord\": 24, \"gore\": 6, \"gorg\": 56, \"gorh\": 5, \"gori\": 2, \"gork\": 1, \"gors\": 4, \"gort\": 37, \"gos\": 193, \"gosa\": 1, \"gosh\": 16, \"gosl\": 1, \"gosp\": 133, \"goss\": 38, \"got\": 84, \"goth\": 15, \"goto\": 1, \"gott\": 66, \"gou\": 67, \"goue\": 6, \"goug\": 11, \"goul\": 12, \"goun\": 1, \"gour\": 20, \"gout\": 6, \"gouv\": 2, \"gov\": 867, \"gove\": 867, \"gow\": 72, \"gowa\": 1, \"gowl\": 3, \"gown\": 16, \"goy\": 1, \"goye\": 1, \"goz\": 5, \"goza\": 5, \"gra\": 3459, \"graa\": 2, \"grab\": 35, \"grac\": 497, \"grad\": 303, \"graf\": 25, \"grah\": 17, \"grai\": 104, \"gram\": 55, \"gran\": 731, \"grap\": 169, \"gras\": 411, \"grat\": 319, \"grau\": 31, \"grav\": 519, \"gray\": 17, \"graz\": 40, \"gre\": 6368, \"grea\": 4515, \"grec\": 26, \"gree\": 998, \"greg\": 154, \"gren\": 17, \"gres\": 51, \"gret\": 1, \"grev\": 22, \"grew\": 1, \"grey\": 48, \"gri\": 694, \"grid\": 2, \"grie\": 261, \"grif\": 29, \"grig\": 14, \"gril\": 21, \"grim\": 58, \"grin\": 122, \"grip\": 45, \"gris\": 16, \"grit\": 8, \"griz\": 10, \"gro\": 2949, \"groa\": 67, \"groc\": 19, \"grog\": 6, \"groi\": 5, \"grok\": 8, \"gron\": 4, \"groo\": 36, \"grop\": 43, \"gros\": 128, \"grot\": 43, \"grou\": 1437, \"grov\": 139, \"grow\": 770, \"gru\": 132, \"grub\": 8, \"grud\": 29, \"grue\": 11, \"gruf\": 16, \"grul\": 1, \"grum\": 26, \"grun\": 28, \"gruo\": 1, \"grup\": 5, \"gry\": 56, \"gryp\": 56, \"grz\": 1, \"grze\": 1, \"gta\": 1, \"gtax\": 1, \"gua\": 311, \"guad\": 1, \"guan\": 1, \"guar\": 291, \"guat\": 4, \"gub\": 7, \"gube\": 7, \"gud\": 4, \"gudg\": 4, \"gue\": 469, \"guer\": 48, \"gues\": 420, \"guev\": 1, \"guf\": 3, \"guff\": 3, \"gug\": 2, \"gugg\": 1, \"gugl\": 1, \"gui\": 757, \"guia\": 3, \"guid\": 235, \"guif\": 3, \"guig\": 1, \"guil\": 275, \"guim\": 1, \"guin\": 186, \"guis\": 13, \"guit\": 29, \"guiz\": 1, \"gul\": 101, \"gulc\": 1, \"gule\": 1, \"gulf\": 5, \"gull\": 29, \"gulp\": 6, \"gum\": 18, \"gumc\": 1, \"gumm\": 6, \"gump\": 1, \"gun\": 159, \"gunb\": 1, \"gunf\": 10, \"guni\": 1, \"gunm\": 7, \"gunn\": 15, \"gunp\": 14, \"guns\": 6, \"gunt\": 1, \"gunw\": 31, \"gur\": 22, \"gurb\": 1, \"gurg\": 
12, \"guri\": 2, \"gurk\": 1, \"gurl\": 1, \"gurr\": 1, \"gurs\": 3, \"gus\": 47, \"gush\": 16, \"guss\": 1, \"gust\": 17, \"gut\": 73, \"gute\": 33, \"gutf\": 1, \"guth\": 3, \"gutt\": 22, \"gutz\": 1, \"guy\": 23, \"guz\": 2, \"guzz\": 2, \"gwe\": 1, \"gwi\": 1, \"gwin\": 1, \"gwy\": 1, \"gwyn\": 1, \"gya\": 1, \"gyan\": 1, \"gym\": 29, \"gymn\": 27, \"gyn\": 4, \"gyne\": 4, \"gyp\": 8, \"gypl\": 1, \"gyps\": 7, \"gyr\": 46, \"gyra\": 5, \"gyro\": 15, \"gyu\": 1, \"gyue\": 1, \"haa\": 4, \"haae\": 1, \"haah\": 1, \"haar\": 1, \"haas\": 1, \"hab\": 332, \"haba\": 6, \"habe\": 10, \"habi\": 309, \"habl\": 2, \"habo\": 3, \"habs\": 1, \"hac\": 39, \"hach\": 7, \"hack\": 28, \"had\": 219, \"hada\": 43, \"hadd\": 3, \"hadi\": 3, \"hadl\": 1, \"hadn\": 99, \"hado\": 4, \"hadr\": 2, \"hads\": 36, \"hae\": 3, \"haec\": 1, \"haes\": 1, \"haf\": 8, \"hafi\": 2, \"hafl\": 1, \"haft\": 3, \"hag\": 72, \"haga\": 19, \"hage\": 2, \"hagg\": 38, \"hagn\": 1, \"hagu\": 10, \"hai\": 793, \"haij\": 2, \"hail\": 67, \"hain\": 2, \"hair\": 128, \"hait\": 1, \"haj\": 1, \"haji\": 1, \"hak\": 5, \"hakk\": 2, \"haku\": 2, \"hal\": 1905, \"hala\": 5, \"halc\": 3, \"hald\": 1, \"hale\": 2, \"half\": 206, \"halh\": 1, \"hali\": 5, \"halk\": 1, \"hall\": 116, \"halm\": 1, \"halo\": 3, \"halt\": 36, \"halv\": 6, \"haly\": 4, \"ham\": 528, \"hama\": 92, \"hamb\": 12, \"hame\": 2, \"hami\": 29, \"haml\": 126, \"hamm\": 181, \"hamo\": 16, \"hamp\": 37, \"hamr\": 11, \"hams\": 1, \"hamu\": 8, \"han\": 5648, \"hana\": 60, \"hanc\": 6, \"hand\": 2122, \"hane\": 22, \"hanf\": 16, \"hang\": 229, \"hani\": 1, \"hank\": 4, \"hann\": 21, \"hano\": 97, \"hans\": 23, \"hanu\": 12, \"hanw\": 1, \"hap\": 1814, \"hapg\": 2, \"haph\": 7, \"hapl\": 54, \"happ\": 1750, \"haq\": 1, \"haqv\": 1, \"har\": 3708, \"hara\": 46, \"harb\": 96, \"harc\": 6, \"hard\": 813, \"hare\": 18, \"harf\": 1, \"harg\": 1, \"harh\": 4, \"hari\": 13, \"hark\": 3, \"harl\": 95, \"harm\": 140, \"harn\": 28, \"haro\": 44, \"harp\": 343, \"harr\": 676, \"hars\": 50, \"hart\": 193, \"haru\": 6, \"harv\": 270, \"has\": 1681, \"hasa\": 1, \"hase\": 1, \"hash\": 34, \"hask\": 3, \"hasn\": 20, \"hasp\": 1, \"hasr\": 1, \"hass\": 4, \"hast\": 371, \"hasu\": 1, \"hat\": 3248, \"hata\": 4, \"hatb\": 2, \"hatc\": 76, \"hate\": 200, \"hatf\": 3, \"hath\": 3, \"hati\": 15, \"hatl\": 2, \"hatr\": 58, \"hatt\": 77, \"hau\": 646, \"haug\": 54, \"haui\": 9, \"haul\": 34, \"haum\": 2, \"haun\": 65, \"hauo\": 2, \"haup\": 2, \"haur\": 2, \"haus\": 2, \"haut\": 6, \"hav\": 15210, \"hava\": 15, \"have\": 103, \"havi\": 1019, \"havo\": 14, \"haw\": 142, \"hawa\": 26, \"hawi\": 1, \"hawk\": 57, \"haws\": 9, \"hawt\": 16, \"hay\": 77, \"hayd\": 5, \"haye\": 6, \"hayf\": 1, \"hayi\": 1, \"hayl\": 3, \"hayn\": 1, \"hays\": 5, \"hayt\": 45, \"hayw\": 3, \"haz\": 175, \"haza\": 103, \"haze\": 18, \"hazi\": 2, \"hazl\": 5, \"hazo\": 19, \"hea\": 12192, \"head\": 693, \"heal\": 483, \"heap\": 50, \"hear\": 4796, \"heat\": 274, \"heau\": 104, \"heav\": 2149, \"heb\": 174, \"hebe\": 23, \"hebr\": 151, \"hec\": 25, \"heca\": 5, \"hecc\": 2, \"heck\": 1, \"hecl\": 2, \"hect\": 10, \"hecu\": 4, \"hed\": 114, \"hedd\": 1, \"hedg\": 103, \"hedi\": 2, \"hedo\": 4, \"hedw\": 4, \"hee\": 403, \"heed\": 39, \"heel\": 69, \"heen\": 1, \"heer\": 143, \"heev\": 2, \"hef\": 6, \"heff\": 2, \"heft\": 3, \"heg\": 14, \"hega\": 3, \"hege\": 10, \"hei\": 386, \"heid\": 22, \"heif\": 21, \"heig\": 237, \"heil\": 7, \"hein\": 11, \"heir\": 23, \"heis\": 8, \"heit\": 1, \"hel\": 2335, \"hela\": 4, \"helb\": 
2, \"held\": 2, \"hele\": 29, \"heli\": 48, \"helk\": 4, \"hell\": 43, \"helm\": 52, \"helo\": 5, \"help\": 406, \"hels\": 1, \"helt\": 2, \"helv\": 37, \"hem\": 138, \"hema\": 20, \"hemd\": 1, \"heme\": 1, \"hemi\": 28, \"heml\": 12, \"hemm\": 6, \"hemo\": 16, \"hemp\": 20, \"hemu\": 5, \"hen\": 638, \"hena\": 4, \"henc\": 331, \"hend\": 11, \"heng\": 10, \"henh\": 1, \"henl\": 2, \"heno\": 2, \"henp\": 1, \"henr\": 262, \"hens\": 1, \"heo\": 1, \"heor\": 1, \"hep\": 18, \"hepa\": 3, \"heph\": 13, \"hepk\": 1, \"hept\": 1, \"her\": 4778, \"hera\": 37, \"herb\": 54, \"herc\": 29, \"herd\": 83, \"here\": 218, \"herf\": 6, \"herg\": 1, \"heri\": 54, \"herm\": 84, \"hern\": 6, \"hero\": 232, \"herp\": 4, \"herr\": 23, \"hers\": 1102, \"hert\": 8, \"herz\": 2, \"hes\": 211, \"hese\": 2, \"hesh\": 39, \"hesi\": 142, \"hesp\": 19, \"hess\": 5, \"hest\": 3, \"het\": 57, \"hete\": 10, \"heth\": 2, \"hetm\": 20, \"heto\": 1, \"hett\": 10, \"heu\": 4, \"heus\": 1, \"heut\": 1, \"heuv\": 2, \"hev\": 3, \"hevi\": 3, \"hew\": 52, \"hewe\": 28, \"hewi\": 1, \"hewl\": 2, \"hex\": 8, \"hexa\": 7, \"hexe\": 1, \"hey\": 16, \"heyd\": 9, \"heyr\": 2, \"heyw\": 5, \"hez\": 154, \"heze\": 129, \"hezi\": 3, \"hezr\": 22, \"hia\": 2, \"hiaw\": 2, \"hib\": 8, \"hiba\": 5, \"hibe\": 2, \"hibl\": 1, \"hic\": 20, \"hicc\": 2, \"hick\": 17, \"hid\": 425, \"hidd\": 104, \"hide\": 98, \"hidi\": 54, \"hie\": 37, \"hier\": 34, \"hif\": 2, \"hifa\": 1, \"hig\": 2554, \"higg\": 3, \"high\": 1070, \"higr\": 1, \"hij\": 6, \"hija\": 6, \"hik\": 11, \"hike\": 5, \"hiki\": 2, \"hil\": 720, \"hila\": 23, \"hild\": 1, \"hile\": 1, \"hilk\": 34, \"hill\": 268, \"hilp\": 4, \"hilt\": 8, \"hilu\": 5, \"him\": 2357, \"hima\": 4, \"himm\": 4, \"hims\": 2349, \"hin\": 353, \"hinc\": 1, \"hind\": 90, \"hing\": 24, \"hink\": 1, \"hinn\": 14, \"hino\": 2, \"hins\": 1, \"hint\": 106, \"hip\": 33, \"hipe\": 1, \"hipl\": 1, \"hipp\": 10, \"hips\": 1, \"hir\": 205, \"hira\": 28, \"hirc\": 1, \"hire\": 84, \"hiri\": 6, \"hiro\": 11, \"hirs\": 34, \"his\": 717, \"hisp\": 1, \"hiss\": 42, \"hist\": 631, \"hit\": 356, \"hita\": 2, \"hitc\": 15, \"hith\": 217, \"hitl\": 17, \"hitr\": 2, \"hitt\": 75, \"hiv\": 34, \"hive\": 1, \"hivi\": 25, \"hiz\": 2, \"hizk\": 2, \"hjc\": 1, \"hjck\": 1, \"hmp\": 1, \"hoa\": 96, \"hoag\": 3, \"hoap\": 1, \"hoar\": 61, \"hoas\": 1, \"hoaw\": 1, \"hoax\": 1, \"hob\": 42, \"hoba\": 4, \"hobb\": 28, \"hobd\": 1, \"hobg\": 1, \"hobo\": 5, \"hoc\": 5, \"hock\": 5, \"hod\": 39, \"hoda\": 4, \"hode\": 2, \"hodg\": 26, \"hodi\": 6, \"hodo\": 1, \"hoe\": 8, \"hoed\": 1, \"hoev\": 3, \"hof\": 5, \"hoff\": 5, \"hog\": 28, \"hoga\": 14, \"hoge\": 1, \"hogg\": 1, \"hogl\": 4, \"hogm\": 1, \"hogs\": 3, \"hoh\": 10, \"hoha\": 1, \"hohl\": 9, \"hoi\": 66, \"hoij\": 6, \"hoip\": 1, \"hois\": 59, \"hok\": 4, \"hoka\": 1, \"hol\": 2500, \"hola\": 1, \"holb\": 3, \"hold\": 400, \"hole\": 79, \"holi\": 124, \"holl\": 157, \"holm\": 40, \"holo\": 10, \"holp\": 5, \"hols\": 14, \"holt\": 2, \"holy\": 9, \"holz\": 1, \"hom\": 1528, \"homa\": 15, \"home\": 242, \"homi\": 10, \"homo\": 30, \"hon\": 1200, \"hona\": 2, \"honb\": 2, \"hond\": 1, \"hone\": 443, \"honi\": 4, \"honk\": 2, \"hono\": 726, \"hons\": 1, \"hony\": 1, \"hoo\": 261, \"hooc\": 1, \"hood\": 22, \"hoof\": 23, \"hoog\": 2, \"hook\": 52, \"hool\": 1, \"hoop\": 23, \"hoor\": 3, \"hoos\": 5, \"hoot\": 5, \"hoov\": 10, \"hop\": 1416, \"hope\": 421, \"hoph\": 5, \"hopi\": 56, \"hopk\": 28, \"hopp\": 43, \"hops\": 2, \"hor\": 1549, \"hora\": 51, \"hord\": 7, \"hore\": 18, 
\"horh\": 2, \"hori\": 119, \"horm\": 24, \"horn\": 115, \"horo\": 12, \"horr\": 279, \"hors\": 798, \"hort\": 1, \"hos\": 997, \"hosa\": 15, \"hose\": 13, \"hosh\": 16, \"hosi\": 2, \"hosm\": 1, \"hosp\": 210, \"hoss\": 5, \"host\": 406, \"hot\": 232, \"hotb\": 2, \"hotc\": 1, \"hotd\": 1, \"hote\": 188, \"hoth\": 9, \"hotl\": 4, \"hotr\": 1, \"hots\": 3, \"hott\": 21, \"hotw\": 2, \"hou\": 5265, \"houd\": 1, \"houe\": 2, \"houg\": 18, \"houn\": 21, \"hour\": 437, \"hous\": 4174, \"hout\": 2, \"hov\": 66, \"hova\": 1, \"hovd\": 1, \"hove\": 59, \"how\": 1475, \"howa\": 34, \"howb\": 64, \"howd\": 3, \"howe\": 1230, \"howl\": 51, \"howo\": 2, \"howr\": 2, \"hows\": 14, \"howt\": 2, \"hox\": 1, \"hoy\": 6, \"hoyd\": 1, \"hoyl\": 1, \"hro\": 2, \"hrot\": 2, \"htm\": 1, \"htt\": 4, \"hua\": 2, \"huan\": 1, \"hub\": 17, \"huba\": 1, \"hubb\": 7, \"hube\": 4, \"hubi\": 1, \"hubr\": 1, \"huc\": 5, \"huck\": 4, \"hud\": 99, \"hudd\": 24, \"huds\": 75, \"hue\": 21, \"huf\": 16, \"huff\": 2, \"hug\": 261, \"huge\": 9, \"hugg\": 19, \"hugh\": 32, \"hugo\": 1, \"hugu\": 1, \"huh\": 3, \"huhm\": 2, \"huhu\": 1, \"hui\": 1, \"huit\": 1, \"huk\": 2, \"hukk\": 1, \"huko\": 1, \"hul\": 76, \"huld\": 2, \"hulk\": 5, \"hull\": 16, \"hult\": 1, \"hum\": 1237, \"huma\": 672, \"humb\": 165, \"hume\": 2, \"humi\": 77, \"huml\": 1, \"humm\": 27, \"humo\": 231, \"hump\": 26, \"humt\": 1, \"hun\": 1758, \"hunc\": 16, \"hund\": 1061, \"hung\": 194, \"hunk\": 6, \"hunt\": 247, \"huo\": 1, \"huos\": 1, \"hup\": 6, \"huph\": 2, \"hupp\": 4, \"hur\": 631, \"hura\": 13, \"hurd\": 6, \"hurl\": 38, \"huro\": 9, \"hurr\": 354, \"hurt\": 32, \"hus\": 699, \"husb\": 558, \"hush\": 35, \"husk\": 24, \"huss\": 19, \"hust\": 14, \"husw\": 3, \"hut\": 39, \"hutc\": 7, \"hutm\": 2, \"hutt\": 18, \"hux\": 8, \"huxl\": 8, \"huz\": 39, \"huzz\": 39, \"hva\": 3, \"hval\": 2, \"hwa\": 6, \"hwan\": 4, \"hwas\": 1, \"hya\": 9, \"hyac\": 4, \"hyal\": 3, \"hyan\": 2, \"hyb\": 1, \"hybr\": 1, \"hyd\": 92, \"hyda\": 1, \"hyde\": 1, \"hydr\": 86, \"hye\": 5, \"hyen\": 4, \"hyg\": 14, \"hygi\": 14, \"hyl\": 1, \"hyla\": 1, \"hym\": 68, \"hyme\": 24, \"hymn\": 26, \"hyms\": 1, \"hyn\": 1, \"hynd\": 1, \"hyp\": 204, \"hype\": 23, \"hyph\": 2, \"hypn\": 7, \"hypo\": 170, \"hyr\": 2, \"hyrc\": 1, \"hys\": 50, \"hyss\": 12, \"hyst\": 38, \"hyt\": 1, \"iad\": 2, \"iade\": 1, \"ian\": 1, \"iang\": 1, \"iaw\": 1, \"iawe\": 1, \"ibb\": 1, \"ibbo\": 1, \"ibe\": 1, \"iber\": 1, \"ibh\": 3, \"ibha\": 3, \"ibi\": 6, \"ibib\": 2, \"ibl\": 3, \"ible\": 3, \"ibn\": 2, \"ibne\": 1, \"ibni\": 1, \"ibr\": 5, \"ibra\": 4, \"ibs\": 3, \"ibse\": 3, \"ibz\": 2, \"ibza\": 2, \"ica\": 1, \"icb\": 7, \"icbm\": 4, \"ice\": 38, \"iceb\": 14, \"icec\": 3, \"icef\": 4, \"icel\": 15, \"ich\": 2, \"icha\": 2, \"ici\": 9, \"icic\": 6, \"icil\": 1, \"icin\": 2, \"ico\": 7, \"icon\": 7, \"ida\": 6, \"idah\": 4, \"idal\": 1, \"idb\": 1, \"idba\": 1, \"idd\": 14, \"ide\": 1163, \"idea\": 336, \"iden\": 286, \"ideo\": 39, \"idi\": 68, \"idio\": 68, \"idj\": 3, \"idl\": 152, \"idle\": 33, \"idli\": 3, \"ido\": 188, \"idol\": 140, \"idu\": 5, \"idum\": 5, \"idy\": 7, \"idyl\": 7, \"iea\": 5, \"ieal\": 5, \"iee\": 1, \"ieer\": 1, \"iel\": 1, \"iell\": 1, \"iem\": 1, \"iemm\": 1, \"iep\": 3, \"ieph\": 3, \"ier\": 3, \"ieru\": 3, \"ies\": 4, \"iest\": 1, \"iew\": 2, \"iewe\": 2, \"ifa\": 2, \"ifai\": 2, \"ifn\": 1, \"iga\": 2, \"igb\": 3, \"igd\": 1, \"igda\": 1, \"ige\": 1, \"igea\": 1, \"igl\": 1, \"igle\": 1, \"ign\": 272, \"igna\": 1, \"igne\": 1, \"igni\": 14, \"igno\": 256, 
\"igo\": 4, \"igom\": 1, \"ihm\": 1, \"ihms\": 1, \"iib\": 1, \"iibe\": 1, \"iig\": 3, \"iigg\": 3, \"iij\": 1, \"iiji\": 1, \"ija\": 1, \"ije\": 2, \"ijea\": 2, \"ijo\": 3, \"ike\": 2, \"ikey\": 1, \"ikk\": 3, \"ikke\": 3, \"ikl\": 1, \"ila\": 1, \"ile\": 2, \"ileu\": 1, \"ili\": 19, \"ilia\": 18, \"iliu\": 1, \"ilk\": 1, \"ill\": 469, \"illa\": 2, \"illc\": 3, \"ille\": 25, \"illf\": 2, \"illi\": 82, \"illn\": 59, \"illo\": 2, \"illp\": 1, \"ills\": 4, \"illu\": 275, \"illy\": 2, \"ilo\": 1, \"ilon\": 1, \"ily\": 1, \"ilyu\": 1, \"ima\": 923, \"imag\": 922, \"imau\": 1, \"imb\": 47, \"imba\": 5, \"imbe\": 15, \"imbi\": 7, \"imbl\": 2, \"imbo\": 7, \"imbr\": 5, \"imbu\": 6, \"imc\": 2, \"imco\": 2, \"ime\": 1, \"imed\": 1, \"imi\": 70, \"imit\": 70, \"iml\": 4, \"imla\": 2, \"imm\": 1021, \"imma\": 29, \"imme\": 745, \"immi\": 51, \"immo\": 175, \"immu\": 21, \"imn\": 3, \"imna\": 2, \"imp\": 3007, \"impa\": 412, \"impe\": 287, \"impi\": 29, \"impl\": 202, \"impo\": 1147, \"impr\": 763, \"impu\": 165, \"imr\": 3, \"imra\": 1, \"ina\": 207, \"inab\": 21, \"inac\": 41, \"inad\": 53, \"inal\": 3, \"inan\": 7, \"inap\": 10, \"inar\": 6, \"inas\": 18, \"inat\": 11, \"inau\": 37, \"inb\": 11, \"inbo\": 9, \"inbr\": 2, \"inc\": 2930, \"inca\": 81, \"ince\": 266, \"inch\": 123, \"inci\": 149, \"incl\": 738, \"inco\": 465, \"incr\": 905, \"incu\": 84, \"ind\": 3879, \"inde\": 1502, \"indi\": 1641, \"indo\": 67, \"indu\": 667, \"indw\": 2, \"ine\": 312, \"ineb\": 2, \"inef\": 37, \"inel\": 12, \"inep\": 5, \"ineq\": 7, \"iner\": 17, \"ines\": 11, \"inev\": 121, \"inex\": 100, \"inf\": 1622, \"infa\": 160, \"infe\": 232, \"infi\": 230, \"infl\": 408, \"info\": 541, \"infr\": 31, \"infu\": 20, \"ing\": 134, \"inga\": 7, \"ingb\": 1, \"inge\": 62, \"ingi\": 2, \"ingl\": 10, \"ingo\": 1, \"ingr\": 48, \"ingu\": 2, \"inh\": 830, \"inha\": 365, \"inhe\": 395, \"inhi\": 33, \"inho\": 8, \"inhu\": 29, \"ini\": 515, \"inim\": 13, \"inio\": 1, \"iniq\": 339, \"init\": 162, \"inj\": 214, \"inje\": 21, \"inju\": 193, \"ink\": 13, \"inkh\": 3, \"inki\": 1, \"inkl\": 3, \"inks\": 2, \"inl\": 63, \"inla\": 51, \"inle\": 10, \"inlo\": 1, \"inm\": 25, \"inma\": 13, \"inmi\": 1, \"inmo\": 11, \"inn\": 481, \"inna\": 6, \"inne\": 130, \"inni\": 16, \"innk\": 12, \"inno\": 254, \"innu\": 49, \"ino\": 34, \"inob\": 3, \"inoc\": 3, \"inof\": 6, \"inop\": 2, \"inor\": 20, \"inp\": 28, \"inpe\": 1, \"inpl\": 1, \"inpo\": 1, \"inpu\": 25, \"inq\": 288, \"inqu\": 288, \"inr\": 10, \"inro\": 10, \"ins\": 3305, \"insa\": 60, \"insc\": 44, \"inse\": 181, \"insh\": 5, \"insi\": 529, \"inso\": 91, \"insp\": 166, \"inst\": 1952, \"insu\": 277, \"int\": 10388, \"inta\": 35, \"inte\": 3779, \"inti\": 160, \"into\": 72, \"intr\": 411, \"intu\": 42, \"inu\": 43, \"inue\": 8, \"inui\": 6, \"inun\": 4, \"inur\": 24, \"inut\": 1, \"inv\": 1487, \"inva\": 186, \"inve\": 493, \"invi\": 386, \"invo\": 415, \"invu\": 7, \"inw\": 105, \"inwa\": 101, \"inwo\": 2, \"inwr\": 2, \"ioc\": 10, \"ioco\": 1, \"iocs\": 6, \"iocu\": 1, \"iod\": 45, \"iodi\": 40, \"iodo\": 5, \"ioh\": 1, \"ion\": 33, \"ioni\": 20, \"iono\": 3, \"ios\": 1, \"ioso\": 1, \"iot\": 4, \"iou\": 6, \"ioue\": 1, \"ioui\": 1, \"iour\": 2, \"iow\": 10, \"iowa\": 1, \"iowl\": 1, \"ioy\": 18, \"ioye\": 3, \"ioyf\": 3, \"ioyn\": 10, \"iph\": 1, \"iphe\": 1, \"ips\": 1, \"ira\": 16, \"iraq\": 2, \"iras\": 1, \"irat\": 2, \"ire\": 72, \"iref\": 1, \"irel\": 51, \"iren\": 17, \"iret\": 3, \"iri\": 57, \"irid\": 1, \"irij\": 2, \"irin\": 2, \"iris\": 49, \"irk\": 9, \"irks\": 8, 
\"irku\": 1, \"irm\": 1, \"irn\": 1, \"irna\": 1, \"iro\": 430, \"iron\": 100, \"iroq\": 4, \"irp\": 1, \"irpe\": 1, \"irr\": 337, \"irra\": 51, \"irre\": 200, \"irri\": 84, \"irru\": 2, \"irs\": 5, \"irsa\": 4, \"irsh\": 1, \"irv\": 5, \"irvi\": 5, \"irw\": 1, \"irwi\": 1, \"isa\": 278, \"isaa\": 147, \"isab\": 95, \"isai\": 36, \"isc\": 12, \"isca\": 12, \"isf\": 4, \"isfa\": 4, \"ish\": 113, \"isha\": 1, \"ishb\": 16, \"ishi\": 3, \"ishm\": 79, \"isho\": 1, \"ishp\": 1, \"isht\": 3, \"ishu\": 3, \"isi\": 9, \"isid\": 3, \"isin\": 3, \"isl\": 474, \"isla\": 370, \"isle\": 67, \"isli\": 1, \"ism\": 2, \"isma\": 2, \"isn\": 97, \"iso\": 110, \"isoc\": 3, \"isod\": 1, \"isol\": 92, \"isom\": 1, \"isop\": 2, \"isot\": 11, \"isp\": 1, \"ispa\": 1, \"isr\": 2652, \"isra\": 2652, \"iss\": 432, \"issa\": 44, \"issh\": 3, \"issu\": 385, \"ist\": 23, \"ista\": 5, \"isth\": 4, \"isti\": 13, \"istv\": 1, \"isu\": 2, \"isua\": 1, \"ita\": 173, \"ital\": 172, \"itas\": 1, \"itc\": 17, \"itch\": 9, \"ite\": 153, \"item\": 88, \"iter\": 1, \"ith\": 43, \"itha\": 24, \"ithi\": 3, \"ithm\": 1, \"ithn\": 1, \"ithr\": 11, \"ithu\": 3, \"iti\": 4, \"itin\": 4, \"itl\": 18, \"ito\": 1, \"itoi\": 1, \"its\": 682, \"itse\": 682, \"itt\": 9, \"itta\": 9, \"itu\": 1, \"itur\": 1, \"itw\": 2, \"itwi\": 2, \"iud\": 25, \"iudg\": 23, \"iudi\": 2, \"iug\": 2, \"iugg\": 1, \"iugl\": 1, \"iul\": 5, \"iuli\": 5, \"ium\": 2, \"iump\": 2, \"ius\": 17, \"iust\": 9, \"iut\": 1, \"iutt\": 1, \"iuv\": 1, \"iuva\": 1, \"iuy\": 1, \"iuyc\": 1, \"iva\": 7, \"ive\": 2, \"ivi\": 2, \"ivie\": 2, \"ivl\": 1, \"ivli\": 1, \"ivo\": 93, \"ivor\": 93, \"ivy\": 1, \"ivyc\": 1, \"ixi\": 1, \"ixio\": 1, \"iza\": 6, \"izaa\": 6, \"ize\": 2, \"izeh\": 2, \"izh\": 11, \"izha\": 11, \"izr\": 4, \"izra\": 3, \"izv\": 1, \"izve\": 1, \"jaa\": 21, \"jaak\": 2, \"jaal\": 6, \"jaan\": 1, \"jaar\": 1, \"jaas\": 2, \"jaaz\": 9, \"jab\": 56, \"jaba\": 1, \"jabb\": 15, \"jabe\": 28, \"jabi\": 8, \"jabn\": 3, \"jac\": 825, \"jacc\": 1, \"jach\": 10, \"jaci\": 3, \"jack\": 253, \"jaco\": 397, \"jacq\": 13, \"jacu\": 1, \"jad\": 11, \"jada\": 1, \"jadd\": 3, \"jade\": 3, \"jado\": 1, \"jae\": 7, \"jaeg\": 1, \"jaf\": 3, \"jaff\": 3, \"jag\": 32, \"jaga\": 2, \"jage\": 1, \"jagg\": 20, \"jagu\": 6, \"jah\": 39, \"jaha\": 23, \"jahd\": 3, \"jahl\": 3, \"jahm\": 1, \"jahv\": 2, \"jahz\": 6, \"jai\": 51, \"jail\": 11, \"jair\": 3, \"jak\": 12, \"jaka\": 2, \"jake\": 2, \"jaki\": 2, \"jal\": 3, \"jala\": 1, \"jalo\": 2, \"jam\": 248, \"jama\": 5, \"jamb\": 3, \"jame\": 212, \"jami\": 7, \"jaml\": 1, \"jamm\": 16, \"jan\": 458, \"jane\": 3, \"jang\": 5, \"jani\": 20, \"jann\": 6, \"jano\": 3, \"jans\": 6, \"janu\": 76, \"jap\": 149, \"japa\": 121, \"japh\": 22, \"japo\": 1, \"jar\": 52, \"jara\": 2, \"jard\": 1, \"jare\": 9, \"jarg\": 7, \"jarh\": 2, \"jari\": 3, \"jarm\": 7, \"jaro\": 2, \"jarr\": 11, \"jarv\": 1, \"jas\": 44, \"jash\": 11, \"jasi\": 1, \"jaso\": 10, \"jasp\": 11, \"jast\": 11, \"jat\": 5, \"jath\": 1, \"jatt\": 4, \"jau\": 5, \"jaun\": 5, \"jav\": 34, \"java\": 10, \"jave\": 13, \"jaw\": 67, \"jawa\": 1, \"jawb\": 4, \"jawe\": 2, \"jay\": 4, \"jayc\": 4, \"jaz\": 113, \"jaze\": 11, \"jazi\": 1, \"jazz\": 2, \"jea\": 160, \"jeal\": 128, \"jean\": 7, \"jear\": 1, \"jeat\": 1, \"jeb\": 47, \"jebe\": 1, \"jebu\": 45, \"jec\": 12, \"jeca\": 1, \"jech\": 3, \"jeco\": 8, \"jed\": 37, \"jeda\": 13, \"jede\": 1, \"jedi\": 8, \"jedu\": 14, \"jee\": 30, \"jeep\": 1, \"jeer\": 7, \"jees\": 1, \"jeez\": 2, \"jef\": 41, \"jeff\": 38, \"jeg\": 1, \"jega\": 
1, \"jeh\": 384, \"jeha\": 2, \"jehd\": 2, \"jehe\": 1, \"jehi\": 20, \"jeho\": 291, \"jehu\": 9, \"jei\": 11, \"jeie\": 11, \"jej\": 3, \"jeju\": 3, \"jek\": 6, \"jeka\": 5, \"jeku\": 1, \"jel\": 27, \"jelk\": 7, \"jell\": 20, \"jem\": 9, \"jeme\": 1, \"jemi\": 5, \"jemm\": 1, \"jemu\": 2, \"jen\": 282, \"jenk\": 16, \"jenn\": 260, \"jens\": 4, \"jeo\": 26, \"jeop\": 26, \"jep\": 46, \"jeph\": 46, \"jer\": 1343, \"jera\": 12, \"jere\": 171, \"jeri\": 79, \"jerk\": 38, \"jero\": 131, \"jerr\": 21, \"jers\": 33, \"jeru\": 846, \"jerv\": 1, \"jes\": 1259, \"jesa\": 2, \"jesh\": 50, \"jesi\": 3, \"jess\": 83, \"jest\": 20, \"jesu\": 1062, \"jet\": 54, \"jetb\": 1, \"jeth\": 21, \"jetl\": 1, \"jett\": 8, \"jetu\": 3, \"jeu\": 11, \"jeue\": 1, \"jeun\": 1, \"jeus\": 8, \"jew\": 470, \"jewa\": 1, \"jewb\": 2, \"jewe\": 70, \"jewh\": 1, \"jewi\": 78, \"jewr\": 3, \"jez\": 83, \"jeza\": 2, \"jeze\": 27, \"jezi\": 2, \"jezl\": 1, \"jezo\": 1, \"jezr\": 50, \"jib\": 5, \"jibe\": 2, \"jibs\": 1, \"jid\": 2, \"jidg\": 1, \"jidl\": 1, \"jif\": 7, \"jiff\": 6, \"jig\": 4, \"jigg\": 2, \"jil\": 3, \"jilt\": 3, \"jim\": 30, \"jimb\": 1, \"jime\": 1, \"jimi\": 1, \"jimm\": 20, \"jimn\": 3, \"jin\": 18, \"jing\": 16, \"jinn\": 1, \"jip\": 3, \"jiph\": 3, \"jis\": 13, \"jit\": 4, \"jitt\": 4, \"jiu\": 1, \"jiuj\": 1, \"jiv\": 1, \"jivi\": 1, \"joa\": 230, \"joah\": 1, \"joan\": 6, \"joaq\": 1, \"joas\": 49, \"joat\": 2, \"job\": 84, \"joba\": 9, \"jobb\": 1, \"jobl\": 4, \"jobs\": 2, \"joc\": 29, \"joch\": 2, \"jock\": 6, \"joco\": 1, \"jocu\": 19, \"jod\": 1, \"joe\": 48, \"joel\": 3, \"joez\": 1, \"jog\": 7, \"jogb\": 2, \"jogg\": 2, \"jogl\": 1, \"joh\": 1048, \"joha\": 31, \"john\": 166, \"joi\": 588, \"joia\": 13, \"join\": 373, \"jois\": 6, \"jok\": 119, \"jokd\": 1, \"joke\": 27, \"joki\": 11, \"jokm\": 1, \"jokn\": 4, \"joks\": 4, \"jokt\": 8, \"jol\": 81, \"joll\": 73, \"jolt\": 2, \"jon\": 356, \"jona\": 267, \"jone\": 86, \"jonq\": 2, \"joo\": 2, \"joos\": 2, \"jop\": 22, \"jopl\": 1, \"jopp\": 21, \"jor\": 245, \"jora\": 31, \"jord\": 208, \"jorg\": 4, \"jori\": 1, \"jork\": 1, \"jos\": 630, \"josa\": 3, \"jose\": 325, \"josh\": 228, \"josi\": 60, \"joss\": 5, \"jost\": 5, \"jot\": 31, \"jotb\": 4, \"joth\": 24, \"jott\": 3, \"jou\": 376, \"jour\": 364, \"jous\": 3, \"jouv\": 4, \"jov\": 32, \"jovi\": 5, \"jow\": 6, \"jowl\": 4, \"joy\": 210, \"joyc\": 21, \"joye\": 2, \"joyf\": 68, \"joyi\": 1, \"joyl\": 2, \"joyo\": 45, \"joyr\": 2, \"joz\": 15, \"joza\": 15, \"jua\": 26, \"juan\": 16, \"jub\": 48, \"juba\": 13, \"jubi\": 34, \"juc\": 2, \"juca\": 1, \"juci\": 1, \"jud\": 2315, \"juda\": 900, \"jude\": 4, \"judg\": 1339, \"judi\": 45, \"juds\": 4, \"jue\": 11, \"juet\": 3, \"jug\": 12, \"jugg\": 12, \"jui\": 33, \"juic\": 33, \"juj\": 1, \"juk\": 2, \"jul\": 141, \"jule\": 5, \"juli\": 60, \"july\": 1, \"jum\": 221, \"jumb\": 12, \"jump\": 135, \"jun\": 332, \"junc\": 25, \"jung\": 41, \"juni\": 125, \"junk\": 8, \"junt\": 3, \"jup\": 24, \"jupi\": 24, \"jur\": 169, \"jura\": 2, \"juri\": 50, \"juro\": 13, \"jury\": 5, \"jus\": 2769, \"jush\": 1, \"juss\": 1, \"just\": 559, \"jut\": 5, \"juti\": 1, \"jutt\": 4, \"juv\": 22, \"juve\": 22, \"jux\": 5, \"juxt\": 5, \"kab\": 5, \"kaba\": 1, \"kabo\": 1, \"kabz\": 3, \"kad\": 38, \"kadd\": 1, \"kade\": 28, \"kadm\": 9, \"kaf\": 5, \"kafk\": 5, \"kag\": 1, \"kaga\": 1, \"kah\": 10, \"kahl\": 8, \"kahn\": 1, \"kai\": 7, \"kais\": 7, \"kaj\": 1, \"kaja\": 1, \"kak\": 1, \"kaku\": 1, \"kal\": 12, \"kala\": 1, \"kale\": 5, \"kall\": 1, \"kalm\": 2, 
\"kalo\": 1, \"kam\": 7, \"kamc\": 1, \"kame\": 1, \"kami\": 4, \"kamt\": 1, \"kan\": 70, \"kana\": 18, \"kand\": 1, \"kang\": 2, \"kani\": 1, \"kank\": 1, \"kann\": 1, \"kans\": 39, \"kant\": 1, \"kanu\": 1, \"kap\": 8, \"kapl\": 1, \"kapn\": 1, \"kapo\": 1, \"kapp\": 5, \"kaq\": 1, \"kaqu\": 1, \"kar\": 59, \"kara\": 4, \"kare\": 17, \"kari\": 5, \"kark\": 2, \"karl\": 3, \"karn\": 10, \"karo\": 2, \"karp\": 1, \"kars\": 3, \"kart\": 2, \"kas\": 12, \"kasa\": 7, \"kash\": 1, \"kask\": 1, \"kass\": 1, \"kast\": 1, \"kat\": 112, \"kata\": 24, \"kate\": 4, \"kath\": 20, \"kati\": 17, \"kato\": 1, \"katt\": 1, \"katy\": 3, \"kau\": 4, \"kaub\": 1, \"kauf\": 3, \"kav\": 1, \"kaw\": 1, \"kawe\": 1, \"kay\": 13, \"kaya\": 12, \"kaz\": 3, \"kaza\": 1, \"kazb\": 1, \"kazo\": 1, \"kea\": 13, \"kean\": 2, \"kear\": 9, \"keat\": 2, \"keb\": 1, \"kebo\": 1, \"ked\": 39, \"keda\": 12, \"kedd\": 1, \"kede\": 18, \"kedg\": 6, \"kedr\": 1, \"kedz\": 1, \"kee\": 1492, \"keee\": 1, \"keeg\": 3, \"keel\": 26, \"keen\": 29, \"keep\": 365, \"keer\": 2, \"kees\": 1, \"keg\": 4, \"kegf\": 1, \"kegh\": 2, \"keh\": 13, \"kehe\": 2, \"kei\": 45, \"keil\": 18, \"kein\": 1, \"keit\": 25, \"keiz\": 1, \"kek\": 1, \"keki\": 1, \"kel\": 99, \"kela\": 1, \"keli\": 3, \"kell\": 77, \"kelp\": 1, \"kels\": 12, \"kelt\": 1, \"kem\": 14, \"kemb\": 7, \"kemc\": 1, \"kemp\": 2, \"kemu\": 3, \"ken\": 300, \"kena\": 14, \"kene\": 4, \"keni\": 16, \"kenn\": 206, \"kens\": 7, \"kent\": 27, \"keny\": 2, \"kenz\": 1, \"kep\": 626, \"kepl\": 1, \"ker\": 50, \"kerb\": 1, \"kerc\": 5, \"kere\": 1, \"keri\": 3, \"kern\": 12, \"kero\": 8, \"kerr\": 3, \"kers\": 1, \"kery\": 3, \"kes\": 1, \"kest\": 1, \"ket\": 37, \"ketc\": 2, \"keto\": 7, \"kett\": 24, \"ketu\": 4, \"key\": 75, \"keyb\": 5, \"keye\": 4, \"keyh\": 9, \"keyn\": 6, \"keyp\": 1, \"keys\": 5, \"kez\": 3, \"kezi\": 2, \"kezz\": 1, \"kha\": 11, \"khaj\": 3, \"khak\": 2, \"khan\": 1, \"khar\": 1, \"khas\": 1, \"khi\": 1, \"khiv\": 1, \"khm\": 1, \"khme\": 1, \"khr\": 85, \"khru\": 85, \"kia\": 1, \"kian\": 1, \"kib\": 8, \"kibb\": 1, \"kibr\": 5, \"kibz\": 1, \"kic\": 138, \"kick\": 82, \"kid\": 141, \"kidd\": 8, \"kidi\": 1, \"kidn\": 43, \"kidr\": 11, \"kie\": 3, \"kief\": 3, \"kik\": 6, \"kike\": 1, \"kiki\": 1, \"kiku\": 3, \"kil\": 735, \"kilh\": 1, \"kili\": 2, \"kill\": 392, \"kilo\": 19, \"kilt\": 1, \"kim\": 15, \"kimb\": 6, \"kimm\": 4, \"kimo\": 1, \"kimp\": 4, \"kin\": 5575, \"kina\": 1, \"kind\": 515, \"kine\": 14, \"king\": 944, \"kinn\": 1, \"kinr\": 1, \"kins\": 43, \"kio\": 2, \"kios\": 1, \"kiow\": 1, \"kip\": 4, \"kipl\": 4, \"kir\": 76, \"kirb\": 12, \"kirh\": 5, \"kiri\": 5, \"kirj\": 37, \"kirk\": 5, \"kiro\": 10, \"kis\": 210, \"kish\": 8, \"kiso\": 1, \"kiss\": 110, \"kit\": 213, \"kitc\": 145, \"kite\": 2, \"kith\": 2, \"kitr\": 1, \"kitt\": 51, \"kiv\": 1, \"kiw\": 2, \"kiwa\": 2, \"kiy\": 1, \"kiyi\": 1, \"kiz\": 5, \"kizz\": 5, \"kla\": 7, \"klau\": 3, \"klax\": 1, \"kle\": 7, \"klee\": 3, \"klei\": 3, \"klem\": 1, \"kli\": 3, \"klim\": 1, \"klin\": 2, \"klo\": 1, \"klom\": 1, \"klu\": 4, \"kluc\": 1, \"kna\": 48, \"knac\": 9, \"knap\": 7, \"knau\": 15, \"knav\": 17, \"kne\": 1459, \"knea\": 9, \"knec\": 1, \"knee\": 191, \"knel\": 23, \"knep\": 1, \"knew\": 11, \"kni\": 713, \"knic\": 3, \"knif\": 168, \"knig\": 451, \"knil\": 1, \"knit\": 32, \"kniu\": 1, \"kniv\": 32, \"kno\": 6177, \"knob\": 8, \"knoc\": 188, \"knol\": 6, \"knop\": 9, \"knot\": 36, \"know\": 2137, \"knox\": 1, \"knu\": 19, \"knuc\": 19, \"koa\": 1, \"kob\": 3, \"koba\": 1, 
\"kobo\": 2, \"koc\": 3, \"koch\": 2, \"kod\": 13, \"koda\": 3, \"kodi\": 3, \"kody\": 7, \"koe\": 6, \"koeh\": 3, \"koen\": 3, \"kof\": 1, \"kofa\": 1, \"koh\": 83, \"koha\": 47, \"kohn\": 35, \"koi\": 2, \"koin\": 2, \"kok\": 1, \"koko\": 1, \"kol\": 11, \"kola\": 2, \"kolk\": 4, \"kolp\": 2, \"kom\": 3, \"komb\": 1, \"koml\": 1, \"komu\": 1, \"kon\": 20, \"kong\": 1, \"koni\": 4, \"konr\": 1, \"kons\": 1, \"konz\": 1, \"koo\": 6, \"kook\": 1, \"kool\": 1, \"koon\": 2, \"koos\": 1, \"kop\": 1, \"kops\": 1, \"kor\": 77, \"kora\": 31, \"kore\": 28, \"korh\": 4, \"korm\": 1, \"korn\": 8, \"korr\": 1, \"kos\": 12, \"kosh\": 2, \"kosm\": 7, \"koss\": 3, \"kot\": 1, \"koto\": 1, \"kou\": 4, \"kous\": 4, \"kow\": 19, \"kowa\": 19, \"koz\": 1, \"kozi\": 1, \"kqe\": 1, \"kra\": 13, \"krae\": 1, \"kraf\": 1, \"krak\": 5, \"kram\": 1, \"krap\": 1, \"kras\": 1, \"krau\": 3, \"kre\": 17, \"krei\": 1, \"krem\": 14, \"kret\": 2, \"kri\": 31, \"krim\": 9, \"kris\": 5, \"kro\": 12, \"kroe\": 1, \"krog\": 7, \"krom\": 1, \"kron\": 3, \"kru\": 19, \"krug\": 8, \"krum\": 2, \"krup\": 1, \"krus\": 3, \"krut\": 5, \"kry\": 4, \"krys\": 4, \"krz\": 1, \"krzy\": 1, \"ksa\": 2, \"ksu\": 1, \"ksuu\": 1, \"kub\": 1, \"kube\": 1, \"kuh\": 1, \"kul\": 4, \"kult\": 4, \"kun\": 4, \"kunk\": 4, \"kup\": 1, \"kupc\": 1, \"kur\": 3, \"kuri\": 1, \"kus\": 1, \"kush\": 1, \"kwa\": 3, \"kwam\": 1, \"kwan\": 1, \"kwas\": 1, \"kwh\": 3, \"kyn\": 2, \"kyo\": 8, \"kyot\": 7, \"kyoz\": 1, \"kyt\": 1, \"kyte\": 1, \"laa\": 8, \"laad\": 8, \"lab\": 641, \"laba\": 60, \"labe\": 46, \"labi\": 4, \"labo\": 506, \"labr\": 5, \"labu\": 7, \"laby\": 13, \"lac\": 357, \"lace\": 17, \"lach\": 25, \"laci\": 3, \"lack\": 111, \"laco\": 8, \"lacq\": 5, \"lact\": 6, \"lacu\": 1, \"lad\": 1245, \"ladd\": 50, \"lade\": 29, \"ladg\": 2, \"ladi\": 238, \"ladl\": 4, \"lady\": 35, \"lae\": 98, \"laen\": 1, \"laer\": 36, \"laf\": 14, \"lafa\": 13, \"lag\": 45, \"lage\": 3, \"lagg\": 6, \"lago\": 20, \"lagr\": 1, \"lagu\": 9, \"lah\": 7, \"laha\": 3, \"lahm\": 2, \"laho\": 1, \"lai\": 503, \"laic\": 1, \"laid\": 4, \"lain\": 1, \"lair\": 1, \"lais\": 12, \"lait\": 4, \"lak\": 164, \"lake\": 55, \"laku\": 1, \"lal\": 21, \"lala\": 21, \"lam\": 689, \"lama\": 4, \"lamb\": 116, \"lame\": 139, \"lami\": 6, \"lamm\": 4, \"lamo\": 2, \"lamp\": 79, \"lan\": 3533, \"lanc\": 96, \"land\": 617, \"lane\": 24, \"lang\": 324, \"lani\": 1, \"lank\": 4, \"lans\": 1, \"lant\": 63, \"lany\": 2, \"lanz\": 2, \"lao\": 79, \"laod\": 9, \"laot\": 6, \"lap\": 68, \"lape\": 4, \"lapi\": 2, \"lapl\": 5, \"lapp\": 20, \"laps\": 24, \"lapw\": 3, \"lar\": 1300, \"lara\": 4, \"larb\": 10, \"larc\": 7, \"lard\": 5, \"lare\": 2, \"larg\": 1176, \"lari\": 1, \"lark\": 47, \"larn\": 2, \"laro\": 2, \"larr\": 10, \"lars\": 6, \"larv\": 7, \"las\": 2196, \"lasa\": 2, \"lasc\": 11, \"lase\": 1, \"lash\": 40, \"lass\": 16, \"last\": 98, \"lasw\": 1, \"lat\": 1470, \"latc\": 19, \"late\": 636, \"lath\": 11, \"lati\": 118, \"latt\": 250, \"lau\": 962, \"lauc\": 5, \"laud\": 16, \"laug\": 738, \"laui\": 1, \"laun\": 94, \"laur\": 101, \"laus\": 1, \"laut\": 3, \"lav\": 91, \"lava\": 12, \"lave\": 31, \"lavi\": 24, \"lavo\": 1, \"law\": 556, \"lawa\": 1, \"lawb\": 1, \"lawe\": 2, \"lawf\": 51, \"lawg\": 9, \"lawl\": 20, \"lawm\": 12, \"lawn\": 10, \"lawr\": 116, \"laws\": 8, \"lawu\": 1, \"lawy\": 108, \"lax\": 3, \"laxa\": 1, \"laxn\": 2, \"lay\": 171, \"laye\": 66, \"layi\": 58, \"laym\": 11, \"layo\": 12, \"lays\": 4, \"layt\": 1, \"layu\": 1, \"laz\": 88, \"laza\": 24, \"lazi\": 10, 
\"lazy\": 2, \"lazz\": 1, \"lbd\": 1, \"lbda\": 1, \"lbp\": 1, \"lbpl\": 1, \"lea\": 4589, \"leac\": 2, \"lead\": 569, \"leaf\": 15, \"leag\": 137, \"leak\": 26, \"leal\": 1, \"leam\": 1, \"lean\": 232, \"leap\": 154, \"lear\": 588, \"leas\": 918, \"leat\": 62, \"leau\": 67, \"leav\": 1252, \"leb\": 80, \"leba\": 77, \"lebb\": 1, \"lebe\": 1, \"lebo\": 1, \"lec\": 73, \"leca\": 1, \"lech\": 4, \"leck\": 1, \"lecl\": 1, \"lect\": 66, \"led\": 45, \"ledd\": 5, \"ledf\": 1, \"ledg\": 32, \"ledo\": 4, \"ledy\": 3, \"lee\": 77, \"leec\": 2, \"leed\": 4, \"leek\": 1, \"leer\": 11, \"lees\": 10, \"leew\": 32, \"lef\": 1527, \"left\": 24, \"leg\": 632, \"lega\": 125, \"lege\": 79, \"legg\": 19, \"legi\": 229, \"legs\": 1, \"legu\": 3, \"leh\": 8, \"leha\": 2, \"lehm\": 2, \"lehn\": 1, \"lei\": 89, \"leib\": 1, \"leic\": 10, \"leid\": 1, \"leig\": 5, \"leil\": 1, \"leis\": 69, \"leit\": 2, \"lel\": 2, \"lela\": 2, \"lem\": 54, \"lema\": 1, \"lemm\": 9, \"lemn\": 1, \"lemo\": 40, \"lemu\": 3, \"len\": 673, \"lend\": 25, \"leng\": 481, \"leni\": 18, \"lenn\": 2, \"leno\": 29, \"lens\": 5, \"lent\": 8, \"leny\": 2, \"leo\": 160, \"leon\": 134, \"leop\": 21, \"lep\": 82, \"lepe\": 26, \"lepi\": 8, \"lepr\": 47, \"ler\": 3, \"lern\": 1, \"lero\": 2, \"les\": 1533, \"lesb\": 1, \"lesc\": 1, \"lesh\": 2, \"lesi\": 2, \"lesl\": 2, \"leso\": 3, \"less\": 260, \"lest\": 14, \"let\": 972, \"letc\": 22, \"letd\": 1, \"leth\": 21, \"leti\": 2, \"lets\": 1, \"lett\": 833, \"letu\": 1, \"leu\": 13, \"leuc\": 1, \"leue\": 1, \"leui\": 4, \"leuk\": 3, \"leum\": 1, \"leuw\": 1, \"leuy\": 1, \"lev\": 963, \"leva\": 2, \"leve\": 427, \"levi\": 448, \"lew\": 100, \"lewd\": 21, \"lewe\": 2, \"lewi\": 68, \"lex\": 22, \"lexi\": 22, \"ley\": 16, \"leyd\": 4, \"leys\": 6, \"leyt\": 6, \"lia\": 81, \"liab\": 39, \"liai\": 7, \"liar\": 18, \"lib\": 614, \"libb\": 1, \"libe\": 426, \"libi\": 4, \"libn\": 25, \"libr\": 148, \"liby\": 10, \"lic\": 124, \"lice\": 70, \"lich\": 4, \"lick\": 22, \"lict\": 1, \"lid\": 15, \"lide\": 1, \"lidl\": 1, \"lie\": 363, \"liea\": 1, \"lieb\": 2, \"lied\": 2, \"lief\": 1, \"lieg\": 7, \"lien\": 1, \"lier\": 10, \"lies\": 8, \"liet\": 59, \"lieu\": 54, \"liev\": 2, \"lif\": 2766, \"life\": 68, \"lifs\": 1, \"lift\": 366, \"lig\": 1722, \"liga\": 15, \"ligg\": 1, \"ligh\": 1698, \"lign\": 5, \"ligu\": 2, \"lik\": 5646, \"like\": 852, \"likh\": 1, \"liki\": 32, \"lil\": 112, \"lila\": 33, \"lili\": 27, \"lill\": 26, \"lilt\": 3, \"lim\": 512, \"limb\": 89, \"lime\": 8, \"limi\": 308, \"limo\": 5, \"limp\": 27, \"lin\": 1466, \"linc\": 56, \"lind\": 63, \"line\": 523, \"ling\": 115, \"lini\": 8, \"link\": 56, \"linn\": 6, \"lino\": 1, \"lins\": 1, \"lint\": 6, \"linu\": 2, \"linv\": 1, \"lio\": 289, \"lion\": 77, \"lip\": 339, \"lipc\": 2, \"lipl\": 1, \"lipo\": 1, \"lipp\": 17, \"lips\": 5, \"lipt\": 5, \"liq\": 167, \"liqu\": 167, \"lir\": 2, \"lirr\": 2, \"lis\": 744, \"lisb\": 4, \"lisl\": 1, \"lisp\": 4, \"liss\": 2, \"list\": 546, \"lit\": 4340, \"lite\": 340, \"lith\": 15, \"liti\": 23, \"litl\": 1, \"lito\": 3, \"litt\": 3952, \"litu\": 3, \"liu\": 74, \"liue\": 23, \"liui\": 7, \"liv\": 2278, \"liva\": 2, \"live\": 901, \"livi\": 641, \"livr\": 3, \"livs\": 2, \"liz\": 36, \"liza\": 16, \"lizz\": 20, \"lla\": 3, \"llam\": 3, \"lle\": 3, \"llew\": 3, \"llo\": 14, \"lloy\": 14, \"loa\": 350, \"load\": 96, \"loaf\": 8, \"loam\": 1, \"loan\": 37, \"loat\": 41, \"loav\": 40, \"lob\": 74, \"loba\": 1, \"lobb\": 27, \"lobe\": 7, \"lobl\": 1, \"lobs\": 26, \"lobt\": 1, \"lobu\": 5, \"loc\": 
847, \"loca\": 518, \"loch\": 1, \"lock\": 211, \"loco\": 9, \"locu\": 42, \"lod\": 188, \"lode\": 3, \"lodg\": 178, \"lodl\": 2, \"lodo\": 1, \"loe\": 9, \"loes\": 5, \"loew\": 2, \"lof\": 74, \"loft\": 65, \"log\": 123, \"loga\": 5, \"logg\": 17, \"logh\": 1, \"logi\": 85, \"logj\": 1, \"loh\": 1, \"lohm\": 1, \"loi\": 99, \"loin\": 79, \"loir\": 2, \"loit\": 11, \"lol\": 16, \"loll\": 4, \"lolo\": 9, \"lom\": 7, \"lomb\": 5, \"lon\": 3732, \"lond\": 271, \"lone\": 100, \"long\": 815, \"lons\": 4, \"lont\": 1, \"loo\": 4542, \"look\": 2545, \"loom\": 28, \"loon\": 1, \"loop\": 22, \"loos\": 307, \"loot\": 6, \"lop\": 15, \"lopa\": 2, \"lope\": 6, \"lopp\": 1, \"lops\": 2, \"loq\": 4, \"loqu\": 4, \"lor\": 8849, \"lora\": 1, \"lorc\": 2, \"lord\": 131, \"lore\": 5, \"lorl\": 1, \"lorr\": 2, \"loru\": 2, \"los\": 1041, \"lose\": 27, \"losi\": 52, \"loss\": 55, \"lot\": 120, \"lota\": 7, \"loth\": 12, \"loti\": 11, \"lott\": 4, \"lotu\": 3, \"lou\": 933, \"louc\": 1, \"loud\": 97, \"loue\": 30, \"loui\": 399, \"loun\": 40, \"lour\": 4, \"lous\": 20, \"louv\": 4, \"lov\": 2309, \"lova\": 3, \"love\": 850, \"lovi\": 104, \"low\": 549, \"lowb\": 2, \"lowc\": 10, \"lowd\": 6, \"lowe\": 446, \"lowf\": 3, \"lowg\": 2, \"lowh\": 1, \"lowi\": 4, \"lowk\": 2, \"lowl\": 43, \"lowm\": 2, \"lown\": 3, \"lowp\": 6, \"lows\": 2, \"lowt\": 3, \"lowv\": 1, \"loww\": 4, \"loy\": 65, \"loya\": 65, \"loz\": 1, \"loze\": 1, \"ltu\": 3, \"ltur\": 3, \"lua\": 2, \"luan\": 2, \"lub\": 35, \"lubb\": 7, \"lube\": 2, \"lubi\": 3, \"lubl\": 17, \"lubr\": 6, \"luc\": 686, \"luca\": 3, \"lucc\": 1, \"luce\": 1, \"luci\": 144, \"luck\": 120, \"lucr\": 16, \"lucy\": 1, \"lud\": 43, \"ludg\": 14, \"ludi\": 21, \"ludl\": 1, \"ludm\": 1, \"ludw\": 6, \"lue\": 4, \"luec\": 1, \"lueg\": 2, \"luet\": 1, \"luf\": 5, \"luff\": 1, \"luft\": 1, \"lug\": 26, \"luge\": 1, \"lugg\": 24, \"lugu\": 1, \"luh\": 2, \"luhi\": 2, \"lui\": 12, \"luis\": 3, \"luk\": 15, \"luke\": 8, \"luku\": 1, \"lul\": 35, \"lull\": 19, \"lum\": 149, \"lumb\": 51, \"lume\": 3, \"lumi\": 32, \"lumm\": 2, \"lump\": 21, \"lumu\": 15, \"lun\": 251, \"luna\": 59, \"lunc\": 86, \"lund\": 2, \"lung\": 77, \"luni\": 4, \"lup\": 2, \"lupe\": 2, \"lur\": 105, \"lura\": 1, \"lurc\": 18, \"lure\": 6, \"luri\": 12, \"lurk\": 44, \"lus\": 146, \"lusc\": 4, \"lush\": 1, \"lusi\": 1, \"lust\": 93, \"lut\": 16, \"luth\": 12, \"luti\": 1, \"lutt\": 1, \"luv\": 1, \"luva\": 1, \"lux\": 78, \"luxe\": 4, \"luxu\": 74, \"luz\": 1, \"luzo\": 1, \"lvi\": 1, \"lvin\": 1, \"lya\": 3, \"lyab\": 1, \"lyar\": 1, \"lyb\": 1, \"lybi\": 1, \"lyc\": 13, \"lyca\": 2, \"lyci\": 2, \"lyd\": 8, \"lydd\": 3, \"lydi\": 5, \"lye\": 12, \"lyes\": 2, \"lyf\": 9, \"lyfo\": 9, \"lyi\": 185, \"lyin\": 184, \"lyk\": 2, \"lyki\": 2, \"lyl\": 1, \"lym\": 74, \"lyma\": 1, \"lymb\": 1, \"lymi\": 1, \"lymp\": 4, \"lyn\": 11, \"lync\": 2, \"lynd\": 3, \"lyo\": 13, \"lyon\": 4, \"lyop\": 1, \"lyr\": 49, \"lyri\": 47, \"lys\": 11, \"lysa\": 1, \"lysi\": 3, \"lyst\": 6, \"lyt\": 6, \"lytt\": 6, \"maa\": 71, \"maac\": 31, \"maad\": 2, \"maal\": 1, \"maar\": 1, \"maas\": 26, \"maat\": 1, \"maaz\": 2, \"mab\": 5, \"mabe\": 5, \"mac\": 1227, \"maca\": 18, \"macb\": 72, \"macc\": 3, \"macd\": 33, \"mace\": 36, \"macg\": 1, \"mach\": 350, \"maci\": 428, \"mack\": 27, \"macl\": 3, \"macm\": 1, \"macn\": 27, \"maco\": 5, \"macp\": 4, \"macr\": 15, \"macw\": 2, \"mad\": 4267, \"mada\": 124, \"madd\": 51, \"made\": 45, \"madh\": 10, \"madi\": 25, \"madl\": 12, \"madm\": 26, \"madn\": 78, \"mado\": 7, \"madr\": 18, 
\"mads\": 1, \"madw\": 1, \"mae\": 17, \"maec\": 1, \"mael\": 5, \"maeo\": 2, \"maes\": 7, \"maet\": 1, \"maf\": 2, \"mafe\": 2, \"mag\": 645, \"maga\": 76, \"magb\": 1, \"magd\": 22, \"mage\": 5, \"magg\": 39, \"magi\": 156, \"magn\": 308, \"mago\": 11, \"magp\": 13, \"magu\": 6, \"magw\": 6, \"magy\": 1, \"mah\": 115, \"maha\": 47, \"mahe\": 2, \"mahj\": 1, \"mahl\": 28, \"mahm\": 2, \"maho\": 17, \"mahu\": 1, \"mahz\": 16, \"mai\": 1053, \"maid\": 116, \"maie\": 27, \"mail\": 45, \"maim\": 13, \"main\": 378, \"mais\": 2, \"mait\": 5, \"maiz\": 7, \"maj\": 493, \"majd\": 11, \"maje\": 122, \"majo\": 359, \"maju\": 1, \"mak\": 4305, \"maka\": 1, \"make\": 643, \"makh\": 2, \"maki\": 598, \"makk\": 9, \"makt\": 1, \"mal\": 511, \"mala\": 54, \"malc\": 47, \"mald\": 1, \"male\": 81, \"malf\": 9, \"mali\": 93, \"malk\": 1, \"mall\": 25, \"malm\": 3, \"maln\": 5, \"malo\": 7, \"malp\": 2, \"malr\": 21, \"malt\": 14, \"malv\": 8, \"mam\": 199, \"mama\": 3, \"mamb\": 1, \"mamm\": 123, \"mamr\": 10, \"man\": 5343, \"mana\": 572, \"manc\": 55, \"mand\": 49, \"mane\": 38, \"manf\": 4, \"mang\": 11, \"manh\": 83, \"mani\": 205, \"manj\": 1, \"mank\": 130, \"manl\": 42, \"manm\": 9, \"mann\": 901, \"mano\": 48, \"manp\": 14, \"manr\": 1, \"mans\": 68, \"mant\": 105, \"manu\": 197, \"manv\": 1, \"manx\": 12, \"many\": 8, \"manz\": 2, \"mao\": 9, \"maoc\": 1, \"maon\": 1, \"map\": 82, \"mapl\": 47, \"mapp\": 18, \"maq\": 1, \"maqu\": 1, \"mar\": 4541, \"mara\": 12, \"marb\": 67, \"marc\": 392, \"mard\": 3, \"mare\": 17, \"marg\": 201, \"mari\": 908, \"marj\": 1, \"mark\": 643, \"marl\": 30, \"marm\": 13, \"maro\": 11, \"marq\": 81, \"marr\": 863, \"mars\": 109, \"mart\": 266, \"maru\": 5, \"marv\": 138, \"marx\": 8, \"mary\": 43, \"mas\": 1395, \"masa\": 4, \"masc\": 25, \"mase\": 1, \"mash\": 5, \"mask\": 30, \"maso\": 65, \"masq\": 16, \"masr\": 2, \"mass\": 218, \"mast\": 650, \"masu\": 1, \"mat\": 2217, \"mata\": 2, \"matc\": 230, \"mate\": 507, \"math\": 69, \"mati\": 21, \"matl\": 1, \"matr\": 49, \"mats\": 49, \"matt\": 1038, \"matu\": 103, \"mau\": 116, \"mauc\": 2, \"maud\": 24, \"maug\": 2, \"maul\": 4, \"maur\": 68, \"maus\": 2, \"mauv\": 4, \"mav\": 6, \"mave\": 4, \"mavi\": 2, \"maw\": 5, \"mawe\": 1, \"mawk\": 1, \"max\": 141, \"maxe\": 1, \"maxi\": 125, \"maxw\": 13, \"may\": 393, \"maya\": 1, \"mayb\": 149, \"maye\": 134, \"mayf\": 7, \"mayh\": 12, \"mayn\": 3, \"mayo\": 62, \"mays\": 3, \"maz\": 35, \"maza\": 2, \"maze\": 10, \"mazi\": 1, \"mazo\": 1, \"mazu\": 2, \"mazz\": 1, \"mca\": 6, \"mcal\": 2, \"mcau\": 4, \"mcb\": 11, \"mcbr\": 11, \"mcc\": 55, \"mcca\": 13, \"mccl\": 19, \"mcco\": 14, \"mccr\": 3, \"mccu\": 6, \"mcd\": 4, \"mcda\": 2, \"mcde\": 1, \"mcdo\": 1, \"mce\": 4, \"mcea\": 1, \"mcel\": 2, \"mcen\": 1, \"mcf\": 15, \"mcfa\": 1, \"mcfe\": 14, \"mcg\": 7, \"mcge\": 3, \"mcgh\": 1, \"mcgl\": 1, \"mcgo\": 1, \"mcgr\": 1, \"mci\": 5, \"mcin\": 2, \"mciv\": 3, \"mck\": 21, \"mcke\": 8, \"mcki\": 13, \"mcl\": 8, \"mcla\": 1, \"mcle\": 4, \"mcli\": 3, \"mcn\": 11, \"mcna\": 8, \"mcne\": 3, \"mcp\": 2, \"mcph\": 2, \"mcq\": 1, \"mcqu\": 1, \"mcr\": 1, \"mcro\": 1, \"mcs\": 5, \"mcse\": 3, \"mcso\": 2, \"mcw\": 1, \"mcwh\": 1, \"mdi\": 2, \"mdia\": 2, \"mea\": 3601, \"mead\": 92, \"meag\": 14, \"meal\": 55, \"mean\": 1504, \"mear\": 2, \"meas\": 683, \"meat\": 29, \"meb\": 1, \"mebu\": 1, \"mec\": 180, \"mecc\": 5, \"mech\": 173, \"mecu\": 2, \"med\": 575, \"meda\": 25, \"medc\": 1, \"medd\": 26, \"mede\": 22, \"medf\": 4, \"medi\": 491, \"medl\": 2, \"medm\": 1, \"medo\": 1, 
\"medu\": 1, \"mee\": 1057, \"meeh\": 1, \"meek\": 47, \"meer\": 10, \"meet\": 436, \"meg\": 39, \"mega\": 26, \"megi\": 12, \"megl\": 1, \"meh\": 14, \"mehe\": 3, \"mehi\": 4, \"meho\": 2, \"mehu\": 5, \"mei\": 6, \"mein\": 2, \"meis\": 3, \"mej\": 1, \"meja\": 1, \"mek\": 4, \"meko\": 4, \"mel\": 391, \"mela\": 88, \"melb\": 3, \"melc\": 17, \"mele\": 6, \"meli\": 13, \"mell\": 15, \"melo\": 95, \"melp\": 1, \"mels\": 1, \"melt\": 93, \"melv\": 8, \"melz\": 6, \"mem\": 962, \"memb\": 612, \"meme\": 4, \"memi\": 1, \"memn\": 3, \"memo\": 325, \"memp\": 12, \"memu\": 3, \"men\": 656, \"mena\": 54, \"menc\": 5, \"mend\": 32, \"mene\": 2, \"menf\": 2, \"meni\": 5, \"menl\": 1, \"menn\": 4, \"meno\": 1, \"menp\": 2, \"mens\": 22, \"ment\": 457, \"menu\": 4, \"meo\": 2, \"meon\": 2, \"mep\": 21, \"meph\": 21, \"mer\": 1816, \"mera\": 52, \"merc\": 775, \"mere\": 343, \"merg\": 67, \"meri\": 168, \"merl\": 7, \"merm\": 10, \"mern\": 1, \"mero\": 7, \"merr\": 138, \"mert\": 1, \"merv\": 2, \"mes\": 549, \"mese\": 2, \"mesh\": 65, \"mesm\": 2, \"meso\": 17, \"mess\": 422, \"mest\": 1, \"met\": 775, \"meta\": 181, \"mete\": 88, \"meth\": 401, \"meti\": 9, \"metl\": 2, \"metr\": 70, \"mett\": 16, \"meu\": 2, \"meun\": 1, \"meur\": 1, \"mew\": 3, \"mewe\": 1, \"mex\": 72, \"mexi\": 72, \"mey\": 22, \"meye\": 9, \"meyl\": 1, \"meyn\": 12, \"mez\": 3, \"meza\": 2, \"mezz\": 1, \"mgl\": 3, \"mglb\": 1, \"mglh\": 2, \"mia\": 33, \"miam\": 28, \"mian\": 2, \"miao\": 1, \"mias\": 2, \"mib\": 7, \"mibh\": 2, \"mibs\": 3, \"mibz\": 2, \"mic\": 422, \"mica\": 51, \"mice\": 11, \"mich\": 201, \"mick\": 37, \"micr\": 96, \"mid\": 1280, \"mida\": 7, \"midc\": 2, \"midd\": 519, \"midf\": 3, \"midg\": 5, \"midi\": 67, \"midj\": 4, \"midl\": 1, \"midm\": 2, \"midn\": 107, \"mido\": 2, \"midp\": 1, \"midr\": 2, \"mids\": 496, \"midt\": 8, \"midv\": 1, \"midw\": 49, \"mie\": 2, \"mif\": 1, \"miff\": 1, \"mig\": 3222, \"migd\": 6, \"migh\": 3181, \"migl\": 1, \"mign\": 1, \"migr\": 28, \"migu\": 4, \"mij\": 7, \"mija\": 2, \"mijb\": 5, \"mik\": 107, \"mike\": 8, \"mikh\": 1, \"mikl\": 4, \"mikn\": 2, \"miko\": 1, \"mil\": 1740, \"mila\": 4, \"milb\": 1, \"milc\": 20, \"mild\": 65, \"mile\": 330, \"milh\": 2, \"mili\": 293, \"milk\": 31, \"mill\": 533, \"milm\": 2, \"milo\": 2, \"milq\": 2, \"mils\": 9, \"milt\": 27, \"milw\": 10, \"mim\": 37, \"mime\": 16, \"mimi\": 20, \"min\": 4064, \"mina\": 6, \"minb\": 1, \"minc\": 29, \"mind\": 249, \"mine\": 103, \"ming\": 83, \"mini\": 501, \"mink\": 1, \"minn\": 38, \"mino\": 99, \"mins\": 13, \"mint\": 3, \"minu\": 721, \"mip\": 1, \"miph\": 1, \"mir\": 308, \"mira\": 124, \"mire\": 1, \"miri\": 51, \"mirm\": 1, \"mirr\": 56, \"mirs\": 1, \"mirt\": 47, \"mis\": 3183, \"misa\": 13, \"misb\": 7, \"misc\": 188, \"misd\": 12, \"mise\": 209, \"misf\": 48, \"misg\": 18, \"mish\": 28, \"misi\": 12, \"misj\": 3, \"misl\": 27, \"mism\": 2, \"misn\": 3, \"miso\": 1, \"misp\": 13, \"misq\": 1, \"misr\": 12, \"miss\": 495, \"mist\": 530, \"misu\": 47, \"misw\": 1, \"mit\": 85, \"mitc\": 30, \"mite\": 5, \"mith\": 7, \"miti\": 9, \"mitr\": 19, \"mitt\": 4, \"mity\": 6, \"miu\": 1, \"miuc\": 1, \"mix\": 187, \"mixe\": 108, \"mixi\": 18, \"mixt\": 58, \"miy\": 2, \"miya\": 2, \"miz\": 68, \"miza\": 1, \"mize\": 8, \"mizp\": 47, \"mizr\": 5, \"mizz\": 7, \"mll\": 1, \"mls\": 1, \"mme\": 5, \"mmm\": 1, \"mmw\": 1, \"mmwi\": 1, \"mna\": 1, \"mnas\": 1, \"moa\": 224, \"moab\": 29, \"moad\": 1, \"moan\": 15, \"moap\": 1, \"moat\": 2, \"mob\": 169, \"mobb\": 2, \"mobc\": 1, \"mobi\": 70, 
\"mobs\": 1, \"mobu\": 2, \"moc\": 199, \"mocc\": 8, \"mock\": 96, \"mod\": 811, \"moda\": 4, \"mode\": 706, \"modi\": 57, \"modu\": 14, \"mof\": 1, \"moff\": 1, \"mog\": 8, \"moga\": 1, \"mogu\": 7, \"moh\": 6, \"moha\": 5, \"mohr\": 1, \"moi\": 72, \"moid\": 1, \"moie\": 1, \"moin\": 1, \"moir\": 1, \"mois\": 67, \"moit\": 1, \"mol\": 264, \"mola\": 12, \"mold\": 36, \"mole\": 55, \"moli\": 12, \"moll\": 21, \"molo\": 16, \"molt\": 43, \"molu\": 3, \"molv\": 3, \"mom\": 1233, \"momb\": 1, \"mome\": 1223, \"momm\": 5, \"momo\": 3, \"mon\": 2512, \"mona\": 51, \"monc\": 2, \"mond\": 104, \"mone\": 757, \"monf\": 2, \"mong\": 9, \"moni\": 30, \"monk\": 75, \"monm\": 5, \"mono\": 129, \"monr\": 13, \"mons\": 204, \"mont\": 1031, \"monu\": 59, \"monw\": 1, \"moo\": 571, \"mood\": 49, \"mooe\": 1, \"mooi\": 1, \"moon\": 113, \"moor\": 48, \"moos\": 6, \"moot\": 1, \"moou\": 2, \"mop\": 10, \"mope\": 1, \"mopi\": 1, \"mopp\": 7, \"mor\": 9040, \"mora\": 280, \"morb\": 28, \"morc\": 2, \"mord\": 62, \"more\": 353, \"morg\": 85, \"mori\": 21, \"morl\": 3, \"morm\": 2, \"morn\": 1117, \"moro\": 15, \"morp\": 27, \"morq\": 1, \"morr\": 323, \"mors\": 59, \"mort\": 332, \"mos\": 3768, \"mosa\": 9, \"mosc\": 55, \"mose\": 876, \"mosl\": 5, \"mosq\": 20, \"moss\": 13, \"most\": 73, \"mot\": 2188, \"mote\": 35, \"moth\": 1568, \"moti\": 384, \"motl\": 10, \"moto\": 144, \"mott\": 25, \"mou\": 2398, \"moue\": 5, \"moug\": 2, \"moui\": 2, \"moul\": 58, \"moun\": 1107, \"mour\": 202, \"mous\": 169, \"mout\": 837, \"mouv\": 1, \"mov\": 1292, \"mova\": 21, \"move\": 688, \"movi\": 279, \"mow\": 23, \"mowe\": 15, \"mowi\": 1, \"mows\": 1, \"moz\": 11, \"moza\": 6, \"mra\": 4, \"mrad\": 1, \"mse\": 1, \"mub\": 1, \"muba\": 1, \"muc\": 3268, \"much\": 8, \"muci\": 1, \"muck\": 4, \"muco\": 5, \"mucu\": 2, \"mud\": 37, \"mudb\": 1, \"mudc\": 1, \"mudd\": 30, \"mudg\": 1, \"muds\": 2, \"mudu\": 1, \"mudw\": 1, \"mue\": 4, \"muez\": 4, \"muf\": 45, \"muff\": 41, \"muft\": 2, \"mug\": 7, \"mugg\": 3, \"muh\": 1, \"muha\": 1, \"mui\": 1, \"mul\": 659, \"mula\": 2, \"mulb\": 8, \"mulc\": 8, \"mule\": 19, \"mull\": 34, \"mult\": 562, \"mum\": 34, \"mumb\": 20, \"mumf\": 1, \"mumm\": 12, \"mun\": 77, \"munc\": 6, \"mund\": 8, \"mung\": 5, \"muni\": 56, \"muno\": 1, \"munr\": 1, \"muo\": 1, \"muon\": 1, \"mup\": 1, \"mupp\": 1, \"mur\": 545, \"mura\": 3, \"murd\": 317, \"mure\": 2, \"murk\": 15, \"murm\": 124, \"murp\": 8, \"murr\": 15, \"murt\": 52, \"murv\": 1, \"mus\": 4625, \"musc\": 173, \"muse\": 64, \"musg\": 171, \"mush\": 25, \"musi\": 646, \"musk\": 45, \"musl\": 12, \"musm\": 5, \"musn\": 1, \"muss\": 15, \"must\": 88, \"mut\": 278, \"muta\": 6, \"mute\": 14, \"muti\": 33, \"mutt\": 105, \"mutu\": 94, \"muz\": 23, \"muza\": 1, \"muzy\": 1, \"muzz\": 20, \"myc\": 5, \"myce\": 3, \"myco\": 2, \"mye\": 5, \"myel\": 2, \"myer\": 3, \"myl\": 2, \"myla\": 2, \"myn\": 4, \"mynh\": 3, \"myo\": 10, \"myoc\": 4, \"myof\": 3, \"myop\": 2, \"myos\": 1, \"myr\": 120, \"myra\": 6, \"myri\": 43, \"myro\": 1, \"myrr\": 24, \"myrt\": 18, \"mys\": 1240, \"myse\": 925, \"mysi\": 2, \"myst\": 313, \"myt\": 82, \"myth\": 40, \"mytt\": 1, \"naa\": 38, \"naam\": 27, \"naar\": 6, \"naas\": 4, \"nab\": 49, \"naba\": 22, \"nabb\": 3, \"nabi\": 1, \"nabo\": 23, \"nac\": 5, \"nach\": 5, \"nad\": 42, \"nada\": 20, \"nadi\": 22, \"nae\": 2, \"naeb\": 2, \"nag\": 23, \"naga\": 4, \"nage\": 1, \"nagg\": 11, \"nagl\": 1, \"nagr\": 6, \"nah\": 54, \"naha\": 23, \"nahb\": 1, \"nahc\": 1, \"naho\": 17, \"nahs\": 9, \"nahu\": 2, \"nai\": 143, 
\"nail\": 72, \"naio\": 6, \"nair\": 5, \"naiv\": 27, \"nak\": 229, \"naka\": 4, \"nake\": 222, \"nako\": 1, \"nakt\": 2, \"nam\": 2381, \"name\": 582, \"nami\": 11, \"nan\": 158, \"nanc\": 22, \"nank\": 1, \"nano\": 1, \"nant\": 133, \"nao\": 23, \"naom\": 23, \"nap\": 141, \"naph\": 55, \"napk\": 16, \"napl\": 43, \"napo\": 19, \"napp\": 4, \"napt\": 1, \"nar\": 309, \"narb\": 1, \"narc\": 14, \"nare\": 3, \"narr\": 269, \"narw\": 13, \"nas\": 62, \"nasa\": 6, \"nasc\": 2, \"nash\": 7, \"nass\": 18, \"nast\": 16, \"nat\": 2929, \"nata\": 5, \"natc\": 4, \"nate\": 2, \"nath\": 60, \"nati\": 1559, \"nato\": 1, \"natr\": 1, \"natt\": 1, \"natu\": 1265, \"nau\": 57, \"naug\": 37, \"naui\": 1, \"naus\": 6, \"naut\": 10, \"nav\": 164, \"nava\": 43, \"nave\": 11, \"navi\": 25, \"navo\": 4, \"navv\": 3, \"navy\": 13, \"naw\": 2, \"nawt\": 1, \"nax\": 1, \"naxo\": 1, \"naz\": 74, \"naza\": 47, \"nazi\": 14, \"nbc\": 1, \"nbct\": 1, \"nct\": 3, \"ndi\": 1, \"ndim\": 1, \"ndo\": 1, \"ndol\": 1, \"nea\": 1515, \"nean\": 1, \"neap\": 12, \"near\": 588, \"neat\": 42, \"neb\": 172, \"neba\": 32, \"nebr\": 10, \"nebu\": 116, \"nec\": 957, \"nece\": 607, \"nech\": 3, \"neck\": 58, \"necr\": 8, \"nect\": 12, \"ned\": 1, \"neda\": 1, \"nee\": 1322, \"need\": 649, \"neer\": 28, \"nees\": 2, \"neg\": 495, \"nega\": 77, \"negl\": 135, \"nego\": 59, \"negr\": 223, \"negu\": 1, \"neh\": 22, \"nehe\": 12, \"nehr\": 6, \"nehu\": 3, \"nei\": 1806, \"neie\": 1, \"neig\": 534, \"neil\": 7, \"neim\": 4, \"neis\": 3, \"neit\": 1254, \"nek\": 5, \"neke\": 1, \"neko\": 4, \"nel\": 36, \"nell\": 6, \"nels\": 29, \"nem\": 9, \"neme\": 4, \"nemi\": 1, \"nemu\": 4, \"nen\": 2, \"nenn\": 2, \"neo\": 37, \"neoc\": 10, \"neod\": 1, \"neoe\": 1, \"neoj\": 1, \"neol\": 1, \"neon\": 4, \"neop\": 2, \"neor\": 1, \"neos\": 2, \"nep\": 80, \"nepa\": 1, \"neph\": 72, \"nept\": 7, \"ner\": 187, \"nere\": 1, \"nerg\": 4, \"neri\": 12, \"nern\": 2, \"neru\": 3, \"nerv\": 157, \"nes\": 115, \"nesc\": 1, \"nesk\": 2, \"nest\": 41, \"net\": 200, \"neth\": 84, \"netl\": 1, \"neto\": 13, \"nett\": 17, \"netw\": 64, \"neu\": 171, \"neub\": 1, \"neue\": 54, \"neum\": 1, \"neur\": 36, \"neus\": 2, \"neut\": 77, \"nev\": 2792, \"neva\": 13, \"neve\": 2778, \"nevs\": 1, \"new\": 697, \"newa\": 9, \"newb\": 23, \"newc\": 17, \"newe\": 78, \"newf\": 10, \"newh\": 1, \"newi\": 25, \"newl\": 61, \"newm\": 10, \"newn\": 3, \"newp\": 29, \"newr\": 1, \"news\": 176, \"newt\": 17, \"neww\": 1, \"newy\": 1, \"nex\": 961, \"next\": 2, \"ney\": 6, \"neyt\": 6, \"nez\": 3, \"nezi\": 3, \"nga\": 1, \"ngan\": 1, \"nia\": 11, \"niag\": 11, \"nib\": 22, \"nibb\": 17, \"nibe\": 2, \"nibh\": 1, \"nibs\": 1, \"nic\": 333, \"nica\": 3, \"nicc\": 1, \"nice\": 46, \"nich\": 22, \"nick\": 34, \"nico\": 30, \"nie\": 48, \"nieb\": 2, \"niec\": 38, \"niem\": 3, \"niep\": 1, \"niet\": 4, \"nig\": 2210, \"nige\": 7, \"nigg\": 38, \"nigh\": 1971, \"nign\": 1, \"nigr\": 1, \"nih\": 6, \"nihi\": 6, \"nij\": 1, \"niji\": 1, \"nik\": 15, \"nike\": 1, \"niki\": 7, \"nikk\": 1, \"niko\": 6, \"nil\": 30, \"nill\": 1, \"nilp\": 12, \"nils\": 2, \"nilu\": 1, \"nim\": 38, \"nimb\": 25, \"nimp\": 1, \"nimr\": 7, \"nims\": 5, \"nin\": 465, \"ninc\": 1, \"nine\": 202, \"ninn\": 1, \"nint\": 62, \"nio\": 2, \"niob\": 2, \"nip\": 17, \"nipe\": 1, \"niph\": 4, \"nipp\": 11, \"nir\": 2, \"nirv\": 1, \"nirw\": 1, \"nis\": 15, \"nisa\": 2, \"nisc\": 4, \"nisf\": 1, \"nish\": 4, \"nisr\": 3, \"nit\": 26, \"nitr\": 24, \"niv\": 4, \"nive\": 1, \"nivi\": 3, \"nix\": 26, \"nixo\": 26, \"nju\": 1, 
\"njus\": 1, \"nkr\": 2, \"nkru\": 2, \"nlr\": 3, \"nlrd\": 2, \"nni\": 2, \"nnig\": 2, \"nnu\": 1, \"nnuo\": 1, \"noa\": 68, \"noad\": 2, \"nob\": 607, \"noba\": 4, \"nobe\": 7, \"nobi\": 12, \"nobl\": 281, \"nobo\": 302, \"noc\": 16, \"noca\": 1, \"noce\": 2, \"noci\": 2, \"noct\": 11, \"nod\": 125, \"noda\": 1, \"nodd\": 109, \"node\": 3, \"nodr\": 1, \"nodu\": 2, \"noe\": 9, \"nog\": 16, \"noga\": 6, \"nogo\": 10, \"noh\": 3, \"noha\": 1, \"nohi\": 2, \"noi\": 352, \"noir\": 1, \"nois\": 349, \"nol\": 6, \"nola\": 1, \"nole\": 1, \"noll\": 1, \"nom\": 73, \"noma\": 6, \"nome\": 8, \"nomi\": 59, \"non\": 1166, \"nona\": 9, \"nonb\": 2, \"nonc\": 48, \"nond\": 17, \"none\": 21, \"nonf\": 14, \"nong\": 3, \"nonh\": 1, \"noni\": 9, \"nonj\": 6, \"nonl\": 5, \"nonm\": 10, \"nonn\": 3, \"nono\": 7, \"nonp\": 28, \"nonr\": 17, \"nons\": 135, \"nont\": 6, \"nonv\": 21, \"nonw\": 7, \"noo\": 129, \"nook\": 5, \"noon\": 21, \"noos\": 5, \"nop\": 9, \"noph\": 1, \"nor\": 1041, \"nora\": 3, \"norb\": 7, \"nord\": 6, \"nore\": 3, \"norf\": 4, \"norl\": 53, \"norm\": 243, \"norr\": 3, \"nors\": 1, \"nort\": 680, \"noru\": 1, \"norw\": 26, \"nos\": 278, \"nose\": 29, \"nosi\": 2, \"nosk\": 1, \"nost\": 52, \"not\": 3080, \"nota\": 53, \"notc\": 27, \"note\": 217, \"noth\": 1901, \"noti\": 526, \"notk\": 1, \"notl\": 1, \"noto\": 18, \"notq\": 1, \"notr\": 8, \"nots\": 3, \"nott\": 20, \"notu\": 1, \"notw\": 53, \"noty\": 1, \"nou\": 115, \"noug\": 49, \"noun\": 3, \"nour\": 58, \"nouv\": 2, \"nov\": 253, \"nova\": 3, \"nove\": 235, \"novi\": 10, \"novo\": 2, \"now\": 77, \"nowa\": 25, \"nowf\": 1, \"nowh\": 47, \"nowi\": 3, \"nowm\": 1, \"nox\": 5, \"noxi\": 5, \"noy\": 11, \"noya\": 1, \"noye\": 1, \"noyo\": 1, \"noys\": 8, \"noz\": 9, \"nozz\": 9, \"nrl\": 1, \"nrld\": 1, \"ntr\": 1, \"ntri\": 1, \"nua\": 4, \"nuan\": 4, \"nub\": 2, \"nubb\": 1, \"nubi\": 1, \"nuc\": 149, \"nucl\": 149, \"nud\": 34, \"nude\": 2, \"nudg\": 7, \"nudi\": 4, \"nue\": 5, \"nuec\": 1, \"nuf\": 3, \"nug\": 7, \"nuge\": 6, \"nugg\": 1, \"nui\": 9, \"nuis\": 7, \"nul\": 20, \"null\": 6, \"num\": 1260, \"numb\": 1126, \"nume\": 125, \"numi\": 4, \"nun\": 13, \"nunc\": 1, \"nune\": 1, \"nunn\": 5, \"nuo\": 1, \"nuov\": 1, \"nup\": 10, \"nupe\": 1, \"nupt\": 9, \"nur\": 164, \"nurs\": 154, \"nurt\": 9, \"nut\": 89, \"nutc\": 3, \"nuth\": 1, \"nutl\": 1, \"nutm\": 6, \"nutr\": 26, \"nuts\": 3, \"nutt\": 2, \"nuz\": 2, \"nuzz\": 2, \"nyb\": 2, \"nybe\": 2, \"nyl\": 1, \"nylo\": 1, \"nym\": 13, \"nymp\": 13, \"nys\": 1, \"nyse\": 1, \"oaf\": 1, \"oah\": 1, \"oak\": 78, \"oake\": 12, \"oakl\": 42, \"oakm\": 1, \"oaku\": 5, \"oakw\": 5, \"oar\": 81, \"oars\": 33, \"oas\": 3, \"oase\": 2, \"oasi\": 1, \"oat\": 140, \"oath\": 22, \"oatm\": 1, \"oatn\": 2, \"oats\": 1, \"oba\": 56, \"obad\": 21, \"oban\": 34, \"obd\": 6, \"obdu\": 6, \"obe\": 391, \"obed\": 130, \"obei\": 10, \"obel\": 8, \"ober\": 5, \"obes\": 6, \"obey\": 98, \"obi\": 9, \"obie\": 4, \"obit\": 3, \"obj\": 667, \"obje\": 667, \"obl\": 500, \"obla\": 40, \"obli\": 445, \"oblo\": 15, \"obn\": 7, \"obno\": 7, \"obo\": 7, \"oboi\": 1, \"obom\": 1, \"obot\": 4, \"obr\": 2, \"obri\": 2, \"obs\": 899, \"obsc\": 89, \"obse\": 726, \"obsi\": 1, \"obso\": 10, \"obst\": 73, \"obt\": 293, \"obta\": 283, \"obtr\": 9, \"obtu\": 1, \"obv\": 259, \"obve\": 1, \"obvi\": 258, \"oca\": 2, \"ocar\": 1, \"ocas\": 1, \"occ\": 1001, \"occa\": 410, \"occh\": 1, \"occi\": 7, \"occl\": 7, \"occu\": 576, \"oce\": 216, \"ocea\": 215, \"ocel\": 1, \"och\": 4, \"oche\": 1, \"ocho\": 1, \"ochr\": 
2, \"ocl\": 42, \"oclo\": 42, \"oco\": 7, \"ocon\": 7, \"ocr\": 5, \"ocra\": 5, \"oct\": 140, \"octa\": 50, \"octe\": 1, \"octh\": 1, \"octi\": 4, \"octo\": 66, \"ocu\": 3, \"ocul\": 3, \"ocz\": 2, \"ocza\": 2, \"odd\": 90, \"odde\": 12, \"oddi\": 9, \"oddl\": 34, \"odds\": 1, \"ode\": 12, \"odel\": 1, \"oder\": 1, \"odes\": 1, \"odi\": 26, \"odil\": 1, \"odio\": 19, \"odiu\": 2, \"odo\": 82, \"odon\": 3, \"odor\": 24, \"odou\": 18, \"odw\": 4, \"odwy\": 4, \"ody\": 14, \"odys\": 14, \"oec\": 6, \"oech\": 1, \"oed\": 24, \"oedi\": 24, \"oer\": 21, \"oeri\": 1, \"oers\": 20, \"oet\": 1, \"oeu\": 2, \"oeuv\": 2, \"off\": 3276, \"offa\": 5, \"offb\": 6, \"offc\": 1, \"offd\": 3, \"offe\": 2145, \"offf\": 3, \"offh\": 1, \"offi\": 1035, \"offk\": 1, \"offl\": 1, \"offr\": 2, \"offs\": 71, \"offt\": 1, \"offu\": 1, \"oft\": 835, \"ofte\": 829, \"oftn\": 1, \"oftr\": 1, \"oftt\": 4, \"oga\": 1, \"ogar\": 1, \"ogd\": 3, \"ogde\": 3, \"ogl\": 3, \"ogle\": 3, \"ogr\": 6, \"ogre\": 1, \"oha\": 3, \"ohar\": 1, \"ohe\": 1, \"ohi\": 56, \"ohio\": 1, \"ohm\": 1, \"ohmi\": 1, \"oht\": 1, \"ohth\": 1, \"oil\": 54, \"oilb\": 3, \"oilc\": 3, \"oile\": 7, \"oilf\": 1, \"oilh\": 2, \"oilm\": 1, \"oilp\": 1, \"oils\": 5, \"oilw\": 1, \"oin\": 38, \"oint\": 38, \"ois\": 1, \"oist\": 1, \"oit\": 1, \"oiti\": 1, \"oka\": 23, \"okad\": 1, \"okam\": 2, \"oke\": 1, \"okee\": 1, \"oki\": 1, \"okin\": 1, \"okl\": 18, \"okla\": 15, \"oko\": 2, \"okon\": 1, \"okot\": 1, \"ola\": 3, \"olas\": 1, \"olat\": 1, \"old\": 192, \"olda\": 1, \"olde\": 161, \"oldf\": 7, \"oldg\": 1, \"oldi\": 1, \"oldl\": 1, \"oldn\": 1, \"olds\": 6, \"oldt\": 7, \"ole\": 9, \"olea\": 2, \"oleo\": 5, \"oler\": 1, \"olf\": 2, \"olfa\": 2, \"olg\": 13, \"olgi\": 7, \"oli\": 107, \"oliv\": 106, \"olm\": 1, \"olms\": 1, \"oln\": 1, \"olne\": 1, \"olo\": 1, \"olog\": 1, \"ols\": 3, \"olse\": 1, \"olso\": 2, \"olv\": 1, \"olve\": 1, \"oly\": 21, \"olym\": 21, \"oma\": 15, \"omah\": 2, \"omb\": 1, \"omba\": 1, \"ome\": 30, \"omeg\": 5, \"omel\": 4, \"omen\": 4, \"omer\": 1, \"omi\": 78, \"omin\": 28, \"omis\": 10, \"omit\": 34, \"omm\": 1, \"ommi\": 1, \"omn\": 63, \"omne\": 7, \"omni\": 56, \"omr\": 18, \"oms\": 1, \"ona\": 16, \"onag\": 1, \"onan\": 3, \"onc\": 1440, \"once\": 6, \"onco\": 2, \"one\": 511, \"onea\": 6, \"onec\": 1, \"oned\": 4, \"onee\": 2, \"onef\": 10, \"oneg\": 2, \"oneh\": 10, \"onei\": 11, \"onek\": 1, \"onel\": 27, \"onem\": 6, \"onen\": 5, \"oneo\": 2, \"onep\": 1, \"oneq\": 2, \"oner\": 4, \"ones\": 38, \"onet\": 31, \"oneu\": 1, \"onew\": 2, \"oney\": 1, \"oni\": 25, \"onio\": 25, \"onl\": 4263, \"onle\": 4, \"onli\": 1, \"onlo\": 4, \"onr\": 5, \"onru\": 5, \"ons\": 62, \"onse\": 52, \"onsi\": 2, \"onsl\": 6, \"onst\": 1, \"onsu\": 1, \"ont\": 86, \"onta\": 10, \"onth\": 6, \"onto\": 10, \"onu\": 2, \"onw\": 36, \"onwa\": 36, \"ony\": 12, \"onyc\": 1, \"oom\": 1, \"ooma\": 1, \"oon\": 1, \"ooo\": 1, \"oop\": 2, \"oops\": 1, \"oos\": 1, \"oot\": 4, \"ooti\": 4, \"ooz\": 10, \"ooze\": 3, \"opa\": 18, \"opac\": 2, \"opal\": 2, \"opaq\": 11, \"ope\": 2357, \"opel\": 2, \"open\": 850, \"oper\": 618, \"oph\": 120, \"ophe\": 35, \"ophi\": 18, \"ophn\": 1, \"ophr\": 8, \"opht\": 2, \"opi\": 444, \"opia\": 3, \"opin\": 421, \"opiu\": 20, \"opo\": 2, \"opor\": 1, \"opos\": 1, \"opp\": 912, \"oppe\": 1, \"oppi\": 2, \"oppo\": 730, \"oppr\": 179, \"opt\": 126, \"opte\": 2, \"opti\": 124, \"opu\": 8, \"opul\": 5, \"ora\": 174, \"orac\": 36, \"oral\": 2, \"oran\": 53, \"orat\": 55, \"orb\": 67, \"orbe\": 3, \"orbi\": 42, \"orc\": 150, 
\"orch\": 146, \"orcu\": 3, \"ord\": 1313, \"orda\": 83, \"orde\": 958, \"ordi\": 267, \"ordn\": 1, \"ordu\": 1, \"ore\": 46, \"orea\": 1, \"oreg\": 22, \"ores\": 4, \"orew\": 1, \"org\": 466, \"orga\": 453, \"orgi\": 11, \"orgo\": 1, \"ori\": 457, \"oria\": 1, \"orie\": 82, \"orif\": 3, \"orig\": 338, \"orin\": 1, \"orio\": 26, \"oris\": 4, \"oriz\": 1, \"ork\": 1, \"orkn\": 1, \"orl\": 64, \"orla\": 2, \"orle\": 53, \"orli\": 6, \"orlo\": 2, \"orm\": 5, \"ormo\": 2, \"orms\": 2, \"ormu\": 1, \"orn\": 95, \"orna\": 90, \"orne\": 4, \"ornr\": 1, \"oro\": 4, \"oron\": 3, \"orot\": 1, \"orp\": 50, \"orpa\": 2, \"orph\": 48, \"ors\": 3, \"orse\": 2, \"ort\": 73, \"orte\": 2, \"orth\": 71, \"oru\": 1, \"orv\": 12, \"orvi\": 12, \"orw\": 5, \"orwe\": 5, \"orz\": 1, \"orza\": 1, \"osa\": 7, \"osag\": 1, \"osak\": 6, \"osb\": 3, \"osbe\": 1, \"osbo\": 2, \"osc\": 20, \"osca\": 11, \"osce\": 1, \"osci\": 8, \"ose\": 1, \"osh\": 4, \"oshe\": 2, \"oshk\": 2, \"osi\": 7, \"osie\": 1, \"osip\": 1, \"osir\": 2, \"osit\": 2, \"osk\": 1, \"oska\": 1, \"osl\": 5, \"osm\": 4, \"osmi\": 1, \"osmo\": 3, \"oso\": 2, \"osp\": 2, \"ospr\": 2, \"osr\": 7, \"osra\": 1, \"osri\": 6, \"oss\": 14, \"osse\": 3, \"ossi\": 10, \"ost\": 45, \"osta\": 1, \"oste\": 31, \"osti\": 1, \"ostl\": 5, \"ostr\": 7, \"osu\": 2, \"osul\": 2, \"osw\": 1, \"oswe\": 1, \"oth\": 5241, \"othe\": 5223, \"othn\": 8, \"otho\": 10, \"oti\": 3, \"ott\": 96, \"otta\": 5, \"otte\": 67, \"otto\": 9, \"otw\": 6, \"otwa\": 6, \"ouc\": 8, \"ouch\": 8, \"oue\": 32, \"ouer\": 10, \"oug\": 464, \"ough\": 464, \"oul\": 9, \"oun\": 14, \"ounc\": 14, \"our\": 298, \"oura\": 2, \"ours\": 204, \"ous\": 10, \"oust\": 3, \"out\": 1231, \"outa\": 1, \"outb\": 40, \"outc\": 60, \"outd\": 50, \"oute\": 76, \"outf\": 38, \"outg\": 30, \"outh\": 3, \"outi\": 5, \"outl\": 171, \"outm\": 14, \"outn\": 5, \"outo\": 28, \"outp\": 53, \"outr\": 79, \"outs\": 446, \"outt\": 2, \"outv\": 2, \"outw\": 111, \"outy\": 1, \"ouz\": 1, \"ova\": 17, \"oval\": 2, \"ovat\": 2, \"ove\": 4940, \"oven\": 2, \"over\": 1115, \"ovi\": 1, \"ovif\": 1, \"ovu\": 1, \"owe\": 86, \"owen\": 1, \"owes\": 5, \"owet\": 1, \"owi\": 39, \"owin\": 39, \"owl\": 16, \"owli\": 1, \"own\": 314, \"owne\": 221, \"owni\": 13, \"owns\": 1, \"oxa\": 3, \"oxal\": 3, \"oxc\": 2, \"oxca\": 2, \"oxe\": 126, \"oxen\": 1, \"oxey\": 1, \"oxf\": 43, \"oxfo\": 43, \"oxi\": 29, \"oxid\": 29, \"oxn\": 1, \"oxna\": 1, \"oxu\": 1, \"oxy\": 53, \"oxyg\": 49, \"oxyh\": 1, \"oxyt\": 3, \"oya\": 5, \"oyab\": 4, \"oyaj\": 1, \"oys\": 26, \"oyst\": 26, \"oza\": 6, \"ozag\": 5, \"ozar\": 1, \"oze\": 2, \"ozi\": 2, \"ozia\": 2, \"ozn\": 2, \"ozni\": 1, \"ozo\": 5, \"ozon\": 4, \"ozz\": 1, \"ozzi\": 1, \"paa\": 1, \"paar\": 1, \"pab\": 4, \"pabl\": 1, \"pabo\": 1, \"pac\": 487, \"pace\": 54, \"pach\": 4, \"paci\": 130, \"pack\": 146, \"paco\": 1, \"pact\": 2, \"pad\": 74, \"pada\": 12, \"padd\": 41, \"padl\": 6, \"pado\": 3, \"padr\": 4, \"pae\": 5, \"paea\": 4, \"paes\": 1, \"pag\": 238, \"paga\": 43, \"page\": 77, \"pagi\": 7, \"pagl\": 1, \"pagn\": 3, \"pago\": 7, \"pah\": 6, \"paha\": 6, \"pai\": 1353, \"paid\": 2, \"pail\": 7, \"pain\": 564, \"paio\": 1, \"pair\": 38, \"paj\": 4, \"paja\": 4, \"pak\": 10, \"paki\": 10, \"pal\": 937, \"pala\": 215, \"pale\": 42, \"palf\": 35, \"pali\": 17, \"pall\": 50, \"palm\": 205, \"palo\": 2, \"palp\": 29, \"pals\": 23, \"palt\": 9, \"pam\": 52, \"pama\": 1, \"pame\": 20, \"pami\": 1, \"paml\": 1, \"pamp\": 28, \"pan\": 422, \"pana\": 15, \"panc\": 6, \"pand\": 14, \"pane\": 113, 
\"pang\": 25, \"panh\": 1, \"pani\": 43, \"panj\": 1, \"pank\": 2, \"pann\": 11, \"pano\": 15, \"pans\": 20, \"pant\": 114, \"pany\": 2, \"panz\": 3, \"pao\": 3, \"pap\": 595, \"papa\": 28, \"pape\": 414, \"paph\": 2, \"papi\": 6, \"papp\": 3, \"papr\": 2, \"paq\": 1, \"paqu\": 1, \"par\": 5753, \"para\": 546, \"parb\": 3, \"parc\": 97, \"pard\": 155, \"pare\": 237, \"pari\": 154, \"park\": 184, \"parl\": 150, \"parm\": 8, \"parn\": 2, \"paro\": 40, \"parq\": 1, \"parr\": 32, \"pars\": 74, \"part\": 2515, \"paru\": 1, \"parv\": 4, \"pas\": 3726, \"pasa\": 8, \"pasc\": 7, \"pasd\": 1, \"pase\": 3, \"pash\": 17, \"pasl\": 2, \"pass\": 1672, \"past\": 218, \"pat\": 1433, \"pata\": 10, \"patc\": 117, \"pate\": 94, \"path\": 176, \"pati\": 387, \"patm\": 2, \"patr\": 195, \"pats\": 1, \"patt\": 280, \"pau\": 607, \"pauc\": 1, \"paul\": 36, \"paum\": 20, \"paun\": 6, \"paup\": 3, \"paur\": 1, \"paus\": 253, \"pav\": 105, \"pave\": 76, \"pavi\": 21, \"pavl\": 4, \"paw\": 40, \"pawc\": 2, \"pawe\": 1, \"pawi\": 2, \"pawn\": 8, \"paws\": 3, \"pawt\": 7, \"pax\": 11, \"paxa\": 1, \"paxo\": 3, \"paxt\": 7, \"pay\": 259, \"paya\": 4, \"payc\": 2, \"payd\": 3, \"paye\": 7, \"payi\": 60, \"paym\": 115, \"payn\": 21, \"payo\": 1, \"payr\": 16, \"pays\": 1, \"pea\": 1225, \"peab\": 3, \"peac\": 971, \"peak\": 42, \"peal\": 19, \"pean\": 11, \"pear\": 58, \"peas\": 46, \"peat\": 1, \"peaz\": 1, \"peb\": 15, \"pebb\": 14, \"pebw\": 1, \"pec\": 229, \"peca\": 3, \"pecc\": 3, \"peck\": 13, \"peco\": 2, \"pect\": 5, \"pecu\": 195, \"ped\": 117, \"peda\": 24, \"pedd\": 33, \"pede\": 38, \"pedi\": 11, \"pedl\": 3, \"pedr\": 8, \"pee\": 212, \"peeb\": 1, \"peec\": 11, \"peek\": 3, \"peel\": 25, \"peep\": 26, \"peer\": 101, \"peet\": 1, \"peeu\": 2, \"peev\": 4, \"peew\": 1, \"peg\": 36, \"pega\": 1, \"pegb\": 8, \"pegg\": 21, \"pegl\": 1, \"pei\": 1, \"peip\": 1, \"pek\": 20, \"peka\": 14, \"peke\": 2, \"peki\": 2, \"peko\": 2, \"pel\": 162, \"pela\": 9, \"pele\": 92, \"pelh\": 5, \"peli\": 10, \"pell\": 6, \"pelo\": 4, \"pelt\": 18, \"pelv\": 5, \"pem\": 11, \"pemb\": 10, \"pemm\": 1, \"pen\": 670, \"pena\": 57, \"penc\": 87, \"pend\": 76, \"pene\": 92, \"peng\": 2, \"peni\": 32, \"penk\": 2, \"penm\": 1, \"penn\": 196, \"peno\": 1, \"penr\": 1, \"pens\": 57, \"pent\": 21, \"penu\": 16, \"peo\": 3728, \"peon\": 5, \"peop\": 3717, \"pep\": 46, \"pepi\": 1, \"pepp\": 39, \"pept\": 6, \"peq\": 173, \"pequ\": 173, \"per\": 6571, \"pera\": 42, \"perc\": 502, \"perd\": 19, \"pere\": 42, \"perf\": 1216, \"perg\": 8, \"perh\": 940, \"peri\": 781, \"perj\": 6, \"perk\": 5, \"perl\": 6, \"perm\": 397, \"pern\": 12, \"pero\": 2, \"perp\": 198, \"perq\": 2, \"perr\": 101, \"pers\": 2006, \"pert\": 128, \"peru\": 24, \"perv\": 110, \"pery\": 2, \"pes\": 105, \"pesa\": 2, \"pesc\": 3, \"pess\": 21, \"pest\": 73, \"pet\": 468, \"peta\": 5, \"pete\": 267, \"peth\": 7, \"peti\": 92, \"petr\": 16, \"pets\": 1, \"pett\": 42, \"petu\": 6, \"peu\": 3, \"peug\": 1, \"peul\": 1, \"pew\": 14, \"pewt\": 12, \"pez\": 3, \"pezz\": 3, \"pfa\": 5, \"pfaf\": 4, \"pfe\": 1, \"pfen\": 1, \"pff\": 1, \"pfff\": 1, \"pfo\": 3, \"pfoh\": 3, \"pha\": 603, \"phae\": 1, \"phag\": 1, \"phal\": 17, \"phan\": 62, \"phar\": 417, \"phas\": 105, \"phe\": 99, \"phea\": 9, \"pheb\": 1, \"phed\": 6, \"phel\": 2, \"phen\": 81, \"phi\": 935, \"phia\": 1, \"phic\": 3, \"phid\": 1, \"phil\": 830, \"phin\": 27, \"phip\": 2, \"phl\": 4, \"phle\": 3, \"phlo\": 1, \"pho\": 303, \"phob\": 1, \"phoe\": 14, \"phon\": 115, \"phos\": 32, \"phot\": 121, \"phou\": 5, 
\"phr\": 136, \"phra\": 113, \"phre\": 20, \"phry\": 3, \"pht\": 1, \"phth\": 1, \"phu\": 5, \"phur\": 2, \"phuv\": 1, \"phy\": 332, \"phyf\": 4, \"phyg\": 1, \"phyl\": 4, \"phys\": 323, \"pia\": 132, \"pian\": 105, \"pias\": 1, \"piaz\": 26, \"pib\": 1, \"pibe\": 1, \"pic\": 832, \"pica\": 18, \"picc\": 12, \"pick\": 247, \"picn\": 25, \"pico\": 1, \"pict\": 423, \"pid\": 6, \"pidd\": 4, \"pidg\": 2, \"pie\": 923, \"pieb\": 2, \"piec\": 574, \"pied\": 163, \"piep\": 14, \"pier\": 120, \"piet\": 19, \"piez\": 4, \"pig\": 111, \"pigd\": 2, \"pige\": 75, \"pigg\": 3, \"pigi\": 1, \"pigl\": 1, \"pigm\": 15, \"pigp\": 1, \"pigs\": 1, \"pih\": 4, \"piha\": 4, \"pik\": 60, \"pike\": 4, \"pil\": 584, \"pila\": 61, \"pild\": 1, \"pile\": 55, \"pilf\": 5, \"pilg\": 49, \"pili\": 8, \"pill\": 226, \"pilo\": 109, \"pilt\": 1, \"pim\": 20, \"pime\": 7, \"pimp\": 10, \"pin\": 377, \"pina\": 3, \"pinb\": 1, \"pinc\": 45, \"pind\": 12, \"pine\": 43, \"ping\": 2, \"pinh\": 2, \"pini\": 15, \"pink\": 10, \"pinm\": 1, \"pinn\": 27, \"pino\": 3, \"pinp\": 8, \"pins\": 2, \"pint\": 2, \"pio\": 127, \"pion\": 93, \"piou\": 34, \"pip\": 175, \"pipe\": 50, \"pipg\": 1, \"pipi\": 13, \"pipp\": 1, \"piq\": 11, \"piqu\": 11, \"pir\": 62, \"pira\": 54, \"piri\": 1, \"piro\": 7, \"pis\": 107, \"pisc\": 2, \"pisg\": 5, \"pisi\": 2, \"pism\": 1, \"piso\": 1, \"pisp\": 1, \"piss\": 6, \"pist\": 86, \"pit\": 655, \"pitc\": 293, \"pite\": 9, \"pitf\": 6, \"pith\": 4, \"piti\": 76, \"pitn\": 1, \"pitr\": 3, \"pitt\": 53, \"pitu\": 11, \"pity\": 12, \"piu\": 2, \"piv\": 12, \"pivo\": 12, \"piw\": 1, \"piwe\": 1, \"pix\": 1, \"pixi\": 1, \"piz\": 6, \"piza\": 2, \"pizz\": 4, \"pla\": 6872, \"plac\": 2954, \"plag\": 179, \"plai\": 555, \"plan\": 1243, \"plaq\": 6, \"plas\": 133, \"plat\": 300, \"plau\": 14, \"play\": 677, \"plaz\": 5, \"ple\": 2042, \"plea\": 1735, \"pleb\": 9, \"pled\": 55, \"plee\": 1, \"plei\": 6, \"plen\": 198, \"plet\": 1, \"pleu\": 12, \"plex\": 1, \"pli\": 44, \"plia\": 6, \"plie\": 5, \"plig\": 22, \"plin\": 10, \"plit\": 1, \"plo\": 220, \"plod\": 11, \"plop\": 1, \"plot\": 38, \"plou\": 59, \"plow\": 43, \"plu\": 492, \"pluc\": 117, \"plug\": 9, \"plum\": 128, \"plun\": 96, \"plur\": 4, \"plus\": 6, \"plut\": 5, \"ply\": 39, \"plyi\": 4, \"plym\": 25, \"plyw\": 9, \"pne\": 3, \"pneu\": 3, \"poa\": 3, \"poac\": 2, \"poar\": 1, \"pob\": 2, \"pobo\": 2, \"poc\": 238, \"poca\": 3, \"poch\": 2, \"pock\": 230, \"poco\": 1, \"pod\": 31, \"podg\": 24, \"podi\": 1, \"podo\": 1, \"poe\": 698, \"poem\": 159, \"poep\": 1, \"poes\": 3, \"poet\": 266, \"pog\": 3, \"pogr\": 2, \"pogu\": 1, \"poh\": 16, \"pohl\": 5, \"poi\": 1393, \"poig\": 13, \"poin\": 1238, \"poir\": 2, \"pois\": 127, \"poit\": 5, \"pok\": 45, \"poke\": 26, \"poki\": 7, \"pol\": 1865, \"pola\": 93, \"pold\": 1, \"pole\": 35, \"poli\": 1343, \"polk\": 3, \"poll\": 118, \"polo\": 60, \"polt\": 6, \"poly\": 110, \"pom\": 146, \"poma\": 2, \"pome\": 34, \"pomf\": 44, \"pomh\": 2, \"pomm\": 8, \"pomo\": 3, \"pomp\": 31, \"pon\": 260, \"ponc\": 8, \"pond\": 96, \"pone\": 1, \"poni\": 14, \"ponk\": 1, \"pono\": 1, \"pont\": 37, \"pony\": 1, \"poo\": 1388, \"pooc\": 1, \"pood\": 2, \"pooh\": 4, \"pool\": 53, \"poor\": 80, \"pop\": 495, \"popa\": 1, \"pope\": 4, \"popi\": 7, \"popl\": 8, \"popp\": 22, \"pops\": 1, \"popu\": 382, \"por\": 855, \"pora\": 1, \"porc\": 107, \"pore\": 6, \"porg\": 1, \"pori\": 7, \"pork\": 3, \"porn\": 6, \"poro\": 16, \"porp\": 34, \"porr\": 3, \"port\": 557, \"poru\": 2, \"pos\": 2605, \"pose\": 13, \"posh\": 1, \"posi\": 
517, \"poss\": 1559, \"post\": 317, \"pot\": 353, \"pota\": 51, \"potb\": 3, \"pote\": 154, \"poth\": 1, \"poti\": 8, \"potl\": 2, \"poto\": 8, \"potp\": 1, \"pots\": 7, \"pott\": 69, \"pou\": 569, \"pouc\": 7, \"poug\": 2, \"poui\": 1, \"poul\": 24, \"poun\": 203, \"poup\": 1, \"pour\": 210, \"pous\": 4, \"pout\": 2, \"pov\": 63, \"pove\": 63, \"pow\": 1417, \"powd\": 84, \"powe\": 1319, \"powi\": 1, \"powo\": 1, \"powr\": 11, \"poy\": 21, \"poyn\": 5, \"poys\": 16, \"poz\": 5, \"pozz\": 5, \"pra\": 2072, \"prab\": 2, \"prac\": 463, \"prad\": 1, \"prae\": 3, \"prag\": 10, \"prai\": 610, \"pram\": 1, \"pran\": 14, \"prat\": 36, \"prav\": 1, \"praw\": 1, \"pray\": 389, \"pre\": 7401, \"prea\": 251, \"preb\": 1, \"prec\": 707, \"pred\": 162, \"pree\": 13, \"pref\": 237, \"preg\": 22, \"preh\": 9, \"prei\": 2, \"prej\": 60, \"prel\": 66, \"prem\": 117, \"pren\": 9, \"preo\": 25, \"prep\": 708, \"prer\": 7, \"pres\": 3480, \"pret\": 611, \"preu\": 10, \"prev\": 768, \"prew\": 8, \"prex\": 1, \"prey\": 2, \"pri\": 4316, \"pria\": 7, \"pric\": 374, \"prid\": 223, \"prie\": 1213, \"prig\": 1, \"prim\": 361, \"prin\": 1171, \"prio\": 87, \"prip\": 1, \"pris\": 312, \"priu\": 6, \"priv\": 436, \"priz\": 116, \"pro\": 11921, \"proa\": 1, \"prob\": 1201, \"proc\": 1104, \"prod\": 927, \"proe\": 2, \"prof\": 993, \"prog\": 815, \"proh\": 41, \"proi\": 1, \"proj\": 305, \"prok\": 39, \"prol\": 105, \"prom\": 930, \"pron\": 176, \"proo\": 169, \"prop\": 2121, \"pror\": 1, \"pros\": 478, \"prot\": 503, \"prou\": 309, \"prov\": 1578, \"prow\": 38, \"prox\": 21, \"proy\": 1, \"pru\": 141, \"prud\": 108, \"prun\": 17, \"prur\": 3, \"prus\": 12, \"prut\": 1, \"pry\": 22, \"prye\": 1, \"pryi\": 7, \"pryn\": 2, \"pryt\": 12, \"psa\": 67, \"psal\": 67, \"pse\": 28, \"pseu\": 28, \"psh\": 8, \"psha\": 8, \"psi\": 5, \"psit\": 5, \"psy\": 163, \"psyc\": 160, \"psyl\": 3, \"pte\": 1, \"pter\": 1, \"pto\": 29, \"ptol\": 29, \"pua\": 4, \"pual\": 1, \"pub\": 999, \"pube\": 4, \"publ\": 994, \"puc\": 7, \"pucc\": 3, \"puck\": 4, \"pud\": 28, \"pudd\": 27, \"pude\": 1, \"pue\": 26, \"puer\": 26, \"puf\": 57, \"puff\": 37, \"pug\": 15, \"puge\": 2, \"pugi\": 3, \"pugn\": 4, \"puh\": 1, \"puhi\": 1, \"pui\": 9, \"puis\": 9, \"puk\": 1, \"pul\": 619, \"pula\": 1, \"puli\": 3, \"pull\": 289, \"pulm\": 28, \"pulo\": 2, \"pulp\": 34, \"puls\": 74, \"pult\": 1, \"pulv\": 7, \"pum\": 68, \"pumb\": 3, \"pumm\": 1, \"pump\": 43, \"pun\": 295, \"punc\": 72, \"pund\": 2, \"pung\": 6, \"puni\": 192, \"punk\": 1, \"punn\": 2, \"puno\": 2, \"puns\": 1, \"punt\": 1, \"pup\": 108, \"pupa\": 2, \"pupe\": 3, \"pupi\": 62, \"pupp\": 39, \"pur\": 1848, \"purc\": 172, \"purd\": 13, \"pure\": 63, \"purg\": 62, \"puri\": 118, \"purl\": 9, \"purp\": 696, \"purr\": 5, \"purs\": 457, \"purt\": 3, \"puru\": 1, \"purv\": 7, \"pus\": 272, \"push\": 184, \"pusi\": 1, \"puss\": 7, \"put\": 299, \"puta\": 2, \"pute\": 1, \"puti\": 1, \"putn\": 21, \"puto\": 1, \"putr\": 7, \"putt\": 204, \"putu\": 1, \"puz\": 127, \"puze\": 1, \"puzz\": 126, \"pye\": 1, \"pyg\": 2, \"pyga\": 1, \"pygm\": 1, \"pyh\": 1, \"pyhr\": 1, \"pyj\": 3, \"pyja\": 3, \"pyk\": 2, \"pykn\": 2, \"pyn\": 1, \"pynt\": 1, \"pyo\": 2, \"pyoc\": 1, \"pyor\": 1, \"pyr\": 50, \"pyra\": 24, \"pyre\": 7, \"pyro\": 6, \"pyrr\": 12, \"pys\": 1, \"pysc\": 1, \"pyt\": 20, \"pyth\": 20, \"qua\": 1049, \"quac\": 26, \"quad\": 45, \"quaf\": 3, \"quag\": 1, \"quah\": 2, \"quai\": 38, \"quak\": 56, \"qual\": 314, \"quan\": 97, \"quar\": 428, \"quas\": 11, \"quat\": 4, \"quav\": 5, \"quay\": 17, 
\"que\": 1706, \"quea\": 1, \"queb\": 5, \"quee\": 687, \"quel\": 18, \"quem\": 2, \"quen\": 40, \"quer\": 21, \"ques\": 929, \"quet\": 2, \"queu\": 1, \"qui\": 2683, \"quib\": 3, \"quic\": 557, \"quid\": 1, \"quie\": 437, \"quil\": 25, \"quin\": 69, \"quip\": 1, \"quir\": 21, \"quit\": 1438, \"quiv\": 44, \"quix\": 7, \"quiz\": 9, \"quo\": 96, \"quog\": 1, \"quoh\": 10, \"quoi\": 4, \"quot\": 78, \"quy\": 2, \"quyn\": 2, \"raa\": 8, \"raam\": 7, \"rab\": 182, \"raba\": 2, \"rabb\": 149, \"rabe\": 3, \"rabi\": 5, \"rabm\": 2, \"rabs\": 19, \"rac\": 551, \"racc\": 2, \"race\": 73, \"rach\": 101, \"raci\": 62, \"rack\": 30, \"racq\": 1, \"rad\": 481, \"rada\": 27, \"radd\": 1, \"rade\": 1, \"radh\": 1, \"radi\": 428, \"radn\": 22, \"rae\": 1, \"raes\": 1, \"raf\": 25, \"rafa\": 1, \"rafe\": 1, \"raff\": 1, \"raft\": 16, \"rag\": 226, \"raga\": 2, \"rage\": 20, \"ragg\": 39, \"ragi\": 27, \"ragu\": 1, \"rah\": 12, \"raha\": 11, \"rahe\": 1, \"rai\": 1227, \"raid\": 10, \"rail\": 174, \"raim\": 62, \"rain\": 82, \"rais\": 574, \"raj\": 1, \"raja\": 1, \"rak\": 41, \"rake\": 13, \"raki\": 5, \"rakk\": 2, \"ral\": 58, \"rale\": 1, \"rall\": 29, \"ralp\": 23, \"ram\": 283, \"rama\": 54, \"ramb\": 21, \"rame\": 41, \"rami\": 12, \"ramm\": 10, \"ramo\": 27, \"ramp\": 22, \"rams\": 12, \"ran\": 822, \"rana\": 1, \"ranc\": 64, \"rand\": 168, \"rang\": 290, \"rank\": 109, \"rans\": 43, \"rant\": 2, \"rany\": 1, \"rao\": 2, \"raou\": 2, \"rap\": 391, \"rapa\": 4, \"rape\": 3, \"raph\": 20, \"rapi\": 261, \"rapo\": 1, \"rapp\": 29, \"raps\": 2, \"rapt\": 38, \"rapu\": 1, \"rar\": 139, \"rare\": 51, \"rari\": 7, \"rarm\": 1, \"ras\": 91, \"rasc\": 19, \"rase\": 1, \"rash\": 8, \"raso\": 1, \"rasp\": 38, \"rast\": 1, \"rat\": 1962, \"rata\": 2, \"ratc\": 29, \"rate\": 120, \"ratf\": 1, \"rath\": 1186, \"rati\": 232, \"ratl\": 1, \"rato\": 4, \"ratt\": 73, \"rau\": 21, \"rauc\": 10, \"raue\": 7, \"raui\": 2, \"raus\": 2, \"rav\": 75, \"rava\": 2, \"rave\": 40, \"ravi\": 32, \"raw\": 14, \"rawb\": 1, \"rawh\": 1, \"rawl\": 7, \"rawn\": 1, \"raws\": 3, \"ray\": 97, \"rayb\": 48, \"raye\": 2, \"raym\": 19, \"rayn\": 2, \"rayt\": 1, \"raz\": 37, \"raze\": 1, \"razi\": 1, \"razo\": 34, \"rbe\": 1, \"rber\": 1, \"rbi\": 1, \"rca\": 2, \"rcap\": 1, \"rcav\": 1, \"rdw\": 4, \"rea\": 5991, \"reac\": 914, \"read\": 1121, \"reaf\": 9, \"reag\": 4, \"reai\": 4, \"reak\": 1, \"real\": 1480, \"ream\": 7, \"rean\": 2, \"reap\": 76, \"rear\": 53, \"reas\": 1156, \"reav\": 3, \"reaw\": 1, \"reb\": 423, \"rebe\": 262, \"rebi\": 3, \"rebo\": 14, \"rebu\": 137, \"rec\": 3845, \"reca\": 151, \"rece\": 1607, \"rech\": 24, \"reci\": 83, \"reck\": 138, \"recl\": 37, \"reco\": 1582, \"recr\": 111, \"rect\": 63, \"recu\": 47, \"red\": 660, \"reda\": 4, \"redb\": 6, \"redc\": 17, \"redd\": 53, \"rede\": 220, \"redf\": 1, \"redh\": 14, \"redi\": 9, \"redl\": 4, \"redn\": 7, \"redo\": 22, \"redp\": 1, \"redr\": 14, \"reds\": 2, \"redt\": 4, \"redu\": 265, \"redv\": 1, \"redw\": 10, \"ree\": 186, \"reec\": 2, \"reed\": 21, \"reef\": 7, \"reek\": 5, \"reel\": 31, \"reem\": 4, \"reen\": 14, \"rees\": 6, \"reev\": 6, \"reex\": 10, \"ref\": 1393, \"refa\": 1, \"refe\": 273, \"refi\": 62, \"refl\": 322, \"refo\": 88, \"refr\": 181, \"refu\": 466, \"reg\": 1472, \"rega\": 587, \"rege\": 18, \"regi\": 416, \"regr\": 144, \"regu\": 306, \"reh\": 135, \"reha\": 33, \"rehe\": 30, \"reho\": 64, \"rehu\": 8, \"rei\": 594, \"reic\": 9, \"reif\": 1, \"reig\": 453, \"reil\": 2, \"reim\": 10, \"rein\": 87, \"reio\": 4, \"reip\": 1, \"reis\": 3, 
\"reit\": 16, \"rej\": 526, \"reje\": 135, \"rejo\": 391, \"rek\": 6, \"reke\": 5, \"reki\": 1, \"rel\": 1917, \"rela\": 847, \"rele\": 208, \"reli\": 778, \"rell\": 3, \"relo\": 3, \"relu\": 52, \"rely\": 9, \"rem\": 3232, \"rema\": 1299, \"remb\": 4, \"reme\": 990, \"remi\": 182, \"remm\": 2, \"remn\": 103, \"remo\": 642, \"remp\": 2, \"remu\": 6, \"ren\": 611, \"rena\": 54, \"renc\": 3, \"rend\": 180, \"rene\": 109, \"renf\": 2, \"reni\": 2, \"renn\": 1, \"reno\": 55, \"rens\": 4, \"rent\": 42, \"renu\": 2, \"renv\": 1, \"reo\": 42, \"reop\": 3, \"reor\": 39, \"rep\": 3381, \"repa\": 193, \"repe\": 664, \"reph\": 21, \"repi\": 5, \"repl\": 862, \"repn\": 2, \"repo\": 634, \"repr\": 709, \"rept\": 8, \"repu\": 280, \"req\": 828, \"requ\": 828, \"rer\": 14, \"rere\": 11, \"reru\": 3, \"res\": 5249, \"resa\": 2, \"resc\": 61, \"rese\": 595, \"resh\": 5, \"resi\": 500, \"resn\": 2, \"reso\": 599, \"resp\": 1093, \"rest\": 690, \"resu\": 784, \"ret\": 2070, \"reta\": 175, \"retc\": 2, \"rete\": 23, \"reth\": 3, \"reti\": 183, \"reto\": 26, \"retr\": 163, \"retu\": 1493, \"rety\": 2, \"reu\": 172, \"reub\": 94, \"reue\": 43, \"reui\": 2, \"reum\": 1, \"reun\": 18, \"reuo\": 4, \"reup\": 1, \"reus\": 2, \"reut\": 1, \"reuv\": 5, \"rev\": 1221, \"reva\": 5, \"reve\": 659, \"revi\": 250, \"revo\": 291, \"revr\": 1, \"revu\": 12, \"revv\": 1, \"rew\": 200, \"rewa\": 191, \"rewr\": 8, \"rex\": 7, \"rexr\": 7, \"rey\": 24, \"reyd\": 1, \"reye\": 1, \"reyn\": 22, \"rez\": 15, \"reze\": 2, \"rezi\": 12, \"rezo\": 1, \"rhe\": 48, \"rhea\": 1, \"rheg\": 1, \"rhei\": 3, \"rhen\": 5, \"rhes\": 1, \"rhet\": 11, \"rheu\": 23, \"rhew\": 1, \"rhi\": 22, \"rhin\": 22, \"rho\": 129, \"rhod\": 127, \"rhom\": 1, \"rhon\": 1, \"rhu\": 3, \"rhub\": 3, \"rhy\": 87, \"rhym\": 27, \"rhyt\": 60, \"ria\": 3, \"rib\": 125, \"riba\": 10, \"ribb\": 55, \"ribc\": 1, \"ribe\": 2, \"ribl\": 11, \"ribo\": 2, \"ric\": 734, \"rica\": 3, \"ricc\": 5, \"rich\": 335, \"rick\": 12, \"rico\": 1, \"rid\": 466, \"ridd\": 65, \"ride\": 85, \"ridg\": 54, \"ridi\": 147, \"ridp\": 1, \"rie\": 7, \"rief\": 3, \"rieg\": 2, \"riem\": 1, \"ries\": 1, \"rif\": 136, \"riff\": 1, \"rifl\": 131, \"rift\": 1, \"rig\": 2688, \"riga\": 1, \"rigg\": 59, \"righ\": 2530, \"rigi\": 68, \"rigo\": 25, \"rigv\": 1, \"ril\": 7, \"rilk\": 1, \"rill\": 5, \"rim\": 37, \"rima\": 3, \"rimb\": 1, \"rimf\": 6, \"rimi\": 1, \"riml\": 1, \"rimm\": 20, \"rin\": 324, \"rina\": 4, \"ring\": 143, \"rink\": 2, \"rinn\": 1, \"rins\": 14, \"rio\": 41, \"riot\": 25, \"rip\": 144, \"ripe\": 24, \"riph\": 2, \"ripl\": 1, \"ripo\": 5, \"ripp\": 52, \"ripr\": 2, \"ris\": 929, \"rise\": 173, \"risi\": 253, \"risk\": 24, \"riss\": 2, \"rit\": 88, \"ritc\": 4, \"rite\": 23, \"rith\": 2, \"riti\": 1, \"rits\": 1, \"ritt\": 11, \"ritu\": 36, \"riu\": 3, \"riua\": 1, \"riue\": 2, \"riv\": 746, \"riva\": 62, \"rive\": 674, \"rivi\": 2, \"rivu\": 8, \"riz\": 5, \"rizp\": 4, \"rizz\": 1, \"roa\": 805, \"roac\": 2, \"road\": 120, \"roam\": 16, \"roan\": 4, \"roar\": 126, \"roas\": 43, \"roat\": 1, \"rob\": 549, \"roba\": 9, \"robb\": 122, \"robe\": 281, \"robi\": 60, \"robo\": 7, \"robu\": 20, \"roc\": 544, \"rocc\": 2, \"roch\": 7, \"rock\": 273, \"roco\": 7, \"rod\": 139, \"rodd\": 7, \"rode\": 9, \"rodg\": 7, \"rodm\": 1, \"rodn\": 6, \"roe\": 20, \"roeb\": 14, \"roem\": 1, \"rog\": 46, \"roge\": 27, \"rogu\": 19, \"roh\": 1, \"rohg\": 1, \"roi\": 3, \"roil\": 3, \"rok\": 4, \"roko\": 3, \"rol\": 629, \"rola\": 8, \"role\": 54, \"roll\": 328, \"roln\": 1, \"rolo\": 1, \"rom\": 467, 
\"roma\": 299, \"rome\": 10, \"romi\": 3, \"romm\": 1, \"romp\": 2, \"romu\": 1, \"ron\": 19, \"rona\": 5, \"ronc\": 1, \"rond\": 6, \"ronn\": 6, \"rony\": 1, \"roo\": 1742, \"rood\": 2, \"roof\": 33, \"rook\": 23, \"room\": 158, \"roon\": 7, \"roos\": 43, \"root\": 132, \"rop\": 143, \"rope\": 47, \"roq\": 2, \"roqu\": 2, \"ror\": 91, \"rors\": 1, \"ros\": 824, \"rosa\": 58, \"rosb\": 4, \"rose\": 103, \"rosi\": 61, \"rosl\": 1, \"ross\": 73, \"rost\": 7, \"rosw\": 1, \"rosy\": 1, \"rot\": 106, \"rota\": 49, \"rote\": 2, \"roth\": 1, \"roto\": 8, \"rott\": 31, \"rotu\": 9, \"rou\": 1766, \"roub\": 1, \"rouc\": 1, \"roug\": 170, \"roul\": 5, \"roun\": 1338, \"rour\": 22, \"rous\": 95, \"rout\": 110, \"rov\": 23, \"rove\": 10, \"rovi\": 9, \"row\": 105, \"rowd\": 7, \"rowe\": 11, \"rowi\": 12, \"rowl\": 4, \"rows\": 3, \"rowz\": 1, \"rox\": 1, \"roy\": 190, \"roya\": 185, \"royc\": 3, \"royl\": 1, \"roz\": 3, \"roze\": 2, \"rozo\": 1, \"rst\": 6, \"rsta\": 6, \"rua\": 9, \"ruan\": 8, \"ruar\": 1, \"rub\": 140, \"ruba\": 1, \"rubb\": 112, \"rubd\": 1, \"rube\": 1, \"rubi\": 14, \"rubr\": 1, \"ruc\": 2, \"ruce\": 1, \"ruck\": 1, \"rud\": 129, \"rudd\": 24, \"rude\": 15, \"rudi\": 9, \"rudk\": 1, \"rudo\": 4, \"rudy\": 1, \"rue\": 14, \"ruef\": 12, \"ruf\": 22, \"ruff\": 19, \"rufu\": 3, \"rug\": 46, \"ruge\": 5, \"rugg\": 36, \"ruh\": 1, \"ruha\": 1, \"rui\": 166, \"ruid\": 1, \"ruin\": 89, \"rul\": 630, \"rule\": 370, \"ruli\": 38, \"rum\": 122, \"ruma\": 4, \"rumb\": 27, \"rumd\": 2, \"rume\": 2, \"rumf\": 2, \"rumi\": 10, \"rumm\": 9, \"rumo\": 50, \"rump\": 7, \"rums\": 1, \"rumt\": 1, \"run\": 497, \"runa\": 15, \"runc\": 1, \"rund\": 7, \"rune\": 1, \"runi\": 1, \"runn\": 350, \"runo\": 3, \"runs\": 1, \"runu\": 3, \"runw\": 8, \"runy\": 4, \"rup\": 32, \"rupe\": 20, \"rupp\": 1, \"rupt\": 11, \"rur\": 68, \"rura\": 68, \"rus\": 779, \"rush\": 161, \"rusk\": 4, \"rusl\": 1, \"russ\": 387, \"rust\": 90, \"rut\": 77, \"ruta\": 2, \"ruth\": 33, \"ruts\": 1, \"rutt\": 1, \"ruy\": 1, \"ruys\": 1, \"rwa\": 1, \"rwar\": 1, \"rya\": 34, \"ryc\": 1, \"rych\": 1, \"ryd\": 1, \"ryde\": 1, \"rye\": 1, \"ryer\": 1, \"ryl\": 1, \"ryli\": 1, \"ryn\": 1, \"ryr\": 1, \"ryra\": 1, \"ryu\": 8, \"ryus\": 8, \"saa\": 10, \"saab\": 1, \"saad\": 1, \"saam\": 8, \"sab\": 240, \"saba\": 4, \"sabb\": 186, \"sabe\": 13, \"sabi\": 7, \"sabl\": 13, \"sabo\": 4, \"sabr\": 8, \"sabt\": 4, \"sac\": 676, \"saca\": 2, \"sacc\": 1, \"sace\": 1, \"sach\": 6, \"sack\": 69, \"sacr\": 568, \"sad\": 179, \"sadd\": 96, \"sadi\": 11, \"sadl\": 52, \"sadn\": 17, \"sado\": 3, \"saf\": 496, \"safa\": 3, \"safe\": 257, \"saff\": 13, \"saft\": 1, \"sag\": 65, \"saga\": 21, \"sage\": 5, \"sagg\": 12, \"sagi\": 4, \"sagu\": 1, \"sah\": 4, \"saha\": 3, \"sahj\": 1, \"sai\": 13508, \"said\": 31, \"saie\": 3, \"saig\": 1, \"sail\": 450, \"sain\": 179, \"sait\": 1271, \"sak\": 395, \"sake\": 43, \"sako\": 1, \"sal\": 1130, \"sala\": 107, \"salc\": 4, \"sale\": 208, \"salf\": 2, \"sali\": 57, \"sall\": 57, \"salm\": 20, \"salo\": 34, \"salp\": 1, \"sals\": 1, \"salt\": 53, \"salu\": 134, \"salv\": 224, \"saly\": 3, \"sam\": 2523, \"sama\": 136, \"samb\": 2, \"same\": 6, \"samg\": 1, \"saml\": 4, \"samm\": 46, \"samo\": 12, \"samp\": 123, \"sams\": 43, \"samu\": 192, \"san\": 968, \"sana\": 3, \"sanb\": 10, \"sanc\": 354, \"sand\": 145, \"sane\": 10, \"sanf\": 2, \"sang\": 28, \"sanh\": 1, \"sani\": 42, \"sans\": 15, \"sant\": 46, \"sap\": 40, \"saph\": 1, \"sapi\": 7, \"sapl\": 5, \"sapo\": 1, \"sapp\": 24, \"sar\": 201, \"sara\": 119, 
\"sarc\": 15, \"sard\": 24, \"sare\": 1, \"sarg\": 6, \"sari\": 2, \"sarj\": 1, \"sark\": 2, \"sarm\": 2, \"saro\": 1, \"sarp\": 2, \"sarr\": 1, \"sars\": 2, \"sart\": 10, \"saru\": 2, \"sarv\": 2, \"sas\": 24, \"sash\": 8, \"sask\": 1, \"sass\": 4, \"sat\": 915, \"sata\": 149, \"satc\": 1, \"sate\": 23, \"sati\": 614, \"satt\": 1, \"satu\": 119, \"saty\": 4, \"sau\": 552, \"saua\": 4, \"sauc\": 49, \"saud\": 4, \"saue\": 4, \"saui\": 2, \"sauk\": 1, \"saum\": 2, \"saun\": 20, \"sauo\": 1, \"saur\": 1, \"saus\": 12, \"saut\": 3, \"sauv\": 1, \"sav\": 1044, \"sava\": 173, \"save\": 231, \"savi\": 139, \"savo\": 100, \"savv\": 1, \"saw\": 70, \"sawa\": 2, \"sawc\": 8, \"sawd\": 3, \"sawe\": 34, \"sawh\": 1, \"sawi\": 3, \"sawm\": 1, \"sawn\": 1, \"sawt\": 3, \"sawy\": 2, \"sax\": 38, \"saxo\": 35, \"saxt\": 3, \"say\": 2390, \"saye\": 65, \"sayi\": 1900, \"sayl\": 5, \"sayo\": 1, \"says\": 4, \"sba\": 2, \"sbc\": 1, \"sbcs\": 1, \"sbi\": 3, \"sbir\": 3, \"sca\": 1015, \"scab\": 19, \"scae\": 4, \"scaf\": 24, \"scai\": 1, \"scal\": 177, \"scam\": 13, \"scan\": 93, \"scap\": 28, \"scar\": 394, \"scat\": 218, \"scav\": 2, \"sce\": 485, \"sced\": 1, \"scen\": 417, \"scep\": 66, \"scev\": 1, \"sch\": 1466, \"scha\": 23, \"sche\": 245, \"schi\": 22, \"schl\": 7, \"schm\": 7, \"schn\": 16, \"scho\": 1085, \"schr\": 6, \"schu\": 25, \"schw\": 30, \"sci\": 473, \"scia\": 4, \"scie\": 446, \"scil\": 2, \"scim\": 5, \"scin\": 1, \"scio\": 2, \"scip\": 1, \"scis\": 12, \"scl\": 3, \"scle\": 3, \"sco\": 747, \"scob\": 1, \"scoe\": 6, \"scof\": 9, \"scol\": 33, \"scon\": 5, \"scoo\": 25, \"scop\": 37, \"scor\": 339, \"scot\": 196, \"scou\": 79, \"scow\": 15, \"scr\": 914, \"scra\": 208, \"scre\": 328, \"scri\": 239, \"scro\": 15, \"scru\": 124, \"scu\": 129, \"scud\": 2, \"scuf\": 5, \"scul\": 57, \"scum\": 1, \"scup\": 2, \"scur\": 12, \"scus\": 1, \"scut\": 32, \"scy\": 7, \"scyl\": 1, \"scyt\": 6, \"sde\": 1, \"sdei\": 1, \"sea\": 1593, \"seab\": 10, \"seac\": 3, \"sead\": 1, \"seaf\": 9, \"seag\": 3, \"seah\": 2, \"seal\": 101, \"seam\": 91, \"sean\": 1, \"seao\": 1, \"seap\": 6, \"seaq\": 3, \"sear\": 268, \"seas\": 334, \"seat\": 192, \"seau\": 1, \"seav\": 1, \"seaw\": 22, \"seb\": 15, \"seba\": 10, \"sebo\": 1, \"sec\": 2557, \"seca\": 29, \"secc\": 3, \"sece\": 21, \"sech\": 2, \"secl\": 26, \"seco\": 984, \"secr\": 803, \"sect\": 292, \"secu\": 385, \"sed\": 93, \"seda\": 14, \"sede\": 7, \"sedg\": 6, \"sedi\": 20, \"sedu\": 46, \"see\": 5701, \"seeb\": 5, \"seed\": 95, \"seei\": 429, \"seek\": 220, \"seel\": 3, \"seem\": 2103, \"seen\": 34, \"seep\": 6, \"seer\": 10, \"sees\": 58, \"seet\": 79, \"seev\": 1, \"seg\": 66, \"sega\": 1, \"segm\": 25, \"sego\": 4, \"segr\": 31, \"segu\": 5, \"sei\": 236, \"seid\": 2, \"seig\": 6, \"sein\": 4, \"seir\": 1, \"seis\": 16, \"seiz\": 162, \"sel\": 1383, \"sela\": 76, \"selb\": 1, \"seld\": 110, \"sele\": 233, \"self\": 444, \"seli\": 12, \"selk\": 17, \"sell\": 88, \"selm\": 2, \"sels\": 1, \"selu\": 28, \"selv\": 19, \"sem\": 128, \"sema\": 10, \"semb\": 8, \"seme\": 16, \"semi\": 79, \"semm\": 1, \"semp\": 2, \"semr\": 1, \"sen\": 3383, \"sena\": 142, \"senc\": 7, \"send\": 147, \"sene\": 7, \"seni\": 59, \"senn\": 16, \"seno\": 7, \"sens\": 1123, \"sent\": 313, \"senu\": 1, \"seo\": 3, \"seor\": 1, \"seou\": 1, \"sep\": 621, \"sepa\": 406, \"sepe\": 1, \"seph\": 9, \"sepi\": 1, \"sept\": 90, \"sepu\": 76, \"seq\": 65, \"sequ\": 65, \"ser\": 3733, \"sera\": 51, \"serb\": 2, \"sere\": 71, \"serf\": 3, \"serg\": 51, \"seri\": 529, \"serj\": 5, \"serl\": 
4, \"serm\": 27, \"sero\": 2, \"serp\": 139, \"serr\": 8, \"seru\": 69, \"serv\": 2740, \"ses\": 116, \"sesa\": 7, \"sess\": 109, \"set\": 711, \"setb\": 6, \"seth\": 3, \"setl\": 1, \"seto\": 1, \"sets\": 1, \"sett\": 593, \"setu\": 12, \"seu\": 16, \"seue\": 15, \"seur\": 1, \"sev\": 1801, \"seve\": 1799, \"sevi\": 2, \"sew\": 84, \"sewa\": 38, \"sewe\": 28, \"sewi\": 17, \"sex\": 116, \"sexc\": 1, \"sexe\": 20, \"sexm\": 1, \"sext\": 19, \"sexu\": 73, \"sey\": 53, \"seyc\": 1, \"seym\": 30, \"seyn\": 2, \"seyt\": 6, \"seyw\": 12, \"sfo\": 2, \"sfor\": 2, \"sha\": 16721, \"shaa\": 10, \"shab\": 41, \"shac\": 10, \"shad\": 550, \"shae\": 8, \"shaf\": 37, \"shag\": 16, \"shah\": 4, \"shak\": 397, \"shal\": 13701, \"sham\": 315, \"shan\": 29, \"shap\": 482, \"shar\": 860, \"shas\": 7, \"shat\": 45, \"shau\": 8, \"shav\": 80, \"shaw\": 30, \"shay\": 33, \"she\": 2469, \"shea\": 143, \"sheb\": 55, \"shec\": 76, \"shed\": 24, \"shee\": 427, \"shef\": 1, \"sheh\": 1, \"shei\": 6, \"shek\": 140, \"shel\": 363, \"shem\": 66, \"shen\": 9, \"shep\": 163, \"sher\": 118, \"shes\": 14, \"shet\": 9, \"shev\": 3, \"shew\": 284, \"shi\": 2186, \"shib\": 5, \"shic\": 1, \"shie\": 121, \"shif\": 135, \"shig\": 1, \"shih\": 3, \"shil\": 131, \"shim\": 88, \"shin\": 263, \"ship\": 409, \"shir\": 95, \"shis\": 9, \"shit\": 36, \"shiv\": 53, \"shiz\": 1, \"shk\": 1, \"shki\": 1, \"shl\": 3, \"shli\": 1, \"shm\": 1, \"shmo\": 1, \"shn\": 2, \"shni\": 1, \"shnu\": 1, \"sho\": 8261, \"shoa\": 24, \"shob\": 19, \"shoc\": 203, \"shod\": 1, \"shoe\": 152, \"shoh\": 1, \"shoj\": 1, \"shol\": 3, \"shom\": 2, \"shon\": 57, \"shoo\": 356, \"shop\": 96, \"shor\": 1189, \"shot\": 51, \"shou\": 4243, \"shov\": 54, \"show\": 812, \"shp\": 2, \"shpi\": 1, \"shpr\": 1, \"shr\": 365, \"shra\": 7, \"shre\": 66, \"shri\": 170, \"shro\": 45, \"shru\": 77, \"sht\": 10, \"shta\": 2, \"shti\": 3, \"shto\": 3, \"shtr\": 1, \"shu\": 576, \"shua\": 7, \"shub\": 3, \"shuc\": 4, \"shud\": 59, \"shuf\": 25, \"shuh\": 8, \"shui\": 1, \"shul\": 6, \"shum\": 1, \"shun\": 43, \"shup\": 5, \"shur\": 7, \"shus\": 21, \"shut\": 91, \"shy\": 23, \"shyl\": 13, \"shyn\": 10, \"sia\": 10, \"siah\": 1, \"siam\": 7, \"sib\": 40, \"sibb\": 8, \"sibe\": 12, \"sibi\": 5, \"sibl\": 3, \"sibm\": 5, \"sibr\": 1, \"siby\": 6, \"sic\": 401, \"sich\": 1, \"sici\": 31, \"sick\": 135, \"sicu\": 1, \"sid\": 1975, \"sidd\": 10, \"side\": 373, \"sidi\": 6, \"sidl\": 5, \"sidm\": 1, \"sidn\": 13, \"sido\": 20, \"sie\": 64, \"sieb\": 4, \"siec\": 2, \"sied\": 1, \"sieg\": 33, \"sien\": 3, \"siep\": 2, \"sier\": 8, \"sies\": 2, \"sieu\": 1, \"siev\": 8, \"sif\": 11, \"sift\": 5, \"sig\": 2142, \"sige\": 1, \"sigh\": 1020, \"sigm\": 6, \"sign\": 742, \"sigu\": 1, \"sih\": 42, \"siha\": 2, \"siho\": 40, \"sik\": 1, \"siko\": 1, \"sil\": 1560, \"sila\": 14, \"sile\": 845, \"silh\": 15, \"sili\": 8, \"silk\": 14, \"sill\": 89, \"silo\": 8, \"silu\": 4, \"silv\": 509, \"sim\": 1299, \"sima\": 1, \"simb\": 1, \"simc\": 1, \"sime\": 54, \"simi\": 293, \"simm\": 36, \"simo\": 87, \"simp\": 738, \"simr\": 1, \"simu\": 84, \"sin\": 3237, \"sina\": 46, \"sinc\": 1304, \"sind\": 1, \"sine\": 32, \"sinf\": 25, \"sing\": 797, \"sini\": 27, \"sink\": 60, \"sinl\": 6, \"sinn\": 258, \"sino\": 1, \"sint\": 4, \"sinu\": 11, \"sio\": 24, \"siou\": 9, \"sip\": 19, \"siph\": 3, \"sipp\": 16, \"sir\": 68, \"sira\": 1, \"sire\": 9, \"siri\": 5, \"sirl\": 3, \"siro\": 1, \"sirr\": 8, \"sis\": 855, \"sisa\": 2, \"sise\": 21, \"sisk\": 1, \"sist\": 829, \"sit\": 1052, \"sitd\": 1, \"site\": 
19, \"sith\": 1, \"siti\": 2, \"sitk\": 1, \"sitn\": 1, \"sits\": 2, \"sitt\": 429, \"situ\": 472, \"sitw\": 3, \"siv\": 2, \"siva\": 1, \"six\": 335, \"sixd\": 1, \"sixe\": 2, \"sixf\": 3, \"sixg\": 1, \"sixi\": 1, \"sixm\": 2, \"sixp\": 33, \"sixs\": 3, \"sixt\": 285, \"siz\": 264, \"siza\": 15, \"size\": 39, \"sizo\": 1, \"sizz\": 5, \"ska\": 8, \"skal\": 3, \"skar\": 1, \"skat\": 4, \"ske\": 148, \"skee\": 2, \"skei\": 1, \"skel\": 62, \"skep\": 17, \"sket\": 60, \"skew\": 6, \"ski\": 667, \"skid\": 6, \"skie\": 47, \"skif\": 19, \"skii\": 9, \"skij\": 1, \"skil\": 190, \"skim\": 12, \"skin\": 60, \"skip\": 34, \"skir\": 88, \"skit\": 2, \"skiw\": 1, \"sko\": 26, \"skol\": 11, \"skop\": 4, \"skor\": 10, \"skou\": 1, \"skr\": 6, \"skre\": 1, \"skri\": 5, \"sku\": 74, \"skul\": 69, \"skun\": 4, \"skur\": 1, \"sky\": 92, \"skyb\": 1, \"skyc\": 1, \"skyg\": 1, \"skyi\": 1, \"skyj\": 2, \"skyl\": 18, \"skyr\": 23, \"skys\": 4, \"skyt\": 1, \"skyw\": 35, \"sla\": 979, \"slab\": 7, \"slac\": 59, \"slad\": 1, \"slai\": 216, \"slak\": 3, \"slam\": 25, \"slan\": 70, \"slap\": 31, \"slas\": 30, \"slat\": 49, \"slau\": 108, \"slav\": 187, \"slay\": 36, \"sle\": 1128, \"sled\": 13, \"slee\": 723, \"slei\": 8, \"slen\": 55, \"slep\": 115, \"sleu\": 1, \"slew\": 6, \"sli\": 835, \"slic\": 42, \"slid\": 78, \"slie\": 1, \"slig\": 364, \"slil\": 5, \"slim\": 21, \"slin\": 27, \"slip\": 154, \"slit\": 5, \"sliu\": 2, \"sliv\": 4, \"slo\": 720, \"sloa\": 30, \"slob\": 2, \"sloc\": 5, \"slof\": 1, \"slog\": 14, \"sloo\": 13, \"slop\": 88, \"slos\": 1, \"slot\": 28, \"slou\": 23, \"slov\": 5, \"slow\": 334, \"slu\": 187, \"sluc\": 1, \"slud\": 4, \"slue\": 1, \"sluf\": 1, \"slug\": 39, \"slui\": 14, \"slum\": 85, \"slun\": 16, \"slur\": 2, \"slus\": 2, \"sly\": 5, \"slyl\": 3, \"slyn\": 2, \"sma\": 1526, \"smac\": 20, \"smal\": 1358, \"smar\": 98, \"smas\": 48, \"smat\": 2, \"sme\": 215, \"smea\": 8, \"smee\": 3, \"smel\": 203, \"smer\": 1, \"smi\": 1256, \"smil\": 749, \"smir\": 6, \"smit\": 499, \"smo\": 726, \"smoa\": 3, \"smok\": 279, \"smol\": 6, \"smoo\": 169, \"smot\": 265, \"smou\": 2, \"smu\": 28, \"smud\": 1, \"smug\": 10, \"smut\": 5, \"smy\": 7, \"smyl\": 1, \"smyr\": 2, \"smyt\": 4, \"sna\": 407, \"snac\": 9, \"snag\": 1, \"snai\": 18, \"snak\": 102, \"snap\": 69, \"snar\": 97, \"snat\": 73, \"snaz\": 1, \"sne\": 99, \"snea\": 31, \"snee\": 59, \"snel\": 9, \"sni\": 35, \"snic\": 4, \"snif\": 18, \"snig\": 2, \"snip\": 7, \"sniv\": 3, \"sno\": 351, \"snob\": 17, \"snod\": 5, \"snoo\": 5, \"snop\": 4, \"snor\": 34, \"snou\": 5, \"snow\": 76, \"snu\": 73, \"snub\": 4, \"snuc\": 1, \"snuf\": 33, \"snug\": 15, \"sny\": 2, \"snyd\": 2, \"soa\": 105, \"soak\": 25, \"soal\": 1, \"soap\": 9, \"soar\": 19, \"sob\": 134, \"sobb\": 28, \"sobe\": 80, \"sobi\": 1, \"sobr\": 9, \"soc\": 1157, \"soca\": 33, \"socc\": 3, \"soch\": 4, \"soci\": 1011, \"sock\": 94, \"soco\": 4, \"socr\": 4, \"sod\": 99, \"soda\": 11, \"sodd\": 10, \"sode\": 1, \"sodi\": 12, \"sodo\": 57, \"soe\": 18, \"soer\": 1, \"soev\": 17, \"sof\": 451, \"sofa\": 8, \"soft\": 201, \"sog\": 6, \"sogg\": 6, \"soh\": 3, \"soi\": 155, \"soig\": 1, \"soil\": 29, \"soir\": 5, \"soj\": 91, \"sojo\": 91, \"sok\": 10, \"soke\": 1, \"soko\": 9, \"sol\": 1830, \"sola\": 47, \"sold\": 284, \"sole\": 244, \"soli\": 458, \"soll\": 5, \"solo\": 346, \"sols\": 2, \"solu\": 99, \"solv\": 71, \"solw\": 1, \"som\": 7373, \"soma\": 3, \"somb\": 13, \"some\": 2966, \"somm\": 4, \"somn\": 6, \"somp\": 1, \"somt\": 2, \"son\": 1874, \"sona\": 24, \"sone\": 1, 
\"song\": 207, \"soni\": 6, \"sonn\": 56, \"sono\": 18, \"sonu\": 1, \"soo\": 1514, \"soon\": 124, \"soot\": 101, \"sop\": 137, \"sopa\": 1, \"soph\": 119, \"sopp\": 1, \"sopr\": 13, \"sops\": 1, \"sor\": 1580, \"sora\": 1, \"sorb\": 8, \"sorc\": 20, \"sord\": 6, \"sore\": 21, \"sorg\": 3, \"sori\": 1, \"soro\": 3, \"sorp\": 2, \"sorr\": 546, \"sort\": 72, \"sos\": 7, \"sosi\": 1, \"sost\": 2, \"sot\": 5, \"sota\": 2, \"sott\": 2, \"sotu\": 1, \"sou\": 3484, \"soub\": 1, \"souc\": 1, \"soud\": 1, \"soue\": 8, \"souf\": 1, \"soug\": 263, \"souk\": 1, \"soul\": 243, \"soun\": 857, \"soup\": 1, \"sour\": 231, \"sous\": 4, \"sout\": 848, \"souv\": 12, \"sov\": 241, \"sove\": 76, \"sovi\": 147, \"sovr\": 18, \"sow\": 93, \"sowb\": 2, \"sowe\": 44, \"sowi\": 5, \"sowr\": 2, \"sox\": 1, \"soxh\": 1, \"soy\": 14, \"soya\": 1, \"soyb\": 11, \"soyl\": 2, \"spa\": 1779, \"spac\": 429, \"spad\": 63, \"spag\": 2, \"spah\": 7, \"spai\": 20, \"spak\": 667, \"spal\": 1, \"span\": 117, \"spar\": 357, \"spas\": 27, \"spat\": 21, \"spav\": 2, \"spaw\": 2, \"spc\": 1, \"spe\": 4524, \"spea\": 1917, \"spec\": 1144, \"spee\": 592, \"speg\": 1, \"spei\": 1, \"spel\": 72, \"spen\": 495, \"sper\": 271, \"spes\": 1, \"spew\": 3, \"sph\": 86, \"sphe\": 79, \"sphi\": 3, \"sphy\": 4, \"spi\": 2119, \"spic\": 68, \"spid\": 10, \"spie\": 37, \"spig\": 6, \"spik\": 30, \"spil\": 38, \"spin\": 100, \"spio\": 1, \"spir\": 1573, \"spit\": 211, \"spl\": 286, \"spla\": 49, \"sple\": 112, \"spli\": 114, \"splo\": 3, \"splu\": 8, \"spo\": 2005, \"spoa\": 1, \"spof\": 1, \"spoi\": 305, \"spok\": 867, \"spol\": 2, \"spon\": 135, \"spoo\": 54, \"spor\": 174, \"spos\": 11, \"spot\": 112, \"spou\": 145, \"spoy\": 3, \"spr\": 1234, \"spra\": 207, \"spre\": 417, \"spri\": 518, \"spro\": 28, \"spru\": 62, \"spu\": 131, \"spue\": 1, \"spum\": 3, \"spun\": 8, \"spur\": 57, \"sput\": 5, \"spuy\": 1, \"spy\": 8, \"spyc\": 1, \"spyi\": 7, \"squ\": 640, \"squa\": 485, \"sque\": 87, \"squi\": 68, \"sre\": 3, \"srel\": 2, \"sres\": 1, \"sss\": 2, \"sssa\": 1, \"ssss\": 1, \"sta\": 8967, \"stab\": 145, \"stac\": 54, \"stad\": 28, \"staf\": 205, \"stag\": 394, \"stai\": 335, \"stak\": 53, \"stal\": 165, \"stam\": 109, \"stan\": 1811, \"stap\": 5, \"star\": 1721, \"stas\": 7, \"stat\": 3086, \"stau\": 10, \"stav\": 75, \"stay\": 223, \"ste\": 2412, \"stea\": 646, \"sted\": 25, \"stee\": 511, \"stef\": 1, \"stei\": 55, \"stel\": 4, \"stem\": 46, \"sten\": 16, \"steo\": 1, \"step\": 467, \"ster\": 213, \"stet\": 9, \"steu\": 2, \"stev\": 61, \"stew\": 62, \"sti\": 2990, \"stic\": 273, \"stid\": 2, \"stif\": 130, \"stig\": 7, \"stil\": 2198, \"stim\": 70, \"stin\": 69, \"stip\": 10, \"stir\": 147, \"stit\": 16, \"stiv\": 1, \"sto\": 4362, \"stob\": 1, \"stoc\": 313, \"stod\": 1, \"stoi\": 16, \"stok\": 7, \"stol\": 138, \"stom\": 70, \"ston\": 701, \"stoo\": 1257, \"stop\": 419, \"stor\": 1003, \"stou\": 57, \"stov\": 44, \"stow\": 18, \"stp\": 2, \"stpo\": 2, \"str\": 7330, \"stra\": 2005, \"stre\": 2260, \"stri\": 1039, \"stro\": 1330, \"stru\": 688, \"stry\": 4, \"sts\": 1, \"stst\": 1, \"stu\": 1940, \"stua\": 24, \"stub\": 346, \"stuc\": 69, \"stud\": 991, \"stuf\": 127, \"stul\": 2, \"stum\": 143, \"stun\": 59, \"stup\": 109, \"stur\": 51, \"stut\": 2, \"sty\": 276, \"styg\": 5, \"styk\": 16, \"styl\": 237, \"stym\": 1, \"styr\": 14, \"styt\": 1, \"sua\": 8, \"suab\": 2, \"suav\": 5, \"sub\": 2121, \"suba\": 6, \"subb\": 2, \"subc\": 21, \"subd\": 94, \"subf\": 2, \"subg\": 10, \"subh\": 2, \"subi\": 11, \"subj\": 642, \"subl\": 41, \"subm\": 
240, \"subn\": 1, \"subo\": 39, \"subp\": 7, \"subr\": 3, \"subs\": 621, \"subt\": 165, \"subu\": 189, \"subv\": 13, \"subw\": 8, \"subz\": 1, \"suc\": 4664, \"succ\": 686, \"suce\": 1, \"such\": 1, \"suck\": 82, \"suct\": 2, \"sud\": 782, \"suda\": 1, \"sudd\": 767, \"sudi\": 1, \"sudr\": 1, \"suds\": 1, \"sue\": 19, \"sued\": 1, \"suez\": 2, \"suf\": 870, \"suff\": 869, \"sug\": 470, \"suga\": 94, \"sugg\": 376, \"suh\": 1, \"suht\": 1, \"sui\": 354, \"suic\": 30, \"suin\": 1, \"suit\": 214, \"suk\": 7, \"suka\": 5, \"sukk\": 1, \"suku\": 1, \"sul\": 154, \"sula\": 2, \"sulc\": 1, \"sulf\": 3, \"suli\": 6, \"sulk\": 34, \"sull\": 57, \"sulp\": 18, \"sult\": 23, \"sulz\": 5, \"sum\": 562, \"suma\": 9, \"suml\": 1, \"summ\": 511, \"sumn\": 2, \"sump\": 10, \"sumt\": 2, \"sun\": 806, \"suna\": 1, \"sunb\": 29, \"sund\": 294, \"sunf\": 5, \"sung\": 1, \"suni\": 1, \"sunk\": 13, \"sunl\": 75, \"sunm\": 1, \"sunn\": 48, \"sunr\": 53, \"suns\": 121, \"sunt\": 4, \"sunw\": 4, \"suo\": 1, \"suon\": 1, \"sup\": 2585, \"supe\": 634, \"supi\": 4, \"supp\": 1819, \"supr\": 123, \"sur\": 3197, \"surc\": 9, \"sure\": 463, \"surf\": 323, \"surg\": 89, \"surl\": 8, \"surm\": 39, \"surn\": 21, \"surp\": 589, \"surr\": 212, \"surt\": 4, \"suru\": 2, \"surv\": 243, \"sus\": 959, \"susa\": 333, \"susc\": 11, \"sush\": 2, \"susi\": 6, \"susp\": 515, \"susq\": 1, \"suss\": 13, \"sust\": 76, \"sut\": 16, \"sute\": 2, \"suth\": 6, \"suto\": 4, \"sutp\": 1, \"sutt\": 2, \"suv\": 8, \"suvo\": 8, \"suz\": 5, \"suza\": 1, \"suze\": 3, \"suzu\": 1, \"sva\": 1, \"sval\": 1, \"sve\": 3, \"svel\": 1, \"sven\": 1, \"svev\": 1, \"swa\": 533, \"swab\": 2, \"swac\": 1, \"swad\": 16, \"swag\": 15, \"swah\": 1, \"swai\": 3, \"swal\": 146, \"swam\": 32, \"swan\": 4, \"swar\": 164, \"swas\": 5, \"swat\": 14, \"sway\": 48, \"swe\": 1362, \"swea\": 302, \"swed\": 27, \"swee\": 809, \"swel\": 127, \"swen\": 1, \"swep\": 74, \"swer\": 20, \"swi\": 759, \"swif\": 261, \"swil\": 1, \"swim\": 115, \"swin\": 143, \"swip\": 4, \"swir\": 15, \"swis\": 26, \"swit\": 116, \"swiv\": 7, \"swo\": 917, \"swol\": 20, \"swoo\": 25, \"swor\": 871, \"swou\": 1, \"swu\": 117, \"swun\": 111, \"syb\": 4, \"sybe\": 1, \"sybi\": 2, \"sybs\": 1, \"syc\": 20, \"syca\": 5, \"sych\": 3, \"syco\": 12, \"syd\": 6, \"sydn\": 6, \"sye\": 2, \"syen\": 2, \"syl\": 61, \"syll\": 50, \"sylp\": 3, \"sylv\": 8, \"sym\": 1035, \"symb\": 216, \"syme\": 2, \"symi\": 2, \"symm\": 26, \"symo\": 1, \"symp\": 269, \"syn\": 166, \"syna\": 75, \"sync\": 11, \"synd\": 14, \"syne\": 2, \"syno\": 19, \"synt\": 45, \"syr\": 182, \"syra\": 3, \"syre\": 2, \"syri\": 168, \"syro\": 1, \"syrt\": 1, \"syru\": 7, \"sys\": 638, \"syst\": 638, \"syu\": 1, \"sze\": 1, \"szel\": 1, \"szo\": 17, \"szol\": 17, \"taa\": 7, \"taan\": 7, \"tab\": 1202, \"taba\": 1, \"tabb\": 3, \"tabe\": 371, \"tabi\": 3, \"tabl\": 784, \"tabo\": 18, \"tabr\": 10, \"tabu\": 11, \"tac\": 144, \"tach\": 11, \"taci\": 13, \"tack\": 54, \"tacl\": 2, \"tact\": 49, \"tad\": 3, \"tadm\": 2, \"tadp\": 1, \"taf\": 16, \"taff\": 12, \"taft\": 3, \"tag\": 11, \"tagg\": 7, \"tagu\": 1, \"tah\": 40, \"taha\": 10, \"tahi\": 13, \"taho\": 5, \"tahp\": 8, \"tahr\": 1, \"tahs\": 2, \"taht\": 1, \"tai\": 299, \"tail\": 110, \"tain\": 18, \"taip\": 1, \"tais\": 1, \"taiw\": 7, \"tak\": 4055, \"take\": 1206, \"taki\": 435, \"taks\": 1, \"tal\": 2023, \"talb\": 129, \"tale\": 227, \"tali\": 16, \"talk\": 714, \"tall\": 78, \"talm\": 15, \"talo\": 4, \"talu\": 1, \"tam\": 101, \"tama\": 27, \"tamb\": 13, \"tame\": 16, \"tami\": 4, 
\"tamm\": 8, \"tamo\": 1, \"tamp\": 3, \"tan\": 218, \"tana\": 2, \"tanc\": 1, \"tand\": 3, \"tane\": 1, \"tang\": 102, \"tanh\": 2, \"tani\": 1, \"tank\": 28, \"tann\": 25, \"tans\": 1, \"tant\": 23, \"tao\": 10, \"taoi\": 9, \"tap\": 166, \"tapd\": 1, \"tape\": 55, \"taph\": 1, \"tapi\": 1, \"tapl\": 1, \"tapo\": 1, \"tapp\": 63, \"tar\": 402, \"tara\": 9, \"tard\": 8, \"tare\": 11, \"targ\": 83, \"tarh\": 1, \"tari\": 7, \"tark\": 1, \"tarl\": 62, \"tarn\": 5, \"tarp\": 10, \"tarq\": 4, \"tarr\": 100, \"tars\": 39, \"tart\": 41, \"taru\": 1, \"tarz\": 4, \"tas\": 593, \"tash\": 57, \"task\": 42, \"tasm\": 3, \"tass\": 9, \"tast\": 370, \"tat\": 98, \"tati\": 1, \"tatl\": 1, \"tatn\": 4, \"tatr\": 1, \"tatt\": 89, \"tau\": 287, \"taug\": 227, \"taun\": 34, \"taur\": 9, \"taus\": 2, \"taut\": 2, \"tav\": 22, \"tave\": 22, \"taw\": 22, \"tawd\": 3, \"tawe\": 2, \"tawn\": 15, \"tax\": 179, \"taxa\": 22, \"taxe\": 74, \"taxf\": 10, \"taxi\": 19, \"taxp\": 38, \"tay\": 78, \"tayl\": 78, \"tch\": 3, \"tcha\": 3, \"tcu\": 1, \"tea\": 1111, \"teac\": 562, \"tead\": 1, \"teag\": 1, \"teah\": 3, \"teak\": 2, \"teal\": 1, \"team\": 50, \"teap\": 1, \"tear\": 289, \"teas\": 30, \"teat\": 10, \"teaz\": 8, \"teb\": 3, \"teba\": 2, \"tebe\": 1, \"tec\": 396, \"tech\": 385, \"tecu\": 1, \"ted\": 32, \"tedd\": 5, \"tedi\": 27, \"tee\": 337, \"teem\": 27, \"teen\": 39, \"teet\": 263, \"teew\": 2, \"tef\": 1, \"teh\": 5, \"teha\": 1, \"tehe\": 3, \"tehi\": 1, \"tei\": 1, \"tek\": 23, \"teke\": 3, \"teko\": 14, \"tekt\": 6, \"tel\": 1958, \"tela\": 5, \"tele\": 256, \"telh\": 2, \"tell\": 268, \"telm\": 2, \"telo\": 1, \"tem\": 1171, \"tema\": 19, \"teme\": 2, \"temi\": 1, \"temp\": 1141, \"ten\": 1187, \"tena\": 65, \"tenb\": 1, \"tenc\": 1, \"tend\": 397, \"tene\": 22, \"tenf\": 10, \"teng\": 2, \"tenh\": 1, \"teni\": 2, \"tenm\": 2, \"tenn\": 57, \"teno\": 25, \"tenp\": 1, \"tens\": 114, \"tent\": 232, \"tenu\": 21, \"teny\": 4, \"teo\": 1, \"teok\": 1, \"tep\": 6, \"tepe\": 1, \"tepi\": 5, \"ter\": 1157, \"tera\": 19, \"tere\": 3, \"terg\": 1, \"term\": 334, \"tern\": 1, \"terp\": 2, \"terr\": 670, \"ters\": 7, \"tert\": 18, \"terz\": 1, \"tes\": 626, \"tess\": 8, \"test\": 475, \"tet\": 54, \"teta\": 1, \"tete\": 3, \"teth\": 6, \"tetr\": 30, \"tett\": 1, \"teu\": 3, \"teut\": 3, \"tew\": 1, \"tewf\": 1, \"tex\": 276, \"texa\": 100, \"texe\": 2, \"texo\": 2, \"text\": 101, \"tge\": 1, \"tget\": 1, \"tha\": 45004, \"thac\": 3, \"thad\": 4, \"thah\": 1, \"thai\": 4, \"thak\": 1, \"thal\": 2, \"tham\": 25, \"than\": 607, \"thar\": 8, \"that\": 204, \"thau\": 3, \"thaw\": 7, \"thax\": 3, \"thay\": 14, \"thc\": 6, \"thce\": 6, \"the\": 70010, \"thea\": 171, \"theb\": 6, \"thee\": 3, \"thef\": 24, \"thei\": 10558, \"thel\": 8, \"them\": 1161, \"then\": 262, \"theo\": 339, \"ther\": 12787, \"thes\": 4450, \"thet\": 2, \"theu\": 1, \"thev\": 1, \"thew\": 6, \"they\": 143, \"thi\": 24607, \"thia\": 1, \"thib\": 2, \"thic\": 383, \"thid\": 1, \"thie\": 152, \"thig\": 60, \"thii\": 2, \"thil\": 1, \"thim\": 13, \"thin\": 8495, \"thio\": 5, \"thir\": 1078, \"this\": 17, \"thit\": 170, \"tho\": 15322, \"thog\": 1, \"thol\": 1, \"thom\": 183, \"thon\": 3, \"thor\": 279, \"thos\": 2518, \"thou\": 5550, \"thr\": 6789, \"thra\": 19, \"thre\": 2577, \"thri\": 132, \"thro\": 3872, \"thru\": 170, \"ths\": 1, \"thsu\": 1, \"thu\": 2073, \"thud\": 2, \"thug\": 3, \"thul\": 1, \"thum\": 84, \"thun\": 216, \"thur\": 78, \"thut\": 1, \"thw\": 19, \"thwa\": 18, \"thwu\": 1, \"thy\": 380, \"thya\": 4, \"thye\": 1, \"thyi\": 1, 
\"thyn\": 4, \"thyr\": 75, \"thys\": 295, \"tia\": 4, \"tiar\": 2, \"tib\": 24, \"tibe\": 18, \"tibh\": 1, \"tibi\": 1, \"tibn\": 3, \"tibu\": 1, \"tic\": 72, \"tick\": 62, \"tico\": 4, \"tict\": 1, \"tid\": 178, \"tida\": 3, \"tidb\": 3, \"tide\": 30, \"tidi\": 76, \"tido\": 1, \"tidy\": 1, \"tie\": 165, \"tiec\": 1, \"tief\": 1, \"tiei\": 1, \"tiek\": 1, \"tiep\": 3, \"tier\": 14, \"tif\": 1, \"tig\": 206, \"tiga\": 2, \"tige\": 80, \"tigh\": 115, \"tigl\": 3, \"tigr\": 6, \"tij\": 1, \"tiju\": 1, \"tik\": 4, \"tiko\": 1, \"tikv\": 3, \"til\": 1222, \"tilb\": 1, \"tild\": 1, \"tile\": 23, \"tilg\": 32, \"tili\": 1, \"till\": 35, \"tilo\": 1, \"tilt\": 36, \"tim\": 5381, \"tima\": 1, \"timb\": 97, \"time\": 839, \"timi\": 64, \"timm\": 1, \"timn\": 21, \"timo\": 49, \"tin\": 209, \"tinc\": 5, \"tind\": 12, \"tine\": 2, \"ting\": 29, \"tini\": 4, \"tink\": 23, \"tinn\": 7, \"tinp\": 1, \"tins\": 3, \"tint\": 17, \"tip\": 55, \"tiph\": 2, \"tipo\": 1, \"tipp\": 22, \"tips\": 5, \"tipt\": 9, \"tir\": 266, \"tira\": 6, \"tire\": 197, \"tirh\": 3, \"tiri\": 6, \"tirs\": 5, \"tirz\": 18, \"tis\": 68, \"tisb\": 1, \"tish\": 6, \"tiss\": 58, \"tist\": 2, \"tit\": 287, \"tita\": 21, \"titc\": 1, \"tite\": 8, \"tith\": 44, \"titi\": 33, \"titl\": 146, \"titr\": 3, \"titt\": 6, \"titu\": 20, \"tiv\": 1, \"tive\": 1, \"tiz\": 2, \"tiza\": 1, \"tizi\": 1, \"tja\": 1, \"tjaw\": 1, \"tjo\": 1, \"tjok\": 1, \"tla\": 1, \"toa\": 51, \"toad\": 3, \"toan\": 1, \"toas\": 31, \"tob\": 64, \"toba\": 42, \"tobi\": 22, \"toc\": 3, \"tocc\": 1, \"toch\": 1, \"tod\": 439, \"toda\": 366, \"todd\": 2, \"tode\": 1, \"todh\": 33, \"todm\": 11, \"toe\": 43, \"toet\": 1, \"tof\": 4, \"toff\": 3, \"tog\": 1240, \"toga\": 5, \"toge\": 1232, \"togg\": 1, \"toh\": 1, \"toi\": 103, \"toil\": 64, \"toj\": 1, \"tojo\": 1, \"tok\": 86, \"toke\": 67, \"toki\": 1, \"toky\": 18, \"tol\": 1393, \"tola\": 3, \"tole\": 122, \"toll\": 30, \"tols\": 11, \"tolu\": 1, \"toly\": 1, \"tom\": 223, \"toma\": 34, \"tomb\": 27, \"tome\": 1, \"tomf\": 2, \"tomk\": 2, \"tomm\": 21, \"tomo\": 106, \"ton\": 798, \"tona\": 11, \"tone\": 50, \"tong\": 382, \"toni\": 60, \"tonm\": 1, \"tonn\": 1, \"tons\": 2, \"too\": 1891, \"toob\": 2, \"tood\": 2, \"tooe\": 1, \"tooh\": 1, \"took\": 18, \"tool\": 69, \"toom\": 1, \"toon\": 4, \"toos\": 2, \"toot\": 73, \"top\": 197, \"topa\": 6, \"topc\": 3, \"topd\": 2, \"tope\": 3, \"topg\": 4, \"toph\": 15, \"topi\": 39, \"topk\": 1, \"topl\": 3, \"topm\": 10, \"topn\": 2, \"topo\": 9, \"topp\": 22, \"topq\": 2, \"topr\": 1, \"tops\": 10, \"topt\": 2, \"tor\": 464, \"tora\": 1, \"torc\": 34, \"tori\": 8, \"torm\": 84, \"torn\": 7, \"toro\": 6, \"torp\": 10, \"torq\": 16, \"torr\": 59, \"tors\": 14, \"tort\": 72, \"tos\": 159, \"tosc\": 4, \"toss\": 120, \"tot\": 355, \"tota\": 335, \"tote\": 1, \"toth\": 2, \"toti\": 1, \"tott\": 9, \"tou\": 888, \"touc\": 670, \"toug\": 73, \"touj\": 1, \"toul\": 2, \"tour\": 84, \"tous\": 5, \"tout\": 1, \"tow\": 2329, \"towa\": 1401, \"towb\": 1, \"towe\": 240, \"towh\": 1, \"towi\": 14, \"town\": 177, \"towr\": 3, \"tows\": 2, \"tox\": 4, \"toxi\": 4, \"toy\": 34, \"toye\": 1, \"toyi\": 2, \"toyl\": 5, \"toyn\": 9, \"tra\": 3407, \"trab\": 1, \"trac\": 291, \"trad\": 581, \"traf\": 87, \"trag\": 139, \"trai\": 623, \"traj\": 2, \"tram\": 57, \"tran\": 1095, \"trap\": 47, \"tras\": 11, \"trau\": 6, \"trav\": 378, \"traw\": 1, \"trax\": 1, \"tray\": 5, \"tre\": 2539, \"trea\": 862, \"treb\": 21, \"trec\": 1, \"tred\": 1, \"tree\": 484, \"tref\": 1, \"treg\": 1, \"trek\": 1, 
\"trel\": 5, \"trem\": 321, \"tren\": 108, \"trep\": 4, \"tres\": 137, \"trev\": 29, \"tri\": 2073, \"tria\": 291, \"trib\": 554, \"tric\": 146, \"trid\": 4, \"trie\": 421, \"trif\": 65, \"trig\": 31, \"trii\": 4, \"trik\": 1, \"tril\": 23, \"trim\": 39, \"trin\": 20, \"trio\": 4, \"trip\": 85, \"tris\": 22, \"trit\": 4, \"triu\": 183, \"triv\": 38, \"tro\": 1069, \"troa\": 6, \"trob\": 1, \"trod\": 41, \"troe\": 1, \"trog\": 1, \"troh\": 1, \"troi\": 2, \"trol\": 8, \"trom\": 3, \"troo\": 128, \"trop\": 74, \"trot\": 17, \"trou\": 719, \"trov\": 2, \"trow\": 19, \"troy\": 1, \"tru\": 2600, \"trua\": 4, \"truc\": 128, \"trud\": 13, \"true\": 40, \"trui\": 5, \"truj\": 16, \"trul\": 182, \"trum\": 209, \"trun\": 79, \"trus\": 491, \"trut\": 683, \"try\": 291, \"trya\": 3, \"tryi\": 285, \"tryp\": 2, \"trys\": 1, \"tsa\": 5, \"tsar\": 4, \"tsc\": 1, \"tsch\": 1, \"tse\": 2, \"tsh\": 9, \"tsho\": 8, \"tsht\": 1, \"tsi\": 1, \"tsit\": 1, \"tso\": 1, \"tsu\": 22, \"tsun\": 22, \"tsv\": 1, \"tsve\": 1, \"tta\": 1, \"tua\": 1, \"tual\": 1, \"tub\": 120, \"tuba\": 10, \"tube\": 35, \"tubi\": 6, \"tubo\": 1, \"tubu\": 5, \"tuc\": 33, \"tuck\": 27, \"tucs\": 3, \"tud\": 6, \"tudo\": 6, \"tue\": 84, \"tues\": 84, \"tuf\": 18, \"tuft\": 11, \"tug\": 19, \"tuga\": 1, \"tugg\": 14, \"tugo\": 3, \"tuh\": 2, \"tuhu\": 2, \"tui\": 11, \"tuil\": 3, \"tuit\": 8, \"tul\": 62, \"tula\": 3, \"tuli\": 51, \"tull\": 3, \"tuls\": 1, \"tult\": 4, \"tum\": 173, \"tumb\": 89, \"tume\": 1, \"tumi\": 1, \"tumo\": 26, \"tumu\": 56, \"tun\": 136, \"tunb\": 2, \"tune\": 34, \"tung\": 4, \"tuni\": 15, \"tunn\": 24, \"tuo\": 1, \"tuoh\": 1, \"tup\": 3, \"tupm\": 2, \"tupp\": 1, \"tur\": 3261, \"tura\": 1, \"turb\": 48, \"turc\": 1, \"ture\": 1, \"turf\": 2, \"turi\": 3, \"turk\": 53, \"turm\": 13, \"turn\": 2160, \"turp\": 6, \"turq\": 4, \"turr\": 13, \"turt\": 95, \"turv\": 2, \"tus\": 31, \"tusc\": 6, \"tush\": 1, \"tusi\": 1, \"tusk\": 11, \"tuss\": 8, \"tut\": 30, \"tute\": 1, \"tuto\": 20, \"tutt\": 9, \"tux\": 6, \"tuxa\": 5, \"tuxe\": 1, \"twa\": 67, \"twai\": 37, \"twan\": 1, \"twe\": 1148, \"twea\": 1, \"twee\": 20, \"twel\": 380, \"twen\": 731, \"twer\": 14, \"twi\": 607, \"twic\": 196, \"twig\": 19, \"twil\": 78, \"twin\": 110, \"twir\": 15, \"twis\": 126, \"twit\": 30, \"twix\": 8, \"two\": 126, \"twoa\": 1, \"twob\": 5, \"twoc\": 4, \"twod\": 11, \"twoe\": 2, \"twof\": 10, \"twog\": 1, \"twoh\": 5, \"twoi\": 2, \"twol\": 2, \"twom\": 1, \"twon\": 1, \"twop\": 18, \"twor\": 4, \"twos\": 16, \"twot\": 18, \"twou\": 4, \"twov\": 1, \"twow\": 3, \"twoy\": 6, \"tyb\": 6, \"tybe\": 5, \"tybu\": 1, \"tyc\": 6, \"tych\": 5, \"tyco\": 1, \"tyd\": 4, \"tydi\": 4, \"tye\": 2, \"tyer\": 1, \"tyg\": 1, \"tyga\": 1, \"tyi\": 10, \"tyin\": 10, \"tyl\": 3, \"tyla\": 1, \"tyle\": 2, \"tym\": 7, \"tymp\": 5, \"typ\": 509, \"type\": 148, \"typh\": 19, \"typi\": 111, \"typo\": 5, \"tyr\": 161, \"tyra\": 91, \"tyre\": 1, \"tyro\": 6, \"tyrr\": 1, \"tyru\": 22, \"tys\": 1, \"tyso\": 1, \"tyt\": 1, \"tyth\": 1, \"ube\": 1, \"uber\": 1, \"ubi\": 5, \"ubiq\": 5, \"uca\": 1, \"ucl\": 1, \"uda\": 7, \"udal\": 7, \"udd\": 1, \"udde\": 1, \"udo\": 1, \"ugl\": 70, \"ugli\": 19, \"uhh\": 9, \"uhhu\": 5, \"uhl\": 1, \"uhle\": 1, \"uhu\": 1, \"ukr\": 4, \"ukra\": 4, \"ula\": 9, \"ulan\": 3, \"ulb\": 2, \"ulbr\": 2, \"ulc\": 11, \"ulce\": 11, \"ull\": 3, \"ullm\": 1, \"ullo\": 1, \"ult\": 157, \"ulti\": 113, \"ultr\": 44, \"uly\": 17, \"ulya\": 15, \"ulys\": 2, \"umb\": 39, \"umbe\": 5, \"umbi\": 1, \"umbr\": 33, \"umc\": 3, \"umci\": 3, 
\"umm\": 1, \"umma\": 1, \"ump\": 4, \"umpi\": 4, \"ums\": 1, \"umsc\": 1, \"una\": 433, \"unab\": 104, \"unac\": 71, \"unad\": 20, \"unaf\": 25, \"unag\": 3, \"unai\": 6, \"unal\": 15, \"unam\": 13, \"unan\": 52, \"unap\": 13, \"unar\": 8, \"unas\": 14, \"unat\": 18, \"unau\": 3, \"unav\": 29, \"unaw\": 38, \"unb\": 175, \"unba\": 16, \"unbe\": 67, \"unbi\": 10, \"unbl\": 15, \"unbo\": 34, \"unbr\": 23, \"unbu\": 10, \"unc\": 1146, \"unca\": 18, \"unce\": 114, \"unch\": 48, \"unci\": 73, \"unck\": 1, \"uncl\": 456, \"unco\": 403, \"uncr\": 14, \"unct\": 15, \"uncu\": 4, \"und\": 3837, \"unda\": 14, \"unde\": 3576, \"undi\": 65, \"undo\": 91, \"undr\": 28, \"undu\": 47, \"undy\": 6, \"une\": 339, \"unea\": 109, \"unec\": 4, \"uned\": 3, \"unem\": 33, \"unen\": 23, \"uneq\": 35, \"uner\": 11, \"unes\": 3, \"unev\": 11, \"unex\": 107, \"unf\": 459, \"unfa\": 95, \"unfe\": 46, \"unfi\": 31, \"unfl\": 10, \"unfo\": 229, \"unfr\": 31, \"unfu\": 12, \"ung\": 114, \"unga\": 10, \"unge\": 9, \"ungi\": 1, \"ungl\": 3, \"ungo\": 45, \"ungr\": 39, \"ungu\": 7, \"unh\": 208, \"unha\": 136, \"unhe\": 40, \"unhi\": 10, \"unho\": 13, \"unhu\": 9, \"uni\": 2111, \"unic\": 20, \"unid\": 3, \"unif\": 206, \"unil\": 5, \"unim\": 50, \"unin\": 67, \"unio\": 252, \"uniq\": 84, \"unis\": 7, \"unit\": 761, \"univ\": 552, \"unj\": 73, \"unja\": 1, \"unju\": 72, \"unk\": 220, \"unke\": 3, \"unki\": 27, \"unkn\": 190, \"unl\": 556, \"unla\": 15, \"unle\": 297, \"unli\": 139, \"unlo\": 77, \"unlu\": 28, \"unm\": 150, \"unma\": 48, \"unme\": 25, \"unmi\": 44, \"unmo\": 30, \"unmu\": 3, \"unn\": 156, \"unna\": 68, \"unne\": 67, \"unnn\": 1, \"unno\": 13, \"unnu\": 4, \"uno\": 69, \"unob\": 38, \"unoc\": 7, \"unof\": 7, \"unop\": 4, \"unor\": 9, \"unos\": 2, \"unou\": 1, \"unp\": 272, \"unpa\": 48, \"unpe\": 25, \"unph\": 1, \"unpi\": 7, \"unpl\": 67, \"unpo\": 18, \"unpr\": 88, \"unpu\": 18, \"unq\": 46, \"unqu\": 46, \"unr\": 281, \"unra\": 1, \"unre\": 212, \"unri\": 46, \"unro\": 10, \"unru\": 12, \"uns\": 483, \"unsa\": 41, \"unsc\": 23, \"unse\": 119, \"unsh\": 31, \"unsi\": 14, \"unsk\": 6, \"unsl\": 5, \"unsm\": 6, \"unso\": 30, \"unsp\": 51, \"unst\": 61, \"unsu\": 90, \"unsw\": 2, \"unsy\": 4, \"unt\": 10173, \"unta\": 17, \"untc\": 1, \"unte\": 15, \"unth\": 21, \"unti\": 1000, \"unto\": 41, \"untr\": 40, \"untu\": 5, \"untw\": 4, \"unty\": 3, \"unu\": 139, \"unus\": 133, \"unut\": 6, \"unv\": 20, \"unva\": 6, \"unve\": 9, \"unvi\": 4, \"unvo\": 1, \"unw\": 268, \"unwa\": 39, \"unwe\": 57, \"unwh\": 9, \"unwi\": 103, \"unwo\": 53, \"unwr\": 7, \"uny\": 2, \"unyi\": 2, \"uom\": 1, \"uomi\": 1, \"upa\": 1, \"upan\": 1, \"upb\": 16, \"upbe\": 1, \"upbo\": 3, \"upbr\": 11, \"upbu\": 1, \"upc\": 2, \"upca\": 1, \"upco\": 1, \"upd\": 4, \"upda\": 4, \"upg\": 7, \"upgr\": 7, \"uph\": 66, \"upha\": 4, \"uphe\": 24, \"uphi\": 3, \"upho\": 35, \"upj\": 1, \"upju\": 1, \"upk\": 6, \"upke\": 6, \"upl\": 39, \"upla\": 13, \"upli\": 26, \"upo\": 5082, \"upp\": 278, \"uppe\": 274, \"uppo\": 3, \"upr\": 211, \"upra\": 4, \"upre\": 1, \"upri\": 178, \"upro\": 28, \"ups\": 132, \"upse\": 34, \"upsh\": 2, \"upsi\": 21, \"upso\": 1, \"upsp\": 3, \"upst\": 66, \"upsu\": 3, \"upsw\": 2, \"upt\": 46, \"upta\": 5, \"upto\": 27, \"uptr\": 1, \"uptu\": 13, \"upw\": 156, \"upwa\": 155, \"upwh\": 1, \"ura\": 11, \"uran\": 11, \"urb\": 65, \"urba\": 65, \"urc\": 3, \"urch\": 3, \"ure\": 31, \"urem\": 1, \"uret\": 29, \"urg\": 223, \"urge\": 145, \"urgi\": 25, \"uri\": 67, \"uria\": 29, \"uric\": 1, \"urie\": 14, \"urij\": 11, \"urin\": 
4, \"urn\": 4, \"urs\": 1, \"ursu\": 1, \"uru\": 1, \"urug\": 1, \"usa\": 51, \"usab\": 8, \"usag\": 43, \"use\": 1219, \"usea\": 2, \"usef\": 139, \"usel\": 60, \"user\": 9, \"uses\": 2, \"uset\": 7, \"usg\": 1, \"ush\": 49, \"ushe\": 49, \"usi\": 177, \"usin\": 176, \"uso\": 1, \"usq\": 1, \"usqu\": 1, \"uss\": 13, \"usso\": 1, \"ussr\": 1, \"usu\": 579, \"usua\": 535, \"usur\": 44, \"uta\": 8, \"ute\": 6, \"uten\": 6, \"uth\": 3, \"utha\": 2, \"uthe\": 1, \"uti\": 91, \"util\": 91, \"utm\": 104, \"utmo\": 104, \"uto\": 54, \"utoa\": 2, \"utop\": 52, \"utt\": 412, \"utte\": 410, \"utto\": 1, \"uttu\": 1, \"uxb\": 2, \"uxbr\": 2, \"uxo\": 1, \"uxor\": 1, \"uza\": 3, \"uzz\": 73, \"uzza\": 4, \"uzze\": 1, \"uzzi\": 47, \"vac\": 177, \"vaca\": 132, \"vacc\": 4, \"vach\": 1, \"vaci\": 1, \"vacu\": 39, \"vad\": 4, \"vadi\": 1, \"vads\": 1, \"vag\": 148, \"vaga\": 16, \"vagi\": 17, \"vagr\": 4, \"vagu\": 111, \"vai\": 369, \"vail\": 1, \"vain\": 20, \"vaj\": 1, \"vaje\": 1, \"val\": 1216, \"vald\": 1, \"vale\": 36, \"vali\": 114, \"vall\": 322, \"valm\": 1, \"valo\": 68, \"valp\": 2, \"valu\": 633, \"valv\": 13, \"vam\": 6, \"vamp\": 5, \"van\": 330, \"vanc\": 3, \"vand\": 10, \"vang\": 4, \"vani\": 256, \"vanq\": 13, \"vant\": 12, \"vap\": 77, \"vapo\": 77, \"vaq\": 2, \"vaqu\": 2, \"var\": 809, \"vara\": 2, \"vari\": 690, \"varl\": 6, \"varm\": 2, \"varn\": 11, \"varr\": 3, \"varv\": 1, \"vary\": 56, \"vas\": 321, \"vasa\": 1, \"vasc\": 4, \"vase\": 13, \"vash\": 11, \"vasi\": 1, \"vask\": 1, \"vaso\": 2, \"vass\": 4, \"vast\": 29, \"vat\": 9, \"vati\": 5, \"vatt\": 1, \"vau\": 65, \"vaud\": 6, \"vaug\": 5, \"vaul\": 37, \"vaun\": 17, \"vbi\": 1, \"vbiq\": 1, \"vea\": 3, \"veb\": 1, \"vebl\": 1, \"vec\": 38, \"vecc\": 8, \"vect\": 30, \"ved\": 5, \"veda\": 4, \"vee\": 14, \"veec\": 1, \"veer\": 10, \"veg\": 59, \"vega\": 5, \"vege\": 54, \"veh\": 136, \"vehe\": 35, \"vehi\": 101, \"vei\": 127, \"veil\": 17, \"vein\": 29, \"vel\": 76, \"vela\": 2, \"veld\": 1, \"vell\": 1, \"velo\": 48, \"velv\": 24, \"ven\": 430, \"vena\": 1, \"vend\": 12, \"vene\": 68, \"veng\": 100, \"veni\": 25, \"veno\": 14, \"vent\": 175, \"venu\": 14, \"ver\": 5555, \"vera\": 22, \"verb\": 54, \"verd\": 51, \"verg\": 11, \"veri\": 198, \"verl\": 3, \"verm\": 51, \"vern\": 42, \"vero\": 1, \"verp\": 1, \"verr\": 6, \"vers\": 188, \"vert\": 96, \"verv\": 5, \"ves\": 413, \"vesi\": 1, \"veso\": 5, \"vesp\": 2, \"vess\": 343, \"vest\": 35, \"vesu\": 18, \"vet\": 78, \"vete\": 67, \"veto\": 1, \"vev\": 1, \"veva\": 1, \"vex\": 98, \"vexa\": 36, \"vexe\": 58, \"vexi\": 3, \"vey\": 2, \"veyl\": 1, \"vgl\": 1, \"via\": 39, \"viab\": 8, \"vial\": 10, \"vian\": 1, \"viar\": 1, \"viat\": 1, \"vib\": 43, \"vibe\": 1, \"vibr\": 42, \"vic\": 533, \"vica\": 32, \"vice\": 35, \"vich\": 1, \"vici\": 64, \"vick\": 10, \"vico\": 1, \"vict\": 324, \"vid\": 9, \"vida\": 2, \"vide\": 3, \"vido\": 1, \"vie\": 621, \"viel\": 1, \"vien\": 35, \"viet\": 12, \"vieu\": 2, \"view\": 193, \"vig\": 147, \"vigi\": 40, \"vign\": 2, \"vigo\": 104, \"vigr\": 1, \"vii\": 13, \"viii\": 1, \"vik\": 4, \"viki\": 3, \"viku\": 1, \"vil\": 506, \"vila\": 1, \"vild\": 6, \"vile\": 3, \"vili\": 2, \"vill\": 444, \"vin\": 387, \"vinc\": 35, \"vind\": 27, \"vine\": 222, \"vinn\": 1, \"vino\": 1, \"vins\": 1, \"vint\": 18, \"viny\": 4, \"vio\": 491, \"vioi\": 1, \"viol\": 487, \"vip\": 10, \"vipe\": 9, \"viph\": 1, \"vir\": 533, \"vird\": 2, \"vire\": 1, \"virg\": 268, \"viri\": 11, \"virs\": 1, \"virt\": 231, \"viru\": 19, \"vis\": 1257, \"visa\": 36, \"visc\": 
35, \"vise\": 1, \"vish\": 9, \"visi\": 1090, \"viso\": 2, \"visr\": 1, \"vist\": 20, \"visu\": 57, \"vit\": 164, \"vita\": 142, \"viti\": 4, \"vitr\": 8, \"vitt\": 2, \"vitu\": 2, \"viv\": 112, \"viva\": 26, \"vive\": 1, \"vivi\": 79, \"viy\": 1, \"viye\": 1, \"viz\": 2, \"viza\": 2, \"vla\": 1, \"vlad\": 1, \"vlc\": 2, \"vlce\": 2, \"vna\": 4, \"vnac\": 2, \"vnas\": 1, \"vnat\": 1, \"vnb\": 9, \"vnba\": 3, \"vnbe\": 2, \"vnbo\": 1, \"vnbr\": 3, \"vnc\": 10, \"vnch\": 1, \"vnck\": 3, \"vncl\": 3, \"vnct\": 2, \"vncu\": 1, \"vnd\": 52, \"vnda\": 1, \"vnde\": 44, \"vndi\": 2, \"vndo\": 5, \"vne\": 2, \"vnef\": 1, \"vneq\": 1, \"vnf\": 14, \"vnfe\": 1, \"vnfi\": 3, \"vnfl\": 1, \"vnfo\": 9, \"vng\": 7, \"vnga\": 2, \"vnge\": 2, \"vngo\": 1, \"vngr\": 1, \"vngu\": 1, \"vnh\": 5, \"vnha\": 3, \"vnho\": 2, \"vni\": 10, \"vnic\": 1, \"vnim\": 1, \"vnio\": 2, \"vnit\": 2, \"vniu\": 4, \"vnk\": 19, \"vnke\": 1, \"vnki\": 5, \"vnkl\": 7, \"vnkn\": 6, \"vnl\": 9, \"vnle\": 5, \"vnli\": 2, \"vnlo\": 1, \"vnlu\": 1, \"vnm\": 10, \"vnma\": 8, \"vnme\": 1, \"vnmi\": 1, \"vnn\": 10, \"vnna\": 8, \"vnne\": 1, \"vnnu\": 1, \"vnp\": 10, \"vnpa\": 1, \"vnpe\": 1, \"vnpo\": 1, \"vnpr\": 6, \"vnpu\": 1, \"vnr\": 7, \"vnre\": 2, \"vnri\": 2, \"vnru\": 3, \"vns\": 22, \"vnsa\": 4, \"vnsc\": 2, \"vnse\": 6, \"vnsh\": 4, \"vnsi\": 2, \"vnsk\": 1, \"vnsm\": 1, \"vnsp\": 1, \"vnsu\": 1, \"vnt\": 33, \"vnti\": 7, \"vnto\": 1, \"vntr\": 1, \"vnty\": 2, \"vnu\": 2, \"vnua\": 1, \"vnus\": 1, \"vnw\": 8, \"vnwa\": 1, \"vnwe\": 2, \"vnwh\": 1, \"vnwi\": 2, \"vnwo\": 2, \"vny\": 1, \"vnyo\": 1, \"voc\": 157, \"voca\": 148, \"voci\": 8, \"voe\": 2, \"voeg\": 2, \"vog\": 6, \"vogu\": 6, \"voi\": 1717, \"voic\": 1657, \"void\": 6, \"voit\": 1, \"vol\": 488, \"vola\": 10, \"volc\": 16, \"vole\": 1, \"volg\": 1, \"voli\": 11, \"volk\": 4, \"voll\": 21, \"voln\": 1, \"volp\": 1, \"vols\": 2, \"volt\": 49, \"volu\": 367, \"vom\": 20, \"vomi\": 20, \"von\": 2, \"vonn\": 2, \"voo\": 6, \"vood\": 5, \"voor\": 1, \"vop\": 2, \"voph\": 1, \"vopo\": 1, \"vor\": 12, \"vora\": 7, \"voro\": 1, \"vort\": 4, \"vos\": 1, \"vosg\": 1, \"vot\": 215, \"vote\": 80, \"voti\": 33, \"vou\": 52, \"vouc\": 46, \"voui\": 1, \"voul\": 1, \"voum\": 1, \"vow\": 98, \"vowe\": 53, \"vowi\": 3, \"voy\": 222, \"voya\": 202, \"voyc\": 19, \"vpb\": 1, \"vpbr\": 1, \"vpl\": 1, \"vpli\": 1, \"vpm\": 1, \"vpmo\": 1, \"vpo\": 162, \"vpp\": 4, \"vppe\": 1, \"vppo\": 2, \"vpr\": 1, \"vpro\": 1, \"vps\": 2, \"vpsh\": 1, \"vpsp\": 1, \"vpw\": 3, \"vpwa\": 3, \"vra\": 1, \"vrg\": 3, \"vri\": 2, \"vril\": 1, \"vrin\": 1, \"vro\": 1, \"vrom\": 1, \"vse\": 4, \"vset\": 1, \"vsh\": 4, \"vsha\": 4, \"vsi\": 1, \"vsin\": 1, \"vsu\": 5, \"vsua\": 2, \"vsur\": 3, \"vtm\": 1, \"vtmo\": 1, \"vto\": 2, \"vtt\": 10, \"vtte\": 10, \"vuh\": 1, \"vuhr\": 1, \"vul\": 94, \"vulc\": 2, \"vulg\": 48, \"vuln\": 22, \"vulp\": 1, \"vult\": 21, \"vuo\": 1, \"vyi\": 4, \"vyin\": 4, \"waa\": 1, \"waal\": 1, \"wab\": 5, \"waba\": 5, \"wac\": 11, \"wach\": 1, \"wack\": 4, \"wad\": 49, \"wadd\": 19, \"wade\": 14, \"wadi\": 7, \"waf\": 37, \"wafe\": 9, \"waff\": 2, \"waft\": 24, \"wag\": 320, \"wage\": 106, \"wagg\": 23, \"wagi\": 1, \"wagn\": 34, \"wago\": 97, \"wah\": 3, \"wahs\": 2, \"waht\": 1, \"wai\": 1106, \"waif\": 4, \"waig\": 1, \"wail\": 46, \"wain\": 6, \"wais\": 75, \"wait\": 565, \"waiv\": 6, \"wak\": 214, \"wake\": 82, \"waki\": 39, \"wal\": 2755, \"walb\": 1, \"walc\": 1, \"wald\": 5, \"wale\": 19, \"walf\": 2, \"wali\": 5, \"walk\": 991, \"wall\": 348, \"waln\": 24, \"walp\": 1, 
\"walr\": 9, \"wals\": 3, \"walt\": 212, \"wam\": 1, \"wamp\": 1, \"wan\": 1900, \"wand\": 197, \"wane\": 6, \"wang\": 4, \"wani\": 10, \"wann\": 5, \"wans\": 2, \"want\": 787, \"wap\": 3, \"wapp\": 3, \"war\": 1292, \"warb\": 27, \"ward\": 55, \"ware\": 23, \"warf\": 55, \"warh\": 2, \"wari\": 4, \"warl\": 34, \"warm\": 219, \"warn\": 148, \"warp\": 12, \"warr\": 204, \"wars\": 15, \"wart\": 15, \"warw\": 22, \"wary\": 1, \"was\": 1172, \"wash\": 436, \"wasn\": 154, \"wasp\": 6, \"wass\": 3, \"wast\": 272, \"wat\": 2791, \"watc\": 785, \"wate\": 1937, \"wath\": 1, \"watl\": 1, \"watr\": 2, \"wats\": 58, \"watt\": 5, \"wau\": 5, \"waue\": 3, \"waui\": 2, \"wav\": 471, \"wave\": 299, \"wavi\": 50, \"wavy\": 1, \"wax\": 76, \"waxe\": 65, \"waxi\": 3, \"waxw\": 3, \"way\": 487, \"waye\": 7, \"wayf\": 6, \"wayl\": 2, \"waym\": 2, \"wayn\": 12, \"wayo\": 1, \"ways\": 9, \"wayt\": 1, \"wayw\": 8, \"wba\": 3, \"wea\": 1393, \"weak\": 180, \"weal\": 142, \"wean\": 15, \"weap\": 213, \"wear\": 301, \"weas\": 7, \"weat\": 223, \"weav\": 68, \"weaz\": 3, \"web\": 20, \"webb\": 2, \"webe\": 2, \"webs\": 12, \"wec\": 1, \"wech\": 1, \"wed\": 170, \"wedd\": 93, \"wedg\": 27, \"wedl\": 4, \"wedn\": 46, \"wee\": 1036, \"weed\": 46, \"week\": 329, \"weem\": 1, \"ween\": 6, \"weep\": 109, \"weg\": 1, \"wege\": 1, \"wei\": 383, \"weid\": 7, \"weig\": 346, \"wein\": 4, \"weir\": 20, \"weis\": 3, \"wel\": 4028, \"welb\": 1, \"welc\": 267, \"weld\": 24, \"welf\": 70, \"welk\": 1, \"well\": 169, \"welm\": 1, \"wels\": 8, \"welt\": 9, \"wem\": 2, \"wemm\": 2, \"wen\": 3070, \"wenc\": 2, \"wend\": 16, \"went\": 235, \"wep\": 108, \"wer\": 10273, \"were\": 28, \"werg\": 1, \"wern\": 6, \"wert\": 3, \"wes\": 1213, \"wese\": 1, \"wesk\": 4, \"wesl\": 6, \"wess\": 1, \"west\": 787, \"wet\": 12, \"wetl\": 2, \"wetn\": 1, \"wett\": 9, \"weu\": 1, \"weun\": 1, \"wev\": 34, \"wex\": 6, \"wexl\": 6, \"wey\": 25, \"weya\": 3, \"weyb\": 1, \"weym\": 18, \"weyw\": 3, \"wha\": 10170, \"whac\": 3, \"whad\": 2, \"whal\": 1839, \"whan\": 2, \"whar\": 46, \"what\": 574, \"whe\": 14470, \"whea\": 95, \"whee\": 408, \"whel\": 26, \"when\": 325, \"wher\": 4044, \"whet\": 895, \"wheu\": 4, \"whi\": 16640, \"whic\": 12354, \"whif\": 8, \"whig\": 6, \"whil\": 2033, \"whim\": 20, \"whin\": 20, \"whip\": 62, \"whir\": 125, \"whis\": 397, \"whit\": 1512, \"whiz\": 9, \"who\": 4578, \"whod\": 1, \"whoe\": 82, \"whol\": 1437, \"whom\": 22, \"whoo\": 8, \"whop\": 2, \"whor\": 105, \"whos\": 1317, \"why\": 3, \"whyf\": 1, \"whyn\": 1, \"wic\": 630, \"wich\": 1, \"wick\": 624, \"wid\": 673, \"widd\": 2, \"wide\": 166, \"wido\": 168, \"wids\": 1, \"widt\": 26, \"wie\": 23, \"wied\": 1, \"wiel\": 20, \"wien\": 2, \"wif\": 1000, \"wife\": 17, \"wig\": 28, \"wigg\": 6, \"wigh\": 9, \"wigl\": 1, \"wigm\": 2, \"wigw\": 10, \"wil\": 11764, \"wilb\": 2, \"wilc\": 3, \"wild\": 460, \"wile\": 18, \"wilf\": 23, \"wilh\": 7, \"wili\": 2, \"wilk\": 31, \"will\": 848, \"wilm\": 10, \"wils\": 81, \"wilt\": 3, \"wim\": 8, \"wimp\": 7, \"wims\": 1, \"win\": 2581, \"winc\": 29, \"wind\": 915, \"wine\": 55, \"winf\": 1, \"wing\": 231, \"wink\": 32, \"winl\": 2, \"winn\": 78, \"wino\": 5, \"wins\": 59, \"wint\": 252, \"wip\": 80, \"wipe\": 37, \"wipi\": 16, \"wir\": 91, \"wire\": 31, \"wiri\": 2, \"wis\": 1942, \"wisc\": 25, \"wisd\": 332, \"wise\": 103, \"wish\": 415, \"wism\": 15, \"wisp\": 5, \"wiss\": 1, \"wist\": 17, \"wit\": 28877, \"witc\": 49, \"with\": 3513, \"witl\": 2, \"witn\": 351, \"wito\": 1, \"witt\": 43, \"wiu\": 2, \"wiue\": 2, \"wiv\": 179, 
\"wive\": 179, \"wiz\": 14, \"wiza\": 14, \"wli\": 1, \"wob\": 10, \"wobb\": 9, \"wobu\": 1, \"woe\": 22, \"woeb\": 3, \"woef\": 8, \"wof\": 2, \"wofu\": 2, \"woh\": 4, \"woha\": 3, \"wok\": 30, \"woke\": 2, \"wol\": 89, \"wolc\": 2, \"wold\": 1, \"wolf\": 21, \"woll\": 5, \"wolp\": 2, \"wols\": 1, \"wolu\": 1, \"wolv\": 24, \"wom\": 1967, \"woma\": 1233, \"womb\": 6, \"wome\": 631, \"won\": 1097, \"wond\": 938, \"wonl\": 1, \"wonn\": 2, \"wons\": 1, \"wont\": 12, \"woo\": 1196, \"wood\": 694, \"wooe\": 3, \"wooi\": 2, \"wool\": 38, \"woom\": 1, \"woon\": 5, \"wooo\": 1, \"wop\": 1, \"wor\": 9833, \"wora\": 3, \"worc\": 7, \"word\": 1353, \"work\": 1330, \"worl\": 2205, \"worm\": 62, \"worn\": 5, \"worr\": 136, \"wors\": 766, \"wort\": 492, \"wot\": 1, \"wott\": 1, \"wou\": 7279, \"woul\": 7006, \"woun\": 273, \"wov\": 34, \"wove\": 27, \"woz\": 1, \"wozz\": 1, \"wra\": 392, \"wrac\": 10, \"wrag\": 1, \"wrai\": 2, \"wran\": 11, \"wrap\": 93, \"wrat\": 261, \"wre\": 304, \"wrea\": 47, \"wrec\": 78, \"wreg\": 5, \"wren\": 24, \"wres\": 38, \"wret\": 107, \"wri\": 1596, \"wrig\": 74, \"wrin\": 88, \"wris\": 49, \"writ\": 1365, \"wro\": 959, \"wron\": 443, \"wrot\": 376, \"wrou\": 140, \"wru\": 9, \"wrun\": 9, \"wry\": 4, \"wryf\": 1, \"wryl\": 3, \"wus\": 1, \"wust\": 1, \"wwr\": 2, \"wwrl\": 1, \"wya\": 3, \"wyat\": 3, \"wyc\": 5, \"wyck\": 1, \"wycl\": 1, \"wyco\": 3, \"wyl\": 2, \"wyli\": 2, \"wym\": 1, \"wyma\": 1, \"wyn\": 5, \"wynd\": 2, \"wynn\": 1, \"wyns\": 1, \"wyo\": 10, \"wyom\": 10, \"xav\": 2, \"xavi\": 2, \"xen\": 4, \"xeni\": 1, \"xeno\": 3, \"xer\": 3, \"xerx\": 3, \"xgy\": 2, \"xgyr\": 2, \"xii\": 7, \"xim\": 1, \"xime\": 1, \"xra\": 20, \"xray\": 6, \"xre\": 7, \"xreg\": 4, \"xrel\": 2, \"xres\": 1, \"xtr\": 1, \"xtru\": 1, \"xvi\": 12, \"xvii\": 6, \"xxi\": 6, \"xxii\": 2, \"xxv\": 3, \"xxvi\": 2, \"xxx\": 6, \"xxxi\": 4, \"xyd\": 6, \"xydi\": 6, \"xyl\": 5, \"xyle\": 4, \"xylo\": 1, \"yac\": 34, \"yach\": 34, \"yad\": 1, \"yadd\": 1, \"yah\": 1, \"yahw\": 1, \"yak\": 3, \"yaki\": 1, \"yako\": 1, \"yal\": 37, \"yala\": 1, \"yale\": 2, \"yali\": 1, \"yalt\": 14, \"yam\": 2, \"yama\": 2, \"yan\": 106, \"yanc\": 2, \"yand\": 2, \"yank\": 83, \"yap\": 1, \"yapp\": 1, \"yaq\": 1, \"yaqu\": 1, \"yar\": 308, \"yard\": 145, \"yarm\": 10, \"yarn\": 11, \"yarr\": 4, \"yas\": 2, \"yase\": 1, \"yass\": 1, \"yau\": 1, \"yaug\": 1, \"yaw\": 39, \"yawe\": 1, \"yawi\": 2, \"yawn\": 26, \"yce\": 4, \"ycel\": 4, \"yea\": 3498, \"yean\": 1, \"year\": 2163, \"yeas\": 7, \"yeat\": 1, \"yed\": 1, \"yedi\": 1, \"yee\": 16, \"yeel\": 11, \"yeer\": 2, \"yeh\": 3, \"yehh\": 2, \"yehu\": 1, \"yel\": 296, \"yell\": 267, \"yelp\": 3, \"yen\": 1, \"yeo\": 4, \"yeom\": 4, \"yer\": 2, \"yes\": 225, \"yesi\": 1, \"yest\": 224, \"ygg\": 1, \"yggl\": 1, \"ygy\": 1, \"ygyr\": 1, \"yia\": 1, \"yian\": 1, \"yid\": 4, \"yidd\": 4, \"yie\": 215, \"yiel\": 215, \"yin\": 3, \"ying\": 1, \"yiny\": 2, \"ykn\": 2, \"ykno\": 2, \"ymc\": 2, \"ymh\": 1, \"ync\": 1, \"yoa\": 4, \"yoak\": 4, \"yod\": 3, \"yode\": 3, \"yog\": 3, \"yoj\": 17, \"yok\": 100, \"yoke\": 15, \"yoki\": 1, \"yokn\": 2, \"yoko\": 3, \"yoku\": 7, \"yol\": 1, \"yon\": 85, \"yond\": 52, \"yone\": 2, \"yong\": 2, \"yonk\": 1, \"yonn\": 4, \"yoo\": 2, \"yooe\": 1, \"yoor\": 1, \"yor\": 352, \"yori\": 2, \"york\": 34, \"yos\": 7, \"yose\": 6, \"yosh\": 1, \"you\": 9271, \"youl\": 90, \"youn\": 1753, \"your\": 927, \"yout\": 346, \"youu\": 1, \"youv\": 67, \"yre\": 3, \"yreg\": 3, \"yte\": 1, \"ytee\": 1, \"yub\": 2, \"yuc\": 2, \"yuca\": 1, \"yucc\": 1, 
\"yug\": 12, \"yugo\": 12, \"yuj\": 1, \"yujo\": 1, \"yuk\": 1, \"yum\": 1, \"yumy\": 1, \"yur\": 7, \"yuro\": 1, \"yus\": 1, \"yve\": 2, \"yvet\": 2, \"ywc\": 1, \"yyy\": 1, \"zaa\": 4, \"zaan\": 3, \"zaav\": 1, \"zab\": 24, \"zaba\": 8, \"zabb\": 3, \"zabd\": 8, \"zabe\": 1, \"zabu\": 4, \"zac\": 32, \"zacc\": 14, \"zach\": 18, \"zad\": 54, \"zade\": 1, \"zado\": 53, \"zah\": 1, \"zaha\": 1, \"zai\": 1, \"zal\": 18, \"zala\": 2, \"zalm\": 16, \"zam\": 2, \"zami\": 1, \"zamz\": 1, \"zan\": 6, \"zano\": 5, \"zanz\": 1, \"zap\": 4, \"zapa\": 1, \"zaph\": 2, \"zapo\": 1, \"zar\": 22, \"zara\": 2, \"zare\": 8, \"zarh\": 6, \"zaro\": 1, \"zart\": 2, \"zat\": 4, \"zatt\": 4, \"zav\": 1, \"zava\": 1, \"zax\": 1, \"zaxi\": 1, \"zaz\": 1, \"zea\": 89, \"zeal\": 38, \"zeb\": 100, \"zeba\": 23, \"zebe\": 13, \"zebi\": 1, \"zebo\": 7, \"zebr\": 1, \"zebu\": 55, \"zec\": 40, \"zech\": 40, \"zed\": 64, \"zeda\": 2, \"zede\": 62, \"zee\": 6, \"zef\": 1, \"zeff\": 1, \"zei\": 4, \"zeis\": 3, \"zeit\": 1, \"zel\": 33, \"zela\": 2, \"zele\": 2, \"zeli\": 15, \"zelo\": 13, \"zelz\": 1, \"zem\": 9, \"zema\": 4, \"zemi\": 1, \"zeml\": 3, \"zen\": 14, \"zena\": 2, \"zend\": 3, \"zeni\": 8, \"zenn\": 1, \"zep\": 26, \"zeph\": 26, \"zer\": 115, \"zera\": 24, \"zere\": 11, \"zero\": 6, \"zeru\": 49, \"zes\": 5, \"zet\": 4, \"zeth\": 4, \"zeu\": 2, \"zeug\": 1, \"zgy\": 1, \"zgyr\": 1, \"zhi\": 2, \"zhit\": 2, \"zho\": 1, \"zib\": 27, \"zibe\": 8, \"zibi\": 3, \"zic\": 12, \"zich\": 12, \"zid\": 33, \"zidd\": 1, \"zidk\": 1, \"zido\": 31, \"zie\": 2, \"zieg\": 2, \"zif\": 2, \"ziff\": 2, \"zig\": 7, \"zigg\": 1, \"zigz\": 6, \"zih\": 3, \"zik\": 15, \"zikl\": 15, \"zil\": 12, \"zill\": 3, \"zilp\": 7, \"zilt\": 2, \"zim\": 24, \"zimi\": 1, \"zimm\": 6, \"zimr\": 17, \"zin\": 22, \"zing\": 1, \"zinm\": 1, \"zio\": 162, \"zion\": 2, \"zip\": 27, \"ziph\": 5, \"zipp\": 12, \"zir\": 1, \"zira\": 1, \"zit\": 1, \"zith\": 1, \"ziz\": 3, \"ziza\": 1, \"zlo\": 1, \"zlot\": 1, \"zmi\": 1, \"zmit\": 1, \"zoa\": 17, \"zob\": 14, \"zoba\": 11, \"zobe\": 1, \"zod\": 10, \"zodi\": 10, \"zoe\": 2, \"zoet\": 2, \"zog\": 1, \"zogr\": 1, \"zoh\": 6, \"zoha\": 4, \"zohe\": 2, \"zol\": 1, \"zola\": 1, \"zom\": 2, \"zomb\": 2, \"zon\": 41, \"zone\": 14, \"zoni\": 6, \"zoo\": 14, \"zooe\": 1, \"zook\": 1, \"zool\": 8, \"zoom\": 3, \"zooo\": 1, \"zop\": 9, \"zoph\": 9, \"zor\": 16, \"zora\": 9, \"zore\": 1, \"zori\": 1, \"zoro\": 4, \"zorr\": 1, \"zou\": 2, \"zoun\": 2, \"zua\": 5, \"zub\": 2, \"zubk\": 2, \"zum\": 1, \"zump\": 1, \"zup\": 3, \"zur\": 10, \"zurc\": 2, \"zuri\": 8, \"zuy\": 1, \"zuyd\": 1, \"zuz\": 1, \"zuzi\": 1, \"zwe\": 1, \"zwo\": 2, \"zwor\": 2}, {}]\n ''')", "title": "" }, { "docid": "6d95d6ce3d503694e651e9003d662c77", "score": "0.5078795", "text": "def generate_preamble() -> str:\n return '''// check for specific status code\nassert.equal(response.status, 200, \"status was 200 OK\");\n\n// parse JSON response body into object\nvar data = JSON.parse(response.body);\nvar fees = data.fees;\nvar updated_fees = 0;\nfor (var i in fees) {\nvar fee = fees[i]\n'''", "title": "" }, { "docid": "f343f88f2cd6cf7ffaa14fe082d3dddb", "score": "0.5063138", "text": "def js_string(data):\n if not isinstance(data, basestring):\n return data\n data = data.replace('\\\\', '\\\\\\\\')\n data = data.replace('\\r', '\\\\r')\n data = data.replace('\\n', '\\\\n')\n data = data.replace('\\b', '\\\\b')\n data = data.replace('\"', '\\\\\"')\n data = data.replace(\"'\", \"\\\\'\")\n data = data.replace('<', '\\\\u003c')\n data = 
data.replace('>', '\\\\u003e')\n data = data.replace('&', '\\\\u0026')\n return jinja2.utils.Markup(data)", "title": "" }, { "docid": "ac97cac28f042dc06689335d5add1101", "score": "0.505243", "text": "def sample_javascript(self):\n\n return '''\nvar foo = function(str) {\n alert(str);\n return true;\n};\n\nfoo(\"Hello CoffeeScript!\");\n'''", "title": "" }, { "docid": "4a480402f6421e83fb5d2d7907d0597c", "score": "0.5051773", "text": "def _javastring():\n js = textwrap.dedent(\n \"\"\"\n <SCRIPT LANGUAGE=\"JavaScript\">\n var months = new Array(13);\n months[1] = \"January\";\n months[2] = \"February\";\n months[3] = \"March\";\n months[4] = \"April\";\n months[5] = \"May\";\n months[6] = \"June\";\n months[7] = \"July\";\n months[8] = \"August\";\n months[9] = \"September\";\n months[10] = \"October\";\n months[11] = \"November\";\n months[12] = \"December\";\n var dateObj = new Date(document.lastModified)\n var lmonth = months[dateObj.getMonth() + 1]\n var date = dateObj.getDate()\n var fyear = dateObj.getYear()\n if (fyear < 2000)\n fyear = fyear + 1900\n if (date == 1 || date == 21 || date == 31)\n document.write(\" \" + lmonth + \" \" + date + \"st, \" + fyear)\n else if (date == 2 || date == 22)\n document.write(\" \" + lmonth + \" \" + date + \"nd, \" + fyear)\n else if (date == 3 || date == 23)\n document.write(\" \" + lmonth + \" \" + date + \"rd, \" + fyear)\n else\n document.write(\" \" + lmonth + \" \" + date + \"th, \" + fyear)\n </SCRIPT>\n \"\"\"\n )\n return js", "title": "" }, { "docid": "266e5742034e8e074ce67a9bbdfefd45", "score": "0.50199956", "text": "def dump_js(obj: dict) -> str:\n\n return json.dumps(obj, cls=JsEncoder).replace('\"<--', '').replace('-->\"', '')", "title": "" }, { "docid": "a01ef186e743ee52aaa29bc40ba0ab04", "score": "0.49889234", "text": "def sample_coffee(self):\n\n return '''\nfoo = (str) ->\n alert str\n true\n\nfoo \"Hello CoffeeScript!\"\n'''", "title": "" }, { "docid": "2e4f9d2d00c228c9c913f4c7b1f4dfa0", "score": "0.49821723", "text": "def hello():\r\n return '<html>\\\r\n <head><title>Title of the document</title></head>\\\r\n <body>\\\r\n <form action=\"/submit\" method=\"POST\">\\\r\n <input type=\"submit\" value=\"submit\"/>\\\r\n <br/>\\\r\n <textarea rows=\"5\" cols=\"100\" name=\"text\" placeholder=\"Paste Zerodha JSON\"></textarea>\\\r\n </form>\\\r\n </body>\\\r\n </html>'", "title": "" }, { "docid": "9719acf4bf3ffc9c25d6e9110720810c", "score": "0.49438962", "text": "def c2js(plural):\n\n if len(plural) > 1000:\n raise ValueError('plural form expression is too long')\n\n return \"function(n) { return (\" + plural + \"); }\"", "title": "" }, { "docid": "b115513c1afdd2520365976210c05381", "score": "0.49367595", "text": "def customization_data(client=None):\n\n yield 
ImportDefinition(u\"\"\"\neyJhY3Rpb25fb3JkZXIiOiBbXSwgImFjdGlvbnMiOiBbeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29u\nZGl0aW9ucyI6IFt7ImV2YWx1YXRpb25faWQiOiBudWxsLCAiZmllbGRfbmFtZSI6ICJhcnRpZmFj\ndC50eXBlIiwgIm1ldGhvZCI6ICJlcXVhbHMiLCAidHlwZSI6IG51bGwsICJ2YWx1ZSI6ICJJUCBB\nZGRyZXNzIn1dLCAiZW5hYmxlZCI6IHRydWUsICJleHBvcnRfa2V5IjogIkRlbGV0ZSBmcm9tIFFS\nYWRhciBSZWZlcmVuY2UgU2V0IiwgImlkIjogMTQxLCAibG9naWNfdHlwZSI6ICJhbGwiLCAibWVz\nc2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIm5hbWUiOiAiRGVsZXRlIGZyb20gUVJhZGFyIFJlZmVy\nZW5jZSBTZXQiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAidGFncyI6IFtdLCAidGltZW91\ndF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMSwgInV1aWQiOiAiMjg4ZmM2MGMtMmQ0Mi00ODI5\nLWE5ZTYtNGE1MDNjNjkwNmU4IiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFsicXJh\nZGFyX2RlbGV0ZV9yZWZlcmVuY2Vfc2V0X2l0ZW0iXX0sIHsiYXV0b21hdGlvbnMiOiBbXSwgImNv\nbmRpdGlvbnMiOiBbXSwgImVuYWJsZWQiOiB0cnVlLCAiZXhwb3J0X2tleSI6ICJGaW5kIEFsbCBR\nUmFkYXIgUmVmZXJlbmNlIFNldHMiLCAiaWQiOiAxNDIsICJsb2dpY190eXBlIjogImFsbCIsICJt\nZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdLCAibmFtZSI6ICJGaW5kIEFsbCBRUmFkYXIgUmVmZXJl\nbmNlIFNldHMiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAidGFncyI6IFtdLCAidGltZW91\ndF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMSwgInV1aWQiOiAiNmIxYTM3ZTAtOGRiYy00YTdi\nLTgxNDMtZGEwOTVhMTNiMWVlIiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFsicXJh\nZGFyX2ZpbmRfcmVmZXJlbmNlX3NldHNfYXJ0aWZhY3QiXX0sIHsiYXV0b21hdGlvbnMiOiBbXSwg\nImNvbmRpdGlvbnMiOiBbXSwgImVuYWJsZWQiOiB0cnVlLCAiZXhwb3J0X2tleSI6ICJGaW5kIGlu\nIFFSYWRhciBSZWZlcmVuY2UgU2V0IiwgImlkIjogMTQzLCAibG9naWNfdHlwZSI6ICJhbGwiLCAi\nbWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIm5hbWUiOiAiRmluZCBpbiBRUmFkYXIgUmVmZXJl\nbmNlIFNldCIsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJ0YWdzIjogW10sICJ0aW1lb3V0\nX3NlY29uZHMiOiA4NjQwMCwgInR5cGUiOiAxLCAidXVpZCI6ICIyYzFjOWZlMy1kMGEzLTRjNDkt\nYjBiMC01NGEyMWQ4NmYwOWUiLCAidmlld19pdGVtcyI6IFtdLCAid29ya2Zsb3dzIjogWyJxcmFk\nYXJfZmluZF9yZWZlcmVuY2Vfc2V0X2l0ZW0iXX0sIHsiYXV0b21hdGlvbnMiOiBbXSwgImNvbmRp\ndGlvbnMiOiBbeyJldmFsdWF0aW9uX2lkIjogbnVsbCwgImZpZWxkX25hbWUiOiAiYXJ0aWZhY3Qu\ndHlwZSIsICJtZXRob2QiOiAiZXF1YWxzIiwgInR5cGUiOiBudWxsLCAidmFsdWUiOiAiSVAgQWRk\ncmVzcyJ9XSwgImVuYWJsZWQiOiB0cnVlLCAiZXhwb3J0X2tleSI6ICJRUmFkYXIgQWRkIHRvIFJl\nZmVyZW5jZSBTZXQiLCAiaWQiOiAxNDQsICJsb2dpY190eXBlIjogImFsbCIsICJtZXNzYWdlX2Rl\nc3RpbmF0aW9ucyI6IFtdLCAibmFtZSI6ICJRUmFkYXIgQWRkIHRvIFJlZmVyZW5jZSBTZXQiLCAi\nb2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjog\nODY0MDAsICJ0eXBlIjogMSwgInV1aWQiOiAiMmI4NTJiMTgtNjE2ZC00YTZmLTk2NDgtOWIwNGVl\nZTZkZjY4IiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFsicXJhZGFyX2FkZF9yZWZl\ncmVuY2Vfc2V0X2l0ZW0iXX0sIHsiYXV0b21hdGlvbnMiOiBbXSwgImNvbmRpdGlvbnMiOiBbeyJl\ndmFsdWF0aW9uX2lkIjogbnVsbCwgImZpZWxkX25hbWUiOiAiYXJ0aWZhY3QudHlwZSIsICJtZXRo\nb2QiOiAiZXF1YWxzIiwgInR5cGUiOiBudWxsLCAidmFsdWUiOiAiSVAgQWRkcmVzcyJ9XSwgImVu\nYWJsZWQiOiB0cnVlLCAiZXhwb3J0X2tleSI6ICJRUmFkYXIgTW92ZSBmcm9tIFNhbXBsZSBCbG9j\na2VkIHRvIFNhbXBsZSBTdXNwZWN0ZWQiLCAiaWQiOiAxNDUsICJsb2dpY190eXBlIjogImFsbCIs\nICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdLCAibmFtZSI6ICJRUmFkYXIgTW92ZSBmcm9tIFNh\nbXBsZSBCbG9ja2VkIHRvIFNhbXBsZSBTdXNwZWN0ZWQiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZh\nY3QiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMSwgInV1\naWQiOiAiOTJmNjJlNDgtYWE3My00OTIyLTk0M2ItMTg4NjYzOTFkYzRjIiwgInZpZXdfaXRlbXMi\nOiBbXSwgIndvcmtmbG93cyI6IFsicXJhZGFyX21vdmVfaXRlbV90b19kaWZmZXJlbnRfcmVmX3Nl\ndCJdfSwgeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29uZGl0aW9ucyI6IFt7ImV2YWx1YXRpb25faWQi\nOiBudWxsLCAiZmllbGRfbmFtZSI6ICJpbmNpZGVudC5wcm9wZXJ0aWVzLnFyYWRhcl9pZCIsICJt\nZXRob2QiOiAiaGFzX2FfdmFsdWUiLCAidHlwZSI6IG51bGwsICJ2YWx1ZSI6IG51bGx9XSwgImVu\nYWJsZWQiOiB0cnVlLCA
iZXhwb3J0X2tleSI6ICJTZWFyY2ggUVJhZGFyIGZvciBvZmZlbnNlIGlk\nIiwgImlkIjogMTQ2LCAibG9naWNfdHlwZSI6ICJhbGwiLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMi\nOiBbXSwgIm5hbWUiOiAiU2VhcmNoIFFSYWRhciBmb3Igb2ZmZW5zZSBpZCIsICJvYmplY3RfdHlw\nZSI6ICJpbmNpZGVudCIsICJ0YWdzIjogW10sICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInR5\ncGUiOiAxLCAidXVpZCI6ICJjODJkYjI0ZS04ZmVkLTRlNzMtOWI3Mi0zMDRjYzEzMDgxNDgiLCAi\ndmlld19pdGVtcyI6IFt7ImNvbnRlbnQiOiAiZDQ2ZGM2ODMtMmQxZS00MjY5LWJlMDctMzE3YmE2\nZDUzOThmIiwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogImFjdGlvbmlu\ndm9jYXRpb24iLCAic2hvd19pZiI6IG51bGwsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJz\ndGVwX2xhYmVsIjogbnVsbH1dLCAid29ya2Zsb3dzIjogWyJxcmFkYXJfc2VhcmNoX2V2ZW50X29m\nZmVuc2UiXX1dLCAiYXV0b21hdGljX3Rhc2tzIjogW10sICJleHBvcnRfZGF0ZSI6IDE1OTUzODU0\nNzQ3MjEsICJleHBvcnRfZm9ybWF0X3ZlcnNpb24iOiAyLCAiZmllbGRzIjogW3siYWxsb3dfZGVm\nYXVsdF92YWx1ZSI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJjYWxjdWxhdGVkIjog\nZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9z\nZW5fYnlfc2VydmVyIjogZmFsc2UsICJkZXByZWNhdGVkIjogZmFsc2UsICJleHBvcnRfa2V5Ijog\nIl9fZnVuY3Rpb24vcXJhZGFyX3F1ZXJ5X3BhcmFtMiIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZh\nbHNlLCAiaWQiOiAxMDA4LCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgImludGVybmFsIjogZmFsc2Us\nICJpc190cmFja2VkIjogZmFsc2UsICJuYW1lIjogInFyYWRhcl9xdWVyeV9wYXJhbTIiLCAib3Bl\ncmF0aW9uX3Blcm1zIjoge30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIiLCAi\ncHJlZml4IjogbnVsbCwgInJlYWRfb25seSI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0\nYWdzIjogW10sICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAicXJhZGFyX3F1ZXJ5X3BhcmFtMiIs\nICJ0b29sdGlwIjogIiIsICJ0eXBlX2lkIjogMTEsICJ1dWlkIjogIjlkOGZjNjQ1LTUwNjctNGFi\nMC1iMTUxLTFkMTA2ZGFkOTFkNiIsICJ2YWx1ZXMiOiBbXX0sIHsiYWxsb3dfZGVmYXVsdF92YWx1\nZSI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJjYWxjdWxhdGVkIjogZmFsc2UsICJj\naGFuZ2VhYmxlIjogdHJ1ZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2Vy\ndmVyIjogZmFsc2UsICJkZXByZWNhdGVkIjogZmFsc2UsICJleHBvcnRfa2V5IjogIl9fZnVuY3Rp\nb24vcXJhZGFyX3F1ZXJ5X3BhcmFtMyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiaWQi\nOiAxMDE0LCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgImludGVybmFsIjogZmFsc2UsICJpc190cmFj\na2VkIjogZmFsc2UsICJuYW1lIjogInFyYWRhcl9xdWVyeV9wYXJhbTMiLCAib3BlcmF0aW9uX3Bl\ncm1zIjoge30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIiLCAicHJlZml4Ijog\nbnVsbCwgInJlYWRfb25seSI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0YWdzIjogW10s\nICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAicXJhZGFyX3F1ZXJ5X3BhcmFtMyIsICJ0b29sdGlw\nIjogIiIsICJ0eXBlX2lkIjogMTEsICJ1dWlkIjogImI2MDUzNjkwLWQ3YjItNDIwNC1iNDhhLWY4\nNjUwM2I0MTlkOSIsICJ2YWx1ZXMiOiBbXX0sIHsiYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZhbHNl\nLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJjYWxjdWxhdGVkIjogZmFsc2UsICJjaGFuZ2VhYmxl\nIjogdHJ1ZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFs\nc2UsICJkZXByZWNhdGVkIjogZmFsc2UsICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vcXJhZGFy\nX3F1ZXJ5X3JhbmdlX2VuZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiaWQiOiAxMDA1\nLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQi\nOiBmYWxzZSwgIm5hbWUiOiAicXJhZGFyX3F1ZXJ5X3JhbmdlX2VuZCIsICJvcGVyYXRpb25fcGVy\nbXMiOiB7fSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVyIjogIiIsICJwcmVmaXgiOiBu\ndWxsLCAicmVhZF9vbmx5IjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRhZ3MiOiBbXSwg\nInRlbXBsYXRlcyI6IFtdLCAidGV4dCI6ICJxcmFkYXJfcXVlcnlfcmFuZ2VfZW5kIiwgInRvb2x0\naXAiOiAiIiwgInR5cGVfaWQiOiAxMSwgInV1aWQiOiAiZDIxZjI4MTQtNDBlNi00ZjdhLWIyNjkt\nNmZmMmM3YTMxOTZlIiwgInZhbHVlcyI6IFtdfSwgeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjogZmFs\nc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFi\nbGUiOiB0cnVlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9z
ZXJ2ZXIiOiBm\nYWxzZSwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9xcmFk\nYXJfcXVlcnlfcmFuZ2Vfc3RhcnQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjog\nMTAwNywgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgImludGVybmFsIjogZmFsc2UsICJpc190cmFj\na2VkIjogZmFsc2UsICJuYW1lIjogInFyYWRhcl9xdWVyeV9yYW5nZV9zdGFydCIsICJvcGVyYXRp\nb25fcGVybXMiOiB7fSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVyIjogIiIsICJwcmVm\naXgiOiBudWxsLCAicmVhZF9vbmx5IjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRhZ3Mi\nOiBbXSwgInRlbXBsYXRlcyI6IFtdLCAidGV4dCI6ICJxcmFkYXJfcXVlcnlfcmFuZ2Vfc3RhcnQi\nLCAidG9vbHRpcCI6ICIiLCAidHlwZV9pZCI6IDExLCAidXVpZCI6ICJjMzNmYmUxZC0xMjVjLTRh\nNzktODJlOC02NjA4ZDFjN2JiNWUiLCAidmFsdWVzIjogW119LCB7ImFsbG93X2RlZmF1bHRfdmFs\ndWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZhbHNlLCAi\nY2hhbmdlYWJsZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3Nl\ncnZlciI6IGZhbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0\naW9uL3FyYWRhcl9xdWVyeV9wYXJhbTQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImlk\nIjogMTAxNSwgImlucHV0X3R5cGUiOiAidGV4dCIsICJpbnRlcm5hbCI6IGZhbHNlLCAiaXNfdHJh\nY2tlZCI6IGZhbHNlLCAibmFtZSI6ICJxcmFkYXJfcXVlcnlfcGFyYW00IiwgIm9wZXJhdGlvbl9w\nZXJtcyI6IHt9LCAib3BlcmF0aW9ucyI6IFtdLCAicGxhY2Vob2xkZXIiOiAiIiwgInByZWZpeCI6\nIG51bGwsICJyZWFkX29ubHkiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGFncyI6IFtd\nLCAidGVtcGxhdGVzIjogW10sICJ0ZXh0IjogInFyYWRhcl9xdWVyeV9wYXJhbTQiLCAidG9vbHRp\ncCI6ICIiLCAidHlwZV9pZCI6IDExLCAidXVpZCI6ICJlZWZjOWU1Ni0yODA2LTQ1MzQtYjQwOS00\nMmUyNTE1NGVjYzkiLCAidmFsdWVzIjogW119LCB7ImFsbG93X2RlZmF1bHRfdmFsdWUiOiBmYWxz\nZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZhbHNlLCAiY2hhbmdlYWJs\nZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZh\nbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL3FyYWRh\ncl9xdWVyeV9hbGxfcmVzdWx0cyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiaWQiOiAx\nMDA5LCAiaW5wdXRfdHlwZSI6ICJzZWxlY3QiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNr\nZWQiOiBmYWxzZSwgIm5hbWUiOiAicXJhZGFyX3F1ZXJ5X2FsbF9yZXN1bHRzIiwgIm9wZXJhdGlv\nbl9wZXJtcyI6IHt9LCAib3BlcmF0aW9ucyI6IFtdLCAicGxhY2Vob2xkZXIiOiAiIiwgInByZWZp\neCI6IG51bGwsICJyZWFkX29ubHkiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGFncyI6\nIFtdLCAidGVtcGxhdGVzIjogW10sICJ0ZXh0IjogInFyYWRhcl9xdWVyeV9hbGxfcmVzdWx0cyIs\nICJ0b29sdGlwIjogIkRpc3BsYXkgYWxsIHJlc3VsdHMgZnJvbSBzZWFyY2guIEJ5IGRlZmF1bHQs\nIGEgcmFuZ2UgZm9yIHRoZSBudW1iZXIgb2YgcmV0dXJuZWQgcmVzdWx0cyBpcyBzZXQuIiwgInR5\ncGVfaWQiOiAxMSwgInV1aWQiOiAiZDdhNTQ0ZmYtNjg5Yi00ZjE1LWIzYzEtYTdlYmQyMGJiZjNi\nIiwgInZhbHVlcyI6IFt7ImRlZmF1bHQiOiBmYWxzZSwgImVuYWJsZWQiOiB0cnVlLCAiaGlkZGVu\nIjogZmFsc2UsICJsYWJlbCI6ICJZZXMiLCAicHJvcGVydGllcyI6IG51bGwsICJ1dWlkIjogImQ0\nODBlMzJjLWZkMGQtNGMwYy04NTBmLTM5MGJmM2E3ZGJlOCIsICJ2YWx1ZSI6IDUwNH0sIHsiZGVm\nYXVsdCI6IHRydWUsICJlbmFibGVkIjogdHJ1ZSwgImhpZGRlbiI6IGZhbHNlLCAibGFiZWwiOiAi\nTm8iLCAicHJvcGVydGllcyI6IG51bGwsICJ1dWlkIjogIjU0YzRlYjUyLWQ5NTUtNGUwNS05Zjc2\nLWMzODE5ODUzZmY2OCIsICJ2YWx1ZSI6IDUwNX1dfSwgeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjog\nZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5n\nZWFibGUiOiB0cnVlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIi\nOiBmYWxzZSwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9x\ncmFkYXJfcmVmZXJlbmNlX3NldF9uYW1lIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJp\nZCI6IDEwMTIsICJpbnB1dF90eXBlIjogInRleHQiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3Ry\nYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAicXJhZGFyX3JlZmVyZW5jZV9zZXRfbmFtZSIsICJvcGVy\nYXRpb25fcGVybXMiOiB7fSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVyIjogIiIsICJw\ncmVmaXgiOiBudWxsLCAicmVhZF9vbmx
5IjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRh\nZ3MiOiBbXSwgInRlbXBsYXRlcyI6IFtdLCAidGV4dCI6ICJxcmFkYXJfcmVmZXJlbmNlX3NldF9u\nYW1lIiwgInRvb2x0aXAiOiAiTmFtZSBvZiBhIFFSYWRhciByZWZlcmVuY2Ugc2V0IiwgInR5cGVf\naWQiOiAxMSwgInV1aWQiOiAiYWE1ZTIxMWQtYjVlMC00Mjg5LTg4YmItNDc1OTVhZmFjMzg1Iiwg\nInZhbHVlcyI6IFtdfSwgeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjogZmFsc2UsICJibGFua19vcHRp\nb24iOiBmYWxzZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAiY2hv\nc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImRlcHJlY2F0\nZWQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9xcmFkYXJfcXVlcnlfcGFyYW0x\nIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJpZCI6IDEwMDYsICJpbnB1dF90eXBlIjog\nInRleHQiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAi\ncXJhZGFyX3F1ZXJ5X3BhcmFtMSIsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgIm9wZXJhdGlvbnMi\nOiBbXSwgInBsYWNlaG9sZGVyIjogIiIsICJwcmVmaXgiOiBudWxsLCAicmVhZF9vbmx5IjogZmFs\nc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRhZ3MiOiBbXSwgInRlbXBsYXRlcyI6IFtdLCAidGV4\ndCI6ICJxcmFkYXJfcXVlcnlfcGFyYW0xIiwgInRvb2x0aXAiOiAiIiwgInR5cGVfaWQiOiAxMSwg\nInV1aWQiOiAiMDFlZDQ2NTItZmU2MS00ZGJmLWJjODMtZjgxYzYyYzU1NjNjIiwgInZhbHVlcyI6\nIFtdfSwgeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxz\nZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAiY2hvc2VuIjogZmFs\nc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImRlcHJlY2F0ZWQiOiBmYWxz\nZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9xcmFkYXJfcmVmZXJlbmNlX3NldF9pdGVtX3Zh\nbHVlIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJpZCI6IDEwMTEsICJpbnB1dF90eXBl\nIjogInRleHQiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUi\nOiAicXJhZGFyX3JlZmVyZW5jZV9zZXRfaXRlbV92YWx1ZSIsICJvcGVyYXRpb25fcGVybXMiOiB7\nfSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVyIjogIiIsICJwcmVmaXgiOiBudWxsLCAi\ncmVhZF9vbmx5IjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRhZ3MiOiBbXSwgInRlbXBs\nYXRlcyI6IFtdLCAidGV4dCI6ICJxcmFkYXJfcmVmZXJlbmNlX3NldF9pdGVtX3ZhbHVlIiwgInRv\nb2x0aXAiOiAiVmFsdWUgb2YgYSBRUmFkYXIgcmVmZXJlbmNlIHNldCBpdGVtIiwgInR5cGVfaWQi\nOiAxMSwgInV1aWQiOiAiZGI1YWYyZWUtY2IxYS00NmM3LTgyZmYtYzZmODhhNWFhN2U5IiwgInZh\nbHVlcyI6IFtdfSwgeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjogZmFsc2UsICJibGFua19vcHRpb24i\nOiBmYWxzZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAiY2hvc2Vu\nIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImRlcHJlY2F0ZWQi\nOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9xcmFkYXJfcXVlcnkiLCAiaGlkZV9u\nb3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjogMTAxMCwgImlucHV0X3R5cGUiOiAidGV4dGFyZWEi\nLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAicXJhZGFy\nX3F1ZXJ5IiwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAib3BlcmF0aW9ucyI6IFtdLCAicGxhY2Vo\nb2xkZXIiOiAiIiwgInByZWZpeCI6IG51bGwsICJyZWFkX29ubHkiOiBmYWxzZSwgInJpY2hfdGV4\ndCI6IGZhbHNlLCAidGFncyI6IFtdLCAidGVtcGxhdGVzIjogW3siaWQiOiA1LCAibmFtZSI6ICJz\nZWFyY2ggZXZlbnRzIGZvciB1c2VybmFtZSIsICJ0ZW1wbGF0ZSI6IHsiZm9ybWF0IjogInRleHQi\nLCAiY29udGVudCI6ICJTRUxFQ1QgJXBhcmFtMSUgRlJPTSBldmVudHMgV0hFUkUgdXNlcm5hbWU9\nJXBhcmFtMiUgTEFTVCAlcGFyYW0zJSBNSU5VVEVTIn0sICJ1dWlkIjogIjM0ZmEzMzAwLWMyOGMt\nNDM0Ni04ZDNiLTc0NWExYWZhNzVkYyJ9LCB7ImlkIjogNCwgIm5hbWUiOiAic2VhcmNoIGlwIGFk\nZHJlc3MiLCAidGVtcGxhdGUiOiB7ImZvcm1hdCI6ICJ0ZXh0IiwgImNvbnRlbnQiOiAiU0VMRUNU\nICVwYXJhbTElIEZST00gZXZlbnRzIFdIRVJFIHNvdXJjZWlwPSclcGFyYW0yJScgTEFTVCAlcGFy\nYW0zJSBNSU5VVEVTIn0sICJ1dWlkIjogImRjYmRiN2M4LTIwNjgtNGU5Ny04MTI3LTU3MTMxY2Jk\nY2NiZiJ9LCB7ImlkIjogNiwgIm5hbWUiOiAic2VhcmNoIGV2ZW50cyBmb3Igb2ZmZW5zZV9pZCIs\nICJ0ZW1wbGF0ZSI6IHsiZm9ybWF0IjogInRleHQiLCAiY29udGVudCI6ICJTRUxFQ1QgJXBhcmFt\nMSUgRlJPTSBldmVudHMgV0hFUkUgSU5PRkZFTlNFKCVwYXJhbTIlKSBMQVNUICVwYXJhbTMlIE1J
[... remainder of the base64-encoded IBM Resilient export payload elided; the blob (truncated in the source) decodes to the JSON customization data for the fn_qradar_integration package: function input fields (qradar_query, qradar_query_param5, qradar_query_all_results, qradar_id), the functions QRadar Add Reference Set Item, QRadar Delete Reference Set Item, QRadar Find Reference Set Item, QRadar Find Reference Sets, and QRadar Search, the fn_qradar_integration message destination, the qradar_offense_event and qradar_reference_set data tables, and five example workflows with their BPMN XML definitions ...]
ZXZhbHVhdGlvbl9pZFwiOjEsXCJmaWVsZF9uYW1lXCI6bnVs\nbCxcIm1ldGhvZFwiOlwic2NyaXB0XCIsXCJ0eXBlXCI6bnVsbCxcInZhbHVlXCI6e1wiZmluYWxf\nZXhwcmVzc2lvbl90ZXh0XCI6XCJ3b3JrZmxvdy5wcm9wZXJ0aWVzLmRlbGV0ZV9vcHJbJ3N0YXR1\nc19jb2RlJ10gIT0gMjAwXCIsXCJsYW5ndWFnZVwiOlwicHl0aG9uXCJ9fV0sXCJjdXN0b21fY29u\nZGl0aW9uXCI6XCJcIixcImxvZ2ljX3R5cGVcIjpcImFsbFwifV1dPjwvY29uZGl0aW9uRXhwcmVz\nc2lvbj48L3NlcXVlbmNlRmxvdz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8x\na3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90\nYXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1c\nIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRc\nIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMHh1ZGZtdlwiPjx0ZXh0Pjwh\nW0NEQVRBW1JlbW92ZSBpdCBmcm9tIHRoZSBTdXNwZWN0IGxpc3Rcbl1dPjwvdGV4dD48L3RleHRB\nbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzBrMGRiYjZcIiBzb3VyY2VS\nZWY9XCJTZXJ2aWNlVGFza18wczhjdmF3XCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMHh1\nZGZtdlwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xaHlmOTZkXCI+PHRl\neHQ+PCFbQ0RBVEFbQWRkIGl0IHRvIHRoZSBCbG9ja2VkIGxpc3Rcbl1dPjwvdGV4dD48L3RleHRB\nbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzAwb2ZydWVcIiBzb3VyY2VS\nZWY9XCJTZXJ2aWNlVGFza18wc2RnOGI1XCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWh5\nZjk2ZFwiLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFc\nIj48YnBtbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBs\nYW5lXzFcIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4\nbVwiIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIz\nNlwiIHdpZHRoPVwiMzZcIiB4PVwiMjAyXCIgeT1cIjE2OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48\nb21nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE5N1wiIHk9XCIyMDNc\nIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFw\nZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0\naW9uXzFreHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBc\nIiB4PVwiOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdl\nIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNl\ndWo0OF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMjA3XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu\ndFwiIHk9XCIxOTdcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTlcIiB4c2k6dHlwZT1cIm9tZ2Rj\nOlBvaW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBi\ncG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzBzOGN2YXdcIiBpZD1cIlNlcnZpY2VUYXNrXzBzOGN2\nYXdfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMzIz\nLjM2MzU0MDU2OTAyXCIgeT1cIjE0NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1O\nRWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xaGFubG8zXCIgaWQ9XCJTZXF1ZW5jZUZs\nb3dfMWhhbmxvM19kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMjM4XCIgeHNpOnR5cGU9XCJvbWdk\nYzpQb2ludFwiIHk9XCIxODZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIzMjNcIiB4c2k6dHlwZT1c\nIm9tZ2RjOlBvaW50XCIgeT1cIjE4NlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRz\nIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjI4MC41XCIgeT1cIjE2NC41XCIvPjwvYnBt\nbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxl\nbWVudD1cIlNlcnZpY2VUYXNrXzBzZGc4YjVcIiBpZD1cIlNlcnZpY2VUYXNrXzBzZGc4YjVfZGlc\nIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiNTg4XCIgeT1c\nIjE0NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1c\nIlNlcXVlbmNlRmxvd18wZm1ub29wXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMGZtbm9vcF9kaVwiPjxv\nbWdkaTp3YXlwb2ludCB4PVwiNDIzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxODZc\nIi8+PG9tZ2RpOndheXBvaW50IHg9XCI0NTdcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIge
T1c\nIjE4NlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lk\ndGg9XCI5MFwiIHg9XCIzOTVcIiB5PVwiMTY0LjVcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBt\nbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMGE3\namc2NlwiIGlkPVwiRW5kRXZlbnRfMGE3amc2Nl9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwi\nMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjc2MlwiIHk9XCIxNjhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+\nPG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiNzM1XCIgeT1cIjIw\nN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVk\nZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMG10bHJraFwiIGlkPVwiU2VxdWVuY2VGbG93\nXzBtdGxya2hfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjY4OFwiIHhzaTp0eXBlPVwib21nZGM6\nUG9pbnRcIiB5PVwiMTg2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNzYyXCIgeHNpOnR5cGU9XCJv\nbWdkYzpQb2ludFwiIHk9XCIxODZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBo\nZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiNjgwXCIgeT1cIjE2NC41XCIvPjwvYnBtbmRp\nOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVu\ndD1cIlRleHRBbm5vdGF0aW9uXzB4dWRmbXZcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzB4dWRmbXZf\nZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMzIzXCIg\neT1cIjUxLjExNzcwNTI0MjMzNDMyNFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1O\nRWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzBrMGRiYjZcIiBpZD1cIkFzc29jaWF0aW9u\nXzBrMGRiYjZfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM3M1wiIHhzaTp0eXBlPVwib21nZGM6\nUG9pbnRcIiB5PVwiMTQ2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMzczXCIgeHNpOnR5cGU9XCJv\nbWdkYzpQb2ludFwiIHk9XCI4MVwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFw\nZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFoeWY5NmRcIiBpZD1cIlRleHRBbm5vdGF0\naW9uXzFoeWY5NmRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBc\nIiB4PVwiNTUwXCIgeT1cIjUxXCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdl\nIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMDBvZnJ1ZVwiIGlkPVwiQXNzb2NpYXRpb25fMDBv\nZnJ1ZV9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNjI1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu\ndFwiIHk9XCIxNDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI2MDVcIiB4c2k6dHlwZT1cIm9tZ2Rj\nOlBvaW50XCIgeT1cIjgxXCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJw\nbW5FbGVtZW50PVwiRXhjbHVzaXZlR2F0ZXdheV8wYmdrM3pwXCIgaWQ9XCJFeGNsdXNpdmVHYXRl\nd2F5XzBiZ2szenBfZGlcIiBpc01hcmtlclZpc2libGU9XCJ0cnVlXCI+PG9tZ2RjOkJvdW5kcyBo\nZWlnaHQ9XCI1MFwiIHdpZHRoPVwiNTBcIiB4PVwiNDU3XCIgeT1cIjE2MVwiLz48YnBtbmRpOkJQ\nTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjE0XCIgd2lkdGg9XCIwXCIgeD1cIjQ4Mlwi\nIHk9XCIyMTRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRp\nOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzBsOXNsMzlcIiBpZD1cIlNlcXVl\nbmNlRmxvd18wbDlzbDM5X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCI1MDdcIiB4c2k6dHlwZT1c\nIm9tZ2RjOlBvaW50XCIgeT1cIjE4NlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjU4OFwiIHhzaTp0\neXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTg2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpC\nb3VuZHMgaGVpZ2h0PVwiMTRcIiB3aWR0aD1cIjQzXCIgeD1cIjUyNlwiIHk9XCIxNjRcIi8+PC9i\ncG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVs\nZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMGZ4NXhudFwiIGlkPVwiU2VxdWVuY2VGbG93XzBmeDV4bnRf\nZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjQ4MlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5\nPVwiMjExXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDgyXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu\ndFwiIHk9XCIyOTJcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI3ODBcIiB4c2k6dHlwZT1cIm9tZ2Rj\nOlBvaW50XCIgeT1cIjI5MlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjc4MFwiIHhzaTp0eXBlPVwi\nb21nZGM6UG9pbnRcIiB5PVwiMjA0XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMg\naGVpZ2h0PVwiMTRcIiB3aWR0aD1cIjIwXCIgeD1cIjYyMVwiIHk9XCIyNzBcIi8+PC9icG1uZGk6\nQlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjwv
YnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpC\nUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiJ9LCAiY29udGVudF92ZXJzaW9uIjogMTEsICJjcmVh\ndG9yX2lkIjogImFAZXhhbXBsZS5jb20iLCAiZGVzY3JpcHRpb24iOiAiUmVtb3ZlIGFuIGl0ZW0g\nZnJvbSB0aGUgUVJhZGFyIHJlZmVyZW5jZSBzZXQsIFx1MjAxY1NhbXBsZSBCbG9ja2VkIElQc1x1\nMjAxZCwgYW5kIGFkZCBpdCB0byByZWZlcmVuY2Ugc2V0LCBcdTIwMWNTYW1wbGUgU3VzcGVjdCBJ\nUHNcdTIwMWQuIEFkZCBhIG5vdGUgdG8gdGhlIEluY2lkZW50IGFmdGVyIGNvbXBsZXRpbmcgZWFj\naCBzdGVwLiIsICJleHBvcnRfa2V5IjogInFyYWRhcl9tb3ZlX2l0ZW1fdG9fZGlmZmVyZW50X3Jl\nZl9zZXQiLCAibGFzdF9tb2RpZmllZF9ieSI6ICJhQGV4YW1wbGUuY29tIiwgImxhc3RfbW9kaWZp\nZWRfdGltZSI6IDE1OTUzODU0MzcxNTgsICJuYW1lIjogIkV4YW1wbGUgb2YgbW92aW5nIFFSYWRh\nciBpdGVtIGZyb20gb25lIHJlZmVyZW5jZSBzZXQgdG8gYW5vdGhlciIsICJvYmplY3RfdHlwZSI6\nICJhcnRpZmFjdCIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJxcmFkYXJfbW92ZV9pdGVtX3RvX2Rp\nZmZlcmVudF9yZWZfc2V0IiwgInRhZ3MiOiBbXSwgInV1aWQiOiAiZTUwMTE0ZTYtNTc2ZC00MGNh\nLWE1NmItMjBhMmE4YzM4ODAwIiwgIndvcmtmbG93X2lkIjogNjd9LCB7ImFjdGlvbnMiOiBbXSwg\nImNvbnRlbnQiOiB7InZlcnNpb24iOiA2LCAid29ya2Zsb3dfaWQiOiAicXJhZGFyX2FkZF9yZWZl\ncmVuY2Vfc2V0X2l0ZW0iLCAieG1sIjogIjw/eG1sIHZlcnNpb249XCIxLjBcIiBlbmNvZGluZz1c\nIlVURi04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQ\nTU4vMjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3Bl\nYy9CUE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3Bl\nYy9ERC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMv\nREQvMjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8vcmVzaWxpZW50LmlibS5j\nb20vYnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hXCIg\neG1sbnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2VcIiB0\nYXJnZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rlc3RcIj48cHJvY2VzcyBp\nZD1cInFyYWRhcl9hZGRfcmVmZXJlbmNlX3NldF9pdGVtXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwi\nIG5hbWU9XCJFeGFtcGxlIG9mIGFkZGluZyBhbiBpdGVtIHRvIFFSYWRhciByZWZlcmVuY2Ugc2V0\nXCI+PGRvY3VtZW50YXRpb24+QWRkIGFuIElQIGFkZHJlc3MgYXJ0aWZhY3QgdG8gdGhlIFFSYWRh\nciByZWZlcmVuY2Ugc2V0LCBcdTIwMWNTYW1wbGUgQmxvY2tlZCBJUHNcdTIwMWQuPC9kb2N1bWVu\ndGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNl\ncXVlbmNlRmxvd18wZHJtdXE2PC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlk\nPVwiU2VydmljZVRhc2tfMXR5czdwc1wiIG5hbWU9XCJRUmFkYXIgQWRkIFJlZmVyZW5jZSBTZXQg\nSXRlbVwiIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJl\nc2lsaWVudDpmdW5jdGlvbiB1dWlkPVwiMzBiNjg5OWEtZDAxNS00OGMzLThmZDktNTAwNzg4ZDRi\nNDM3XCI+e1wiaW5wdXRzXCI6e1wiYWE1ZTIxMWQtYjVlMC00Mjg5LTg4YmItNDc1OTVhZmFjMzg1\nXCI6e1wiaW5wdXRfdHlwZVwiOlwic3RhdGljXCIsXCJzdGF0aWNfaW5wdXRcIjp7XCJtdWx0aXNl\nbGVjdF92YWx1ZVwiOltdLFwidGV4dF92YWx1ZVwiOlwiU2FtcGxlIEJsb2NrZWQgSVBzXCJ9fX0s\nXCJwb3N0X3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpZiByZXN1bHRzLnN0YXR1c19jb2RlID09IDIw\nMDpcXG4gIGluY2lkZW50LmFkZE5vdGUodVxcXCJJUDoge30gYWRkZWQgdG8gYmxvY2tlZCBJUHMg\ncmVmZXJlbmNlIHNldDoge31cXFwiLmZvcm1hdChhcnRpZmFjdC52YWx1ZSwgcmVzdWx0c1snY29u\ndGVudCddWyduYW1lJ10pKVxcbmVsc2U6XFxuICBpbmNpZGVudC5hZGROb3RlKHVcXFwiRmFpbGVk\nIHRvIGFkZCBJUDoge30gdG8gcmVmZXJlbmNlIHNldC4gU3RhdHVzIENvZGU6IHt9LCBtZXNzYWdl\nOiB7fVxcXCIuZm9ybWF0KGFydGlmYWN0LnZhbHVlLCByZXN1bHRzLnN0YXR1c19jb2RlLCByZXN1\nbHRzWydjb250ZW50J11bJ21lc3NhZ2UnXSkpXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpc\nImlucHV0cy5xcmFkYXJfcmVmZXJlbmNlX3NldF9pdGVtX3ZhbHVlID0gYXJ0aWZhY3QudmFsdWVc\nIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1\nZW5jZUZsb3dfMGRybXVxNjwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18wZGh4bW1k\nPC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3df\nMGRybXV
xNlwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNl\ncnZpY2VUYXNrXzF0eXM3cHNcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMDZzb2Z5aVwiPjxp\nbmNvbWluZz5TZXF1ZW5jZUZsb3dfMGRoeG1tZDwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVu\nY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzBkaHhtbWRcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFz\na18xdHlzN3BzXCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMDZzb2Z5aVwiLz48dGV4dEFubm90YXRp\nb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxv\ndyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRp\nb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1c\nIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90\nYXRpb25fMHhpeWwxdFwiPjx0ZXh0PjwhW0NEQVRBW0FkZCByZXN1bHRzIGFzIGFuIGluY2lkZW50\nIG5vdGVcbl1dPjwvdGV4dD48L3RleHRBbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29j\naWF0aW9uXzE5eGtxbDhcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18xdHlzN3BzXCIgdGFyZ2V0\nUmVmPVwiVGV4dEFubm90YXRpb25fMHhpeWwxdFwiLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlh\nZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBtbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1c\nInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFcIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxl\nbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+\nPG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwiMjA1XCIgeT1cIjE2\nNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1c\nIjkwXCIgeD1cIjIwMFwiIHk9XCIyMDFcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQ\nTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFr\neHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhl\naWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwiOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQ\nTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0\nOFwiIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMjA5\nXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxOTVcIi8+PG9tZ2RpOndheXBvaW50IHg9\nXCIxNTlcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1O\nRWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzF0eXM3cHNc\nIiBpZD1cIlNlcnZpY2VUYXNrXzF0eXM3cHNfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgw\nXCIgd2lkdGg9XCIxMDBcIiB4PVwiMzAzXCIgeT1cIjE0NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+\nPGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18wZHJtdXE2XCIgaWQ9\nXCJTZXF1ZW5jZUZsb3dfMGRybXVxNl9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMjQxXCIgeHNp\nOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxODRcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIzMDNc\nIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjE4NFwiLz48YnBtbmRpOkJQTU5MYWJlbD48\nb21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjI3MlwiIHk9XCIxNjJc\nIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBl\nIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMDZzb2Z5aVwiIGlkPVwiRW5kRXZlbnRfMDZzb2Z5aV9k\naVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjQ3M1wiIHk9\nXCIxNjZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdp\nZHRoPVwiOTBcIiB4PVwiNDQ2XCIgeT1cIjIwNVwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1u\nZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3df\nMGRoeG1tZFwiIGlkPVwiU2VxdWVuY2VGbG93XzBkaHhtbWRfZGlcIj48b21nZGk6d2F5cG9pbnQg\neD1cIjQwM1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTg0XCIvPjxvbWdkaTp3YXlw\nb2ludCB4PVwiNDczXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxODRcIi8+PGJwbW5k\naTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwi\nMzkzXCIgeT1cIjE2Mi41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48\nYnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlRleHRBbm5v
dGF0aW9uXzB4aXlsMXRcIiBp\nZD1cIlRleHRBbm5vdGF0aW9uXzB4aXlsMXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjQ3\nXCIgd2lkdGg9XCIxMjRcIiB4PVwiNDAzXCIgeT1cIjcxXCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48\nYnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMTl4a3FsOFwiIGlkPVwi\nQXNzb2NpYXRpb25fMTl4a3FsOF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMzk4XCIgeHNpOnR5\ncGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxNDlcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI0MzZcIiB4\nc2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjExOFwiLz48L2JwbW5kaTpCUE1ORWRnZT48L2Jw\nbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4ifSwgImNv\nbnRlbnRfdmVyc2lvbiI6IDYsICJjcmVhdG9yX2lkIjogImFAZXhhbXBsZS5jb20iLCAiZGVzY3Jp\ncHRpb24iOiAiQWRkIGFuIElQIGFkZHJlc3MgYXJ0aWZhY3QgdG8gdGhlIFFSYWRhciByZWZlcmVu\nY2Ugc2V0LCBcdTIwMWNTYW1wbGUgQmxvY2tlZCBJUHNcdTIwMWQuIiwgImV4cG9ydF9rZXkiOiAi\ncXJhZGFyX2FkZF9yZWZlcmVuY2Vfc2V0X2l0ZW0iLCAibGFzdF9tb2RpZmllZF9ieSI6ICJhQGV4\nYW1wbGUuY29tIiwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1OTUzODQ5MzIxOTIsICJuYW1lIjog\nIkV4YW1wbGUgb2YgYWRkaW5nIGFuIGl0ZW0gdG8gUVJhZGFyIHJlZmVyZW5jZSBzZXQiLCAib2Jq\nZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAicXJhZGFyX2FkZF9y\nZWZlcmVuY2Vfc2V0X2l0ZW0iLCAidGFncyI6IFtdLCAidXVpZCI6ICI5MjgxOTBiYy1jZjllLTQ5\nZTUtOWM5NS1jZDBiYWU5ZDlhYzYiLCAid29ya2Zsb3dfaWQiOiA2Nn1dLCAid29ya3NwYWNlcyI6\nIFtdfQ==\n\"\"\")", "title": "" }, { "docid": "7f44f45b61b12bb24d9894bcfbebb2d5", "score": "0.49117467", "text": "def customization_data(client=None):\n\n yield ImportDefinition(u\"\"\"\neyJhY3Rpb25fb3JkZXIiOiBbXSwgImFjdGlvbnMiOiBbeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29u\nZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IHRydWUsICJleHBvcnRfa2V5IjogIkRhdGEgRmVlZGVy\nOiBBcnRpZmFjdCIsICJpZCI6IDE0LCAibG9naWNfdHlwZSI6ICJhbGwiLCAibWVzc2FnZV9kZXN0\naW5hdGlvbnMiOiBbImZlZWRfZGF0YSJdLCAibmFtZSI6ICJEYXRhIEZlZWRlcjogQXJ0aWZhY3Qi\nLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRz\nIjogODY0MDAsICJ0eXBlIjogMCwgInV1aWQiOiAiM2M2MjdhYTgtNTgxMC00NGE0LWEyNWQtZTVh\nOGRiMTliNmE2IiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFtdfSwgeyJhdXRvbWF0\naW9ucyI6IFtdLCAiY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IHRydWUsICJleHBvcnRfa2V5\nIjogIkRhdGEgRmVlZGVyOiBBdHRhY2htZW50IiwgImlkIjogMTUsICJsb2dpY190eXBlIjogImFs\nbCIsICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFsiZmVlZF9kYXRhIl0sICJuYW1lIjogIkRhdGEg\nRmVlZGVyOiBBdHRhY2htZW50IiwgIm9iamVjdF90eXBlIjogImF0dGFjaG1lbnQiLCAidGFncyI6\nIFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMCwgInV1aWQiOiAiMmI2Mjhi\nOGMtMWI1Mi00ZTUxLWE1ZjMtYzMyM2Q3ZmYwMzdlIiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtm\nbG93cyI6IFtdfSwgeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxl\nZCI6IHRydWUsICJleHBvcnRfa2V5IjogIkRhdGEgRmVlZGVyOiBJbmNpZGVudCIsICJpZCI6IDE2\nLCAibG9naWNfdHlwZSI6ICJhbGwiLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbImZlZWRfZGF0\nYSJdLCAibmFtZSI6ICJEYXRhIEZlZWRlcjogSW5jaWRlbnQiLCAib2JqZWN0X3R5cGUiOiAiaW5j\naWRlbnQiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMCwg\nInV1aWQiOiAiNWJjMGI5OWItOGY4Ny00OGRlLTk3ZDktOTMzM2YxMTM5ZDVkIiwgInZpZXdfaXRl\nbXMiOiBbXSwgIndvcmtmbG93cyI6IFtdfSwgeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29uZGl0aW9u\ncyI6IFtdLCAiZW5hYmxlZCI6IHRydWUsICJleHBvcnRfa2V5IjogIkRhdGEgRmVlZGVyOiBNaWxl\nc3RvbmUiLCAiaWQiOiAxNywgImxvZ2ljX3R5cGUiOiAiYWxsIiwgIm1lc3NhZ2VfZGVzdGluYXRp\nb25zIjogWyJmZWVkX2RhdGEiXSwgIm5hbWUiOiAiRGF0YSBGZWVkZXI6IE1pbGVzdG9uZSIsICJv\nYmplY3RfdHlwZSI6ICJtaWxlc3RvbmUiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjog\nODY0MDAsICJ0eXBlIjogMCwgInV1aWQiOiAiYzdmY2FmNTAtNDQwMi00YzYyLTk1NTItYzI2ZGY2\nZTViZTliIiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFtdfSwgeyJhdXRvbWF0aW9u\ncyI6IFtdLCAiY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IHRydWUsICJleHBvcn
Rfa2V5Ijog\nIkRhdGEgRmVlZGVyOiBOb3RlIiwgImlkIjogMTgsICJsb2dpY190eXBlIjogImFsbCIsICJtZXNz\nYWdlX2Rlc3RpbmF0aW9ucyI6IFsiZmVlZF9kYXRhIl0sICJuYW1lIjogIkRhdGEgRmVlZGVyOiBO\nb3RlIiwgIm9iamVjdF90eXBlIjogIm5vdGUiLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNvbmRz\nIjogODY0MDAsICJ0eXBlIjogMCwgInV1aWQiOiAiNzgwZjJlYmUtOWFhYy00MWU5LTk4YWItNzA2\nODhhYzlhZjdhIiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFtdfSwgeyJhdXRvbWF0\naW9ucyI6IFtdLCAiY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IHRydWUsICJleHBvcnRfa2V5\nIjogIkRhdGEgRmVlZGVyOiBTeW5jIEluY2lkZW50cyIsICJpZCI6IDE5LCAibG9naWNfdHlwZSI6\nICJhbGwiLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIm5hbWUiOiAiRGF0YSBGZWVkZXI6\nIFN5bmMgSW5jaWRlbnRzIiwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgInRhZ3MiOiBbXSwg\nInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidHlwZSI6IDEsICJ1dWlkIjogIjE3NGFiYzE4LWRj\nNzItNDEzMC1hNWM4LTY0MjFmYjQ0OWYxMiIsICJ2aWV3X2l0ZW1zIjogW3siY29udGVudCI6ICJj\nYzUzMmEyMi1lOTBmLTQ2ZTQtOTE5YS0wYWQxMjk2Nzk2YmYiLCAiZWxlbWVudCI6ICJmaWVsZF91\ndWlkIiwgImZpZWxkX3R5cGUiOiAiYWN0aW9uaW52b2NhdGlvbiIsICJzaG93X2lmIjogbnVsbCwg\nInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJjb250ZW50\nIjogIjNlMmU5M2RlLTZiYTUtNGFkZi1iMDQ0LTY2YTQ2MDlkZDc3ZCIsICJlbGVtZW50IjogImZp\nZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJhY3Rpb25pbnZvY2F0aW9uIiwgInNob3dfaWYiOiBu\ndWxsLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7ImNv\nbnRlbnQiOiAiN2VkM2FmMDMtZDNiMy00MWVkLWE3MjEtOTcwNTQzNDYzNTk4IiwgImVsZW1lbnQi\nOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogImFjdGlvbmludm9jYXRpb24iLCAic2hvd19p\nZiI6IG51bGwsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJzdGVwX2xhYmVsIjogbnVsbH1d\nLCAid29ya2Zsb3dzIjogWyJkYXRhX2ZlZWRlcl9zeW5jX2luY2lkZW50cyJdfSwgeyJhdXRvbWF0\naW9ucyI6IFtdLCAiY29uZGl0aW9ucyI6IFtdLCAiZW5hYmxlZCI6IHRydWUsICJleHBvcnRfa2V5\nIjogIkRhdGEgRmVlZGVyOiBUYXNrIiwgImlkIjogMjAsICJsb2dpY190eXBlIjogImFsbCIsICJt\nZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFsiZmVlZF9kYXRhIl0sICJuYW1lIjogIkRhdGEgRmVlZGVy\nOiBUYXNrIiwgIm9iamVjdF90eXBlIjogInRhc2siLCAidGFncyI6IFtdLCAidGltZW91dF9zZWNv\nbmRzIjogODY0MDAsICJ0eXBlIjogMCwgInV1aWQiOiAiZWUwZDkyZWUtZTUzZC00ZWJkLWEyOGYt\nZGY5NTllOTQ5ZWQ3IiwgInZpZXdfaXRlbXMiOiBbXSwgIndvcmtmbG93cyI6IFtdfV0sICJhdXRv\nbWF0aWNfdGFza3MiOiBbXSwgImV4cG9ydF9kYXRlIjogMTU5OTU4NDgyMDgxOSwgImV4cG9ydF9m\nb3JtYXRfdmVyc2lvbiI6IDIsICJmaWVsZHMiOiBbeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjogZmFs\nc2UsICJibGFua19vcHRpb24iOiB0cnVlLCAiY2FsY3VsYXRlZCI6IGZhbHNlLCAiY2hhbmdlYWJs\nZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZh\nbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2RmX3F1\nZXJ5X2FwaV9tZXRob2QiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjogMTg1LCAi\naW5wdXRfdHlwZSI6ICJib29sZWFuIiwgImludGVybmFsIjogZmFsc2UsICJpc190cmFja2VkIjog\nZmFsc2UsICJuYW1lIjogImRmX3F1ZXJ5X2FwaV9tZXRob2QiLCAib3BlcmF0aW9uX3Blcm1zIjog\ne30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIiLCAicHJlZml4IjogbnVsbCwg\nInJlYWRfb25seSI6IGZhbHNlLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgInJpY2hfdGV4dCI6IGZh\nbHNlLCAidGFncyI6IFtdLCAidGVtcGxhdGVzIjogW10sICJ0ZXh0IjogImRmX3F1ZXJ5X2FwaV9t\nZXRob2QiLCAidG9vbHRpcCI6ICIiLCAidHlwZV9pZCI6IDExLCAidXVpZCI6ICI3MzFlOTRmZi04\nMjJmLTQ4ZjEtODNhOS1hNzgzODBmZDYzNmIiLCAidmFsdWVzIjogW119LCB7ImFsbG93X2RlZmF1\nbHRfdmFsdWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZh\nbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2Vu\nX2J5X3NlcnZlciI6IGZhbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJf\nX2Z1bmN0aW9uL2RmX21pbl9pbmNpZGVudF9pZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNl\nLCAiaWQiOiAxODQsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJpbnRlcm5hbCI6IGZhbHNlLCAi\naXNfdHJhY2tlZCI6IGZhbHNlLCAibmFtZ
SI6ICJkZl9taW5faW5jaWRlbnRfaWQiLCAib3BlcmF0\naW9uX3Blcm1zIjoge30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIiLCAicHJl\nZml4IjogbnVsbCwgInJlYWRfb25seSI6IGZhbHNlLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgInJp\nY2hfdGV4dCI6IGZhbHNlLCAidGFncyI6IFtdLCAidGVtcGxhdGVzIjogW10sICJ0ZXh0IjogImRm\nX21pbl9pbmNpZGVudF9pZCIsICJ0b29sdGlwIjogIkVudGVyIGFuIGluY2lkZW50ICMgb3IgMCB0\nbyBpbmRpY2F0ZSB0aGUgc3RhcnQgb2YgYWxsIGluY2lkZW50cyIsICJ0eXBlX2lkIjogMTEsICJ1\ndWlkIjogImI4MGQxMWQ0LTljNmItNGNkNy05NTFhLTRmZThjNTcyYzllZiIsICJ2YWx1ZXMiOiBb\nXX0sIHsiYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2Us\nICJjYWxjdWxhdGVkIjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImNob3NlbiI6IGZhbHNl\nLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJkZXByZWNhdGVkIjogZmFsc2Us\nICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vZGZfbWF4X2luY2lkZW50X2lkIiwgImhpZGVfbm90\naWZpY2F0aW9uIjogZmFsc2UsICJpZCI6IDE4MywgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgImlu\ndGVybmFsIjogZmFsc2UsICJpc190cmFja2VkIjogZmFsc2UsICJuYW1lIjogImRmX21heF9pbmNp\nZGVudF9pZCIsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNl\naG9sZGVyIjogIiIsICJwcmVmaXgiOiBudWxsLCAicmVhZF9vbmx5IjogZmFsc2UsICJyaWNoX3Rl\neHQiOiBmYWxzZSwgInRhZ3MiOiBbXSwgInRlbXBsYXRlcyI6IFtdLCAidGV4dCI6ICJkZl9tYXhf\naW5jaWRlbnRfaWQiLCAidG9vbHRpcCI6ICJFbnRlciBpbmNpZGVudCAjIGZvciB1cHBlciByYW5n\nZSBvciAwIHRvIGluZGljYXRlIGFsbCBpbmNpZGVudHMiLCAidHlwZV9pZCI6IDExLCAidXVpZCI6\nICJlNzgyMGU0NC00MDg3LTRjZTItODRhZi0wZmU5MzYzMGEwM2MiLCAidmFsdWVzIjogW119LCB7\nImFsbG93X2RlZmF1bHRfdmFsdWUiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiY2Fs\nY3VsYXRlZCI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRl\nZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhw\nb3J0X2tleSI6ICJhY3Rpb25pbnZvY2F0aW9uL2RhdGFfZmVlZGVyX21heGltdW1faW5jaWRlbnRf\naWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjogMTgyLCAiaW5wdXRfdHlwZSI6\nICJudW1iZXIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUi\nOiAiZGF0YV9mZWVkZXJfbWF4aW11bV9pbmNpZGVudF9pZCIsICJvcGVyYXRpb25fcGVybXMiOiB7\nfSwgIm9wZXJhdGlvbnMiOiBbXSwgInBsYWNlaG9sZGVyIjogIiIsICJwcmVmaXgiOiAicHJvcGVy\ndGllcyIsICJyZWFkX29ubHkiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGFncyI6IFtd\nLCAidGVtcGxhdGVzIjogW10sICJ0ZXh0IjogIk1heGltdW0gSW5jaWRlbnQgSUQiLCAidG9vbHRp\ncCI6ICJFbnRlciBJbmNpZGVudCBJRCB0byBzeW5jIHVwIHRvIG9yIDAgdG8gaW5kaWNhdGUgYWxs\nIGluY2lkZW50cyIsICJ0eXBlX2lkIjogNiwgInV1aWQiOiAiM2UyZTkzZGUtNmJhNS00YWRmLWIw\nNDQtNjZhNDYwOWRkNzdkIiwgInZhbHVlcyI6IFtdfSwgeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjog\nZmFsc2UsICJibGFua19vcHRpb24iOiB0cnVlLCAiY2FsY3VsYXRlZCI6IGZhbHNlLCAiY2hhbmdl\nYWJsZSI6IHRydWUsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6\nIGZhbHNlLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJhY3Rpb25pbnZvY2F0\naW9uL3F1ZXJ5X2FwaV9tZXRob2QiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjog\nMTgxLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgImludGVybmFsIjogZmFsc2UsICJpc190cmFj\na2VkIjogZmFsc2UsICJuYW1lIjogInF1ZXJ5X2FwaV9tZXRob2QiLCAib3BlcmF0aW9uX3Blcm1z\nIjoge30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIiLCAicHJlZml4IjogInBy\nb3BlcnRpZXMiLCAicmVhZF9vbmx5IjogZmFsc2UsICJyZXF1aXJlZCI6ICJhbHdheXMiLCAicmlj\naF90ZXh0IjogZmFsc2UsICJ0YWdzIjogW10sICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAiUXVl\ncnkgQVBJIE1ldGhvZCIsICJ0b29sdGlwIjogIlNwZWNpZnkgdHJ1ZSBpZiBlcnJvcnMgb2NjdXIg\nd2hlbiB1c2luZyB0aGUgZGVmYXVsdCBzZWFyY2ggY2FwYWJpbGl0eSIsICJ0eXBlX2lkIjogNiwg\nInV1aWQiOiAiN2VkM2FmMDMtZDNiMy00MWVkLWE3MjEtOTcwNTQzNDYzNTk4IiwgInZhbHVlcyI6\nIFtdfSwgeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxz\nZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAiY2hvc2VuIjogZmFs\n
c2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImRlcHJlY2F0ZWQiOiBmYWxz\nZSwgImV4cG9ydF9rZXkiOiAiYWN0aW9uaW52b2NhdGlvbi9kYXRhX2ZlZWRlcl9taW5pbXVtX2lu\nY2lkZW50X2lkIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJpZCI6IDE4MCwgImlucHV0\nX3R5cGUiOiAibnVtYmVyIiwgImludGVybmFsIjogZmFsc2UsICJpc190cmFja2VkIjogZmFsc2Us\nICJuYW1lIjogImRhdGFfZmVlZGVyX21pbmltdW1faW5jaWRlbnRfaWQiLCAib3BlcmF0aW9uX3Bl\ncm1zIjoge30sICJvcGVyYXRpb25zIjogW10sICJwbGFjZWhvbGRlciI6ICIiLCAicHJlZml4Ijog\nInByb3BlcnRpZXMiLCAicmVhZF9vbmx5IjogZmFsc2UsICJyZXF1aXJlZCI6ICJhbHdheXMiLCAi\ncmljaF90ZXh0IjogZmFsc2UsICJ0YWdzIjogW10sICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAi\nTWluaW11bSBJbmNpZGVudCBJRCIsICJ0b29sdGlwIjogIkVudGVyIEluY2lkZW50IElEIHRvIHN0\nYXJ0IHN5bmMgb3IgMCIsICJ0eXBlX2lkIjogNiwgInV1aWQiOiAiY2M1MzJhMjItZTkwZi00NmU0\nLTkxOWEtMGFkMTI5Njc5NmJmIiwgInZhbHVlcyI6IFtdfSwgeyJleHBvcnRfa2V5IjogImluY2lk\nZW50L2ludGVybmFsX2N1c3RvbWl6YXRpb25zX2ZpZWxkIiwgImlkIjogMCwgImlucHV0X3R5cGUi\nOiAidGV4dCIsICJpbnRlcm5hbCI6IHRydWUsICJuYW1lIjogImludGVybmFsX2N1c3RvbWl6YXRp\nb25zX2ZpZWxkIiwgInJlYWRfb25seSI6IHRydWUsICJ0ZXh0IjogIkN1c3RvbWl6YXRpb25zIEZp\nZWxkIChpbnRlcm5hbCkiLCAidHlwZV9pZCI6IDAsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFl\nOC1hZDM5LTRhMDAwNDA0NGFhMSJ9XSwgImZ1bmN0aW9ucyI6IFt7ImNyZWF0b3IiOiB7ImRpc3Bs\nYXlfbmFtZSI6ICJpbnRlZ3JhdGlvbnMiLCAiaWQiOiA0LCAibmFtZSI6ICJlYjJkMWY3ZC02NjUx\nLTQxNWEtYjRmZi1hMTRmY2QyZjg0ZjUiLCAidHlwZSI6ICJhcGlrZXkifSwgImRlc2NyaXB0aW9u\nIjogeyJmb3JtYXQiOiAidGV4dCIsICJjb250ZW50IjogIlN5bmNocm9uaXplIEluY2lkZW50KHMp\nIGFuZCB0aGVpciBhc3NvY2lhdGVkIHRhc2tzLCBub3RlcywgYXR0YWNobWVudHMsIGFydGlmYWN0\ncywgbWlsZXN0b25lcyBhbmQgYXNzb2NpYXRlZCBkYXRhdGFibGVzIn0sICJkZXN0aW5hdGlvbl9o\nYW5kbGUiOiAiZmVlZF9kYXRhIiwgImRpc3BsYXlfbmFtZSI6ICJEYXRhIEZlZWRlcjogU3luYyBJ\nbmNpZGVudHMiLCAiZXhwb3J0X2tleSI6ICJkYXRhX2ZlZWRlcl9zeW5jX2luY2lkZW50cyIsICJp\nZCI6IDEsICJsYXN0X21vZGlmaWVkX2J5IjogeyJkaXNwbGF5X25hbWUiOiAiaW50ZWdyYXRpb25z\nIiwgImlkIjogNCwgIm5hbWUiOiAiZWIyZDFmN2QtNjY1MS00MTVhLWI0ZmYtYTE0ZmNkMmY4NGY1\nIiwgInR5cGUiOiAiYXBpa2V5In0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTc3MjQwMTI4Njk1\nLCAibmFtZSI6ICJkYXRhX2ZlZWRlcl9zeW5jX2luY2lkZW50cyIsICJ0YWdzIjogW10sICJ1dWlk\nIjogIjdmZmVkNGU1LTcyZmItNDE2Mi1iZGVmLTRlYTNlYmZhODlkZSIsICJ2ZXJzaW9uIjogMSwg\nInZpZXdfaXRlbXMiOiBbeyJjb250ZW50IjogImI4MGQxMWQ0LTljNmItNGNkNy05NTFhLTRmZThj\nNTcyYzllZiIsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0\naW9uIiwgInNob3dfaWYiOiBudWxsLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAic3RlcF9s\nYWJlbCI6IG51bGx9LCB7ImNvbnRlbnQiOiAiZTc4MjBlNDQtNDA4Ny00Y2UyLTg0YWYtMGZlOTM2\nMzBhMDNjIiwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rp\nb24iLCAic2hvd19pZiI6IG51bGwsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJzdGVwX2xh\nYmVsIjogbnVsbH0sIHsiY29udGVudCI6ICI3MzFlOTRmZi04MjJmLTQ4ZjEtODNhOS1hNzgzODBm\nZDYzNmIiLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlv\nbiIsICJzaG93X2lmIjogbnVsbCwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgInN0ZXBfbGFi\nZWwiOiBudWxsfV0sICJ3b3JrZmxvd3MiOiBbeyJhY3Rpb25zIjogW10sICJkZXNjcmlwdGlvbiI6\nIG51bGwsICJuYW1lIjogIkRhdGEgRmVlZGVyOiBTeW5jIEluY2lkZW50cyIsICJvYmplY3RfdHlw\nZSI6ICJpbmNpZGVudCIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJkYXRhX2ZlZWRlcl9zeW5jX2lu\nY2lkZW50cyIsICJ0YWdzIjogW10sICJ1dWlkIjogbnVsbCwgIndvcmtmbG93X2lkIjogMX1dfV0s\nICJnZW9zIjogbnVsbCwgImdyb3VwcyI6IG51bGwsICJpZCI6IDEsICJpbmJvdW5kX21haWxib3hl\ncyI6IG51bGwsICJpbmNpZGVudF9hcnRpZmFjdF90eXBlcyI6IFtdLCAiaW5jaWRlbnRfdHlwZXMi\nOiBbeyJ1cGRhdGVfZGF0ZSI6IDE1OTk1ODQ4MDY3NDUsICJjcmVhdGVfZGF0ZSI6IDE1OTk1ODQ4\nMDY3NDUsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1hZDM5LTRhMDAwNDA0NGFhMCIsICJk\nZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ
2VzIChpbnRlcm5hbCkiLCAiZXhwb3J0\nX2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAibmFtZSI6ICJDdXN0\nb21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZW5hYmxlZCI6IGZhbHNlLCAic3lzdGVt\nIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjogZmFsc2UsICJpZCI6IDB9XSwg\nImluZHVzdHJpZXMiOiBudWxsLCAibGF5b3V0cyI6IFtdLCAibG9jYWxlIjogbnVsbCwgIm1lc3Nh\nZ2VfZGVzdGluYXRpb25zIjogW3siYXBpX2tleXMiOiBbImViMmQxZjdkLTY2NTEtNDE1YS1iNGZm\nLWExNGZjZDJmODRmNSJdLCAiZGVzdGluYXRpb25fdHlwZSI6IDAsICJleHBlY3RfYWNrIjogdHJ1\nZSwgImV4cG9ydF9rZXkiOiAiZmVlZF9kYXRhIiwgIm5hbWUiOiAiZmVlZF9kYXRhIiwgInByb2dy\nYW1tYXRpY19uYW1lIjogImZlZWRfZGF0YSIsICJ0YWdzIjogW10sICJ1c2VycyI6IFtdLCAidXVp\nZCI6ICJlMDUyODJmYi02Y2ZjLTQ3MDktYWY4NC1jZDcxNDM4NTgxYzgifV0sICJub3RpZmljYXRp\nb25zIjogbnVsbCwgIm92ZXJyaWRlcyI6IFtdLCAicGhhc2VzIjogW10sICJyZWd1bGF0b3JzIjog\nbnVsbCwgInJvbGVzIjogW10sICJzY3JpcHRzIjogW10sICJzZXJ2ZXJfdmVyc2lvbiI6IHsiYnVp\nbGRfbnVtYmVyIjogMzIsICJtYWpvciI6IDM1LCAibWlub3IiOiAyLCAidmVyc2lvbiI6ICIzNS4y\nLjMyIn0sICJ0YWdzIjogW10sICJ0YXNrX29yZGVyIjogW10sICJ0aW1lZnJhbWVzIjogbnVsbCwg\nInR5cGVzIjogW10sICJ3b3JrZmxvd3MiOiBbeyJhY3Rpb25zIjogW10sICJjb250ZW50IjogeyJ2\nZXJzaW9uIjogMSwgIndvcmtmbG93X2lkIjogImRhdGFfZmVlZGVyX3N5bmNfaW5jaWRlbnRzIiwg\nInhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5p\ndGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVM\nXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9E\nSVwiIHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENc\nIiB4bWxuczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIg\neG1sbnM6cmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4\nc2Q9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6\nLy93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwi\naHR0cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJkYXRhX2ZlZWRlcl9z\neW5jX2luY2lkZW50c1wiIGlzRXhlY3V0YWJsZT1cInRydWVcIiBuYW1lPVwiRGF0YSBGZWVkZXI6\nIFN5bmMgSW5jaWRlbnRzXCI+PGRvY3VtZW50YXRpb24+U3luY2hyb25pemUgSW5jaWRlbnQocykg\nYW5kIHRoZWlyIGFzc29jaWF0ZWQgdGFza3MsIG5vdGVzLCBhdHRhY2htZW50cywgYXJ0aWZhY3Rz\nLCBtaWxlc3RvbmVzIGFuZCBhc3NvY2lhdGVkIGRhdGF0YWJsZXM8L2RvY3VtZW50YXRpb24+PHN0\nYXJ0RXZlbnQgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+U2VxdWVuY2VGbG93\nXzFndmxudmc8L291dGdvaW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNl\nVGFza18weW9mN2hpXCIgbmFtZT1cIkRhdGEgRmVlZGVyOiBTeW5jIEluY2lkZW50c1wiIHJlc2ls\naWVudDp0eXBlPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVudDpmdW5j\ndGlvbiB1dWlkPVwiN2ZmZWQ0ZTUtNzJmYi00MTYyLWJkZWYtNGVhM2ViZmE4OWRlXCI+e1wiaW5w\ndXRzXCI6e30sXCJwb3N0X3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCIjIHsndmVyc2lvbic6ICcxLjAn\nLCAnc3VjY2Vzcyc6IFRydWUsICdyZWFzb24nOiBOb25lLCAnY29udGVudCc6IHsnbnVtX29mX3N5\nbmNfaW5jaWRlbnRzJzogMn0sICdyYXcnOiAne1xcXCJudW1fb2Zfc3luY19pbmNpZGVudHNcXFwi\nOiAyfScsICdpbnB1dHMnOiB7J2RmX21heF9pbmNpZGVudF9pZCc6IE5vbmUsICdkZl9taW5faW5j\naWRlbnRfaWQnOiAwfSwgJ21ldHJpY3MnOiB7J3ZlcnNpb24nOiAnMS4wJywgJ3BhY2thZ2UnOiAn\ndW5rbm93bicsICdwYWNrYWdlX3ZlcnNpb24nOiAndW5rbm93bicsICdob3N0JzogJ01hcmtzLU1C\nUC5maW9zLXJvdXRlci5ob21lJywgJ2V4ZWN1dGlvbl90aW1lX21zJzogMjA2MiwgJ3RpbWVzdGFt\ncCc6ICcyMDE5LTA1LTE0IDIxOjM3OjA1J319XFxuaW5jaWRlbnQuYWRkTm90ZShcXFwiRGF0YSBG\nZWVkZXIgU3luY1xcXFxuTWluOiB7fSBNYXg6IHt9XFxcXG5JbmNpZGVudHMgU3luYydkOiB7fVxc\nXCIuZm9ybWF0KFxcbiAgICAgICByZXN1bHRzWydpbnB1dHMnXVsnZGZfbWluX2luY2lkZW50X2lk\nJ10sIFxcbiAgICAgICByZXN1bHRzWydpbnB1dHMnXVsnZGZfbWF4X2luY2lkZW50X2lkJ10sXFxu\nICAgICAgIHJlc3VsdHNbJ2NvbnRlbnQnXVsnbnVtX29mX3N5bmNfaW5jaWRlbnRzJ10pKVwiLFwi\ncHJlX3Byb2Nl
c3Npbmdfc2NyaXB0XCI6XCJ0cnk6XFxuICBpbnB1dHMuZGZfbWluX2luY2lkZW50\nX2lkID0gcnVsZS5wcm9wZXJ0aWVzLmRhdGFfZmVlZGVyX21pbmltdW1faW5jaWRlbnRfaWRcXG4g\nIGlucHV0cy5kZl9tYXhfaW5jaWRlbnRfaWQgPSBydWxlLnByb3BlcnRpZXMuZGF0YV9mZWVkZXJf\nbWF4aW11bV9pbmNpZGVudF9pZFxcbiAgaW5wdXRzLmRmX3F1ZXJ5X2FwaV9tZXRob2QgPSBydWxl\nLnByb3BlcnRpZXMucXVlcnlfYXBpX21ldGhvZFxcbmV4Y2VwdDpcXG4gIGhlbHBlci5mYWlsKFxc\nXCJUaGlzIHZlcnNpb24gb2YgUmVzaWxpZW50IGNhbm5vdCB1c2UgdGhpcyBmdW5jdGlvblxcXCIp\nXFxuICBcIixcInJlc3VsdF9uYW1lXCI6XCJcIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVu\nc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMWd2bG52ZzwvaW5jb21pbmc+PG91\ndGdvaW5nPlNlcXVlbmNlRmxvd18xZzdkNjk3PC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1\nZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMWd2bG52Z1wiIHNvdXJjZVJlZj1cIlN0YXJ0RXZl\nbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNrXzB5b2Y3aGlcIi8+PGVuZEV2ZW50\nIGlkPVwiRW5kRXZlbnRfMXZndzE4ZlwiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMWc3ZDY5Nzwv\naW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFnN2Q2\nOTdcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18weW9mN2hpXCIgdGFyZ2V0UmVmPVwiRW5kRXZl\nbnRfMXZndzE4ZlwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0\nXCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+\nPGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0\nRXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PHRl\neHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMDMzMzRjYlwiPjx0ZXh0PkNyZWF0ZXMg\nYW4gaW5jaWRlbnQgbm90ZSB3aXRoIG51bWJlciBvZiBpbmNpZGVudHMgc3luY2hyb25pemVkPC90\nZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMDd0YnV6\na1wiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzB5b2Y3aGlcIiB0YXJnZXRSZWY9XCJUZXh0QW5u\nb3RhdGlvbl8wMzMzNGNiXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFs\ndmJ2NjJcIj48dGV4dD5JbnB1dCBmcm9tIFJ1bGUgYWN0aXZpdHkgZmllbGRzPC90ZXh0PjwvdGV4\ndEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMG5paXp3ZFwiIHNvdXJj\nZVJlZj1cIlNlcnZpY2VUYXNrXzB5b2Y3aGlcIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8x\nbHZidjYyXCIvPjwvcHJvY2Vzcz48YnBtbmRpOkJQTU5EaWFncmFtIGlkPVwiQlBNTkRpYWdyYW1f\nMVwiPjxicG1uZGk6QlBNTlBsYW5lIGJwbW5FbGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9XCJCUE1O\nUGxhbmVfMVwiPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU3RhcnRFdmVudF8xNTVh\nc3htXCIgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1fZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1c\nIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCIxNjJcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVs\nPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMFwiIHdpZHRoPVwiOTBcIiB4PVwiMTU3XCIgeT1cIjIy\nM1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNo\nYXBlIGJwbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiIGlkPVwiVGV4dEFubm90\nYXRpb25fMWt4eGl5dF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzBcIiB3aWR0aD1cIjEw\nMFwiIHg9XCI5OVwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVk\nZ2UgYnBtbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgaWQ9XCJBc3NvY2lhdGlvbl8x\nc2V1ajQ4X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxNjlcIiB4c2k6dHlwZT1cIm9tZ2RjOlBv\naW50XCIgeT1cIjIyMFwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjE1M1wiIHhzaTp0eXBlPVwib21n\nZGM6UG9pbnRcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBl\nIGJwbW5FbGVtZW50PVwiU2VydmljZVRhc2tfMHlvZjdoaVwiIGlkPVwiU2VydmljZVRhc2tfMHlv\nZjdoaV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCIy\nOTFcIiB5PVwiMTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5F\nbGVtZW50PVwiU2VxdWVuY2VGbG93XzFndmxudmdcIiBpZD1cIlNlcXVlbmNlRmxvd18xZ3ZsbnZn\nX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxOThcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIg\neT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjI5MVwiIHhzaTp0e
XBlPVwib21nZGM6UG9p\nbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwi\nMTNcIiB3aWR0aD1cIjBcIiB4PVwiMjQ0LjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJl\nbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2\nZW50XzF2Z3cxOGZcIiBpZD1cIkVuZEV2ZW50XzF2Z3cxOGZfZGlcIj48b21nZGM6Qm91bmRzIGhl\naWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI0NjguNTI3MTY0Njg1OTA4M1wiIHk9XCIxODhc\nIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwi\nMFwiIHg9XCI0ODYuNTI3MTY0Njg1OTA4M1wiIHk9XCIyMjdcIi8+PC9icG1uZGk6QlBNTkxhYmVs\nPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVu\nY2VGbG93XzFnN2Q2OTdcIiBpZD1cIlNlcXVlbmNlRmxvd18xZzdkNjk3X2RpXCI+PG9tZ2RpOndh\neXBvaW50IHg9XCIzOTFcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21n\nZGk6d2F5cG9pbnQgeD1cIjQ2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIv\nPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBc\nIiB4PVwiNDMwXCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVk\nZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8wMzMzNGNi\nXCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8wMzMzNGNiX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9\nXCI1OVwiIHdpZHRoPVwiMTc2XCIgeD1cIjM4NFwiIHk9XCI2N1wiLz48L2JwbW5kaTpCUE1OU2hh\ncGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzA3dGJ1emtcIiBp\nZD1cIkFzc29jaWF0aW9uXzA3dGJ1emtfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM4NVwiIHhz\naTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTcwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDM4\nXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxMjZcIi8+PC9icG1uZGk6QlBNTkVkZ2U+\nPGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xbHZidjYyXCIg\naWQ9XCJUZXh0QW5ub3RhdGlvbl8xbHZidjYyX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI0\nNlwiIHdpZHRoPVwiMTMzXCIgeD1cIjE0NVwiIHk9XCI4MlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+\nPGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzBuaWl6d2RcIiBpZD1c\nIkFzc29jaWF0aW9uXzBuaWl6d2RfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjI5NlwiIHhzaTp0\neXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTcxXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjQxXCIg\neHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxMjhcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PC9i\ncG1uZGk6QlBNTlBsYW5lPjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+In0sICJj\nb250ZW50X3ZlcnNpb24iOiAxLCAiY3JlYXRvcl9pZCI6ICJlYjJkMWY3ZC02NjUxLTQxNWEtYjRm\nZi1hMTRmY2QyZjg0ZjUiLCAiZGVzY3JpcHRpb24iOiAiU3luY2hyb25pemUgSW5jaWRlbnQocykg\nYW5kIHRoZWlyIGFzc29jaWF0ZWQgdGFza3MsIG5vdGVzLCBhdHRhY2htZW50cywgYXJ0aWZhY3Rz\nLCBtaWxlc3RvbmVzIGFuZCBhc3NvY2lhdGVkIGRhdGF0YWJsZXMiLCAiZXhwb3J0X2tleSI6ICJk\nYXRhX2ZlZWRlcl9zeW5jX2luY2lkZW50cyIsICJsYXN0X21vZGlmaWVkX2J5IjogImViMmQxZjdk\nLTY2NTEtNDE1YS1iNGZmLWExNGZjZDJmODRmNSIsICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTc3\nMjQwMTI5MjM5LCAibmFtZSI6ICJEYXRhIEZlZWRlcjogU3luYyBJbmNpZGVudHMiLCAib2JqZWN0\nX3R5cGUiOiAiaW5jaWRlbnQiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZGF0YV9mZWVkZXJfc3lu\nY19pbmNpZGVudHMiLCAidGFncyI6IFtdLCAidXVpZCI6ICI0MzM3MDZhNS0yYjYxLTQ4ZDgtOWIx\nMy1iNDI0NjJhNGU5MDkiLCAid29ya2Zsb3dfaWQiOiAxfV0sICJ3b3Jrc3BhY2VzIjogW119\n\"\"\")", "title": "" }, { "docid": "1fdb3d01c925e42cf2d00b7ce9985f9d", "score": "0.48970267", "text": "def make_javascript_string(text):\n\n text = string.replace(text, \"\\\\\", r\"\\\\\")\n text = string.replace(text, \"'\", r\"\\'\")\n text = string.replace(text, \"\\\"\", r'\\\"')\n text = string.replace(text, \"\\n\", r\"\\n\")\n # Escape less-than signs so browsers don't look for HTML tags\n # inside the literal. 
\n text = string.replace(text, \"<\", r\"\\074\")\n return \"'\" + text + \"'\"", "title": "" }, { "docid": "36e0395beae8c531bb7fd145274d3943", "score": "0.48885635", "text": "def jformat(obj):\n return datamanager.Json(pjvalue(obj))", "title": "" }, { "docid": "fc6daa4427aa035ac472f37c6eafb438", "score": "0.48589146", "text": "def thg_encode(self, args):\n arg_mensage = args.split(\" \")\n if arg_mensage[0] == \"\":\n print(\n \"\"\"suporte encode:\n\n Este módulo fornece funções para codificar dados binários em caracteres ASCII \n imprimíveis e decodificar essas codificações de volta para dados binários.\n Ele fornece funções de codificação e decodificação para as codificações \n especificadas em RFC 3548 ,que define os algoritmos Base16, Base32 e Base64,\n e para as codificações Ascii85 e Base85 padrão de fato.\n\n a2b_uu\n b2a_uu\n a2b_base64\n b2a_base64\n a2b_qp\n b2a_qp\n a2b_hqx\n rledecode_hqx\n rlecode_hqx\n b2a_hqx\n crc_hqx\n crc32\n b2a_hex\n a2b_hex\n hexlify\n unhexlify\n Charcode\n binary\n base62\n basen\n bcd\n ur\n unicode_normalize\n qp_encoding\n encode type[2,16,32,64] str\n\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n\n elif arg_mensage[0] == \"64\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n base64_bytes = base64.b64encode(arg_mensage[1])\n by_to_st(base64_bytes)\n elif arg_mensage[0] == \"32\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n b32encode_bytes = base64.b32encode(arg_mensage[1])\n by_to_st(b32encode_bytes)\n elif arg_mensage[0] == \"16\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n b16encode_bytes = base64.b16encode(arg_mensage[1])\n by_to_st(b16encode_bytes)\n elif arg_mensage[0] == \"a85encode\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n a85encode_bytes = base64.a85encode(arg_mensage[1])\n by_to_st(a85encode_bytes)\n elif arg_mensage[0] == \"b85encode\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n b85encode_bytes = base64.b85encode(arg_mensage[1])\n by_to_st(b85encode_bytes)\n elif arg_mensage[0] == \"a2b_uu\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta uma única linha de dados uuencodificados de volta em binários e retorne os dados binários. As linhas normalmente contêm 45 bytes (binários), exceto a última linha. Os dados da linha podem ser seguidos de espaços em branco.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.a2b_uu(arg_mensage[1])))\n elif arg_mensage[0] == \"a2b_base64\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados binários em uma linha de caracteres ASCII na codificação base64. O valor de retorno é a linha convertida, incluindo um caractere de nova linha. O comprimento dos dados deve ser de no máximo 57 para aderir ao padrão base64.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.a2b_base64(arg_mensage[1]))\n elif arg_mensage[0] == \"b2a_base64\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta dados binários em uma linha de caracteres ASCII na codificação base64. O valor de retorno é a linha convertida, incluindo um caractere de nova linha. 
O comprimento dos dados deve ser de no máximo 57 para aderir ao padrão base64.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.b2a_base64(b\"arg_mensage[1]\"))\n elif arg_mensage[0] == \"a2b_qp\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta um bloco de dados imprimíveis entre aspas de volta em binários e retorne os dados binários. Mais de uma linha pode ser passada por vez. Se o cabeçalho do argumento opcional estiver presente e verdadeiro, os sublinhados serão decodificados como espaços.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.a2b_qp(arg_mensage[1]))\n elif arg_mensage[0] == \"b2a_qp\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados binários em uma (s) linha (s) de caracteres ASCII em codificação imprimível entre aspas. O valor de retorno é a (s) linha (s) convertida (s). Se o argumento opcional quotetabs estiver presente e verdadeiro, todas as tabulações e espaços serão codificados. Se o argumento opcional istext estiver presente e verdadeiro, as novas linhas não serão codificadas, mas os espaços em branco finais serão codificados. Se o cabeçalho do argumento opcional estiver presente e verdadeiro, os espaços serão codificados como sublinhados de acordo com RFC1522. Se o cabeçalho do argumento opcional estiver presente e for falso, os caracteres de nova linha também serão codificados; caso contrário, a conversão de alimentação de linha pode corromper o fluxo de dados binários.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.a2b_qp(arg_mensage[1].encode()))\n elif arg_mensage[0] == \"a2b_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados ASCII formatados de binhex4 em binários, sem fazer a descompressão RLE. A string deve conter um número completo de bytes binários ou (no caso da última parte dos dados binhex4) ter os bits restantes zero.\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.a2b_hqx(arg_mensage[1]))\n elif arg_mensage[0] == \"rledecode_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a descompressão RLE nos dados, de acordo com o padrão binhex4. O algoritmo usa 0x90 após um byte como um indicador de repetição, seguido por uma contagem. Uma contagem de 0 especifica um valor de byte de 0x90 . A rotina retorna os dados descompactados, a menos que os dados de entrada de dados terminem em um indicador de repetição órfão, caso em que a exceção Incompleta é levantada.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.rledecode_hqx(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"rlecode_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a compactação RLE no estilo binhex4 nos dados e retorne o resultado.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.rlecode_hqx(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"b2a_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a conversão hexbin4 binário para ASCII e retorne a string resultante. 
O argumento já deve ser codificado por RLE e ter um comprimento divisível por 3 (exceto possivelmente o último fragmento).\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.b2a_hqx(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"crc_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Calcule o valor binhex4 crc dos dados , começando com um crc inicial e retornando o resultado.\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(\n (binascii.crc_hqx(arg_mensage[1].encode(), int(arg_mensage[2])))\n )\n elif arg_mensage[0] == \"crc32\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Calcule CRC-32, a soma de verificação de dados de \n 32 bits, começando com um crc inicial. Isso é consistente com a soma de verificação do arquivo ZIP. \n Uma vez que o algoritmo é projetado para uso como um algoritmo de soma de verificação, não é adequado \n para uso como um algoritmo de hash geral. \n\n {YELLOW}Nota{YELLOW}{RED} Para gerar o mesmo valor numérico em todas as versões e plataformas Python, {RED}{BLUE}use crc32 (dados) & 0xffffffff{BLUE}{RED}. Se você estiver usando apenas a soma de verificação no formato binário compactado, isso não é necessário, pois o valor de retorno é a representação binária correta de 32 bits, independentemente do sinal.\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.crc32(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"hexlify\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Retorna a representação hexadecimal dos dados \n binários . Cada byte de dados é convertido na representação hexadecimal de 2 dígitos correspondente. \n A string resultante é, portanto, o dobro do comprimento dos dados . \n\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(\n (binascii.hexlify(arg_mensage[1].encode(), arg_mensage[2].encode()))\n )\n elif arg_mensage[0] == \"b2a_hex\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} hex\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(\n (binascii.b2a_hex(arg_mensage[1].encode(), int(arg_mensage[2])))\n )\n elif arg_mensage[0] == \"unhexlify\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Retorna os dados binários representados pela string hexadecimal hexstr . Esta função é o inverso de b2a_hex () . hexstr deve conter um número par de dígitos hexadecimais (que podem ser maiúsculas ou minúsculas), caso contrário, um TypeError é gerado.\n\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.unhexlify(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"b2a_uu\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta dados binários em uma linha de caracteres ASCII, o valor de retorno é a linha convertida, incluindo um caractere de nova linha. 
O comprimento dos dados deve ser de no máximo 45.\n\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(\n (binascii.b2a_uu(arg_mensage[1].encode(), int(arg_mensage[2])))\n )\n elif arg_mensage[0] == \"charcode\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}charcode{YELLOW}{BLUE} =>{BLUE}{RED}converte string em charcode\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(ord(arg_mensage[1].encode()))\n elif arg_mensage[0] == \"binary\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}binary{YELLOW}{BLUE} =>{BLUE}{RED}converte string em binary\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(\" \".join(format(ord(x), \"b\") for x in arg_mensage[1]))\n elif arg_mensage[0] == \"base62\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}base62{YELLOW}{BLUE} =>{BLUE}{RED}converte string em base62\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(decode62(arg_mensage[1]))\n elif arg_mensage[0] == \"basen\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}basen{YELLOW}{BLUE} =>{BLUE}{RED}converte decimal em basen\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(numpy.base_repr(int(arg_mensage[1]), base=int(arg_mensage[2])))\n elif arg_mensage[0] == \"url\":\n try:\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}url_encode{YELLOW}{BLUE} =>{BLUE}{RED}encode personalidado para url\\nencode url_encode safa[] encoding\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(\n quote(\n arg_mensage[1], safe=arg_mensage[2], encoding=arg_mensage[3]\n )\n )\n except IndexError:\n print(\n \"digite a sintaxe correta\\nncode url_encode safa[] encoding\\n ou use o comando help\"\n )\n elif arg_mensage[0] == \"unicode_normalize\":\n try:\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}unicode_normalize{YELLOW}{BLUE} =>{BLUE}{RED}Transforme caracteres Unicode em uma das formas de normalização['NFC', 'NFKC', 'NFD','NFKD']\\n \n {YELLOW}NFD{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Canonical Decomposition\n {YELLOW}NFC{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Canonical Composition\n {YELLOW}NFKD{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Compatibility Decomposition\n {YELLOW}NFKC{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Compatibility Composition \n encode unicode_normalize str encoding['NFC', 'NFKC', 'NFD','NFKD']\\n\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(unicodedata.normalize(arg_mensage[1], arg_mensage[2]))\n except IndexError:\n print(\n \"digite a sintaxe correta\\nncode url_encode safa[] encoding\\n ou use o comando help\"\n )\n elif arg_mensage[0] == \"qp_encoding\":\n try:\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}qp_encoding{YELLOW}{BLUE} =>{BLUE}{RED}\n Quoted-Printable, ou QP encoding, \n é uma codificação que usa caracteres ASCII imprimíveis (alfanuméricos e o sinal de igual '=') \n para transmitir dados de 8 bits em um caminho de dados de 7 bits ou, geralmente, em um meio que não é 8- um pouco limpo. \n É definido como uma codificação de transferência de conteúdo MIME para uso em e-mail.\n QP funciona usando o sinal de igual '=' como um caractere de escape. 
Ele também limita o comprimento da linha a 76, pois alguns softwares têm limites no comprimento da linha\\nencode qp_encoding TXT encode\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n encoded = quopri.encodestring(arg_mensage[1].encode(arg_mensage[2]))\n print(encoded.decode())\n except IndexError:\n print(\n \"digite a sintaxe correta\\nencode qp_encoding é utf-16\\n ou use o comando help\"\n )\n elif arg_mensage[0] == \"idna\":\n try:\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}idna{YELLOW}{BLUE} =>{BLUE}{RED}encode personalidado para url\\nencode url_encode safa[] encoding\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(idna.encode(arg_mensage[1]).decode(arg_mensage[2]))\n except IndexError:\n print(\n \"digite a sintaxe correta\\nncode idna string encoding\\n ou use o comando help\"\n )\n\n else:\n pass\n try:\n pass\n\n except IndexError:\n print(\"verificar a saida\")", "title": "" }, { "docid": "d69cb165f6e7120da54e572059acfe7a", "score": "0.48557425", "text": "def customization_data(client=None):\n\n yield ImportDefinition(u\"\"\"\neyJhY3Rpb25fb3JkZXIiOiBbXSwgImFjdGlvbnMiOiBbeyJhdXRvbWF0aW9ucyI6IFtdLCAiY29u\nZGl0aW9ucyI6IFt7ImV2YWx1YXRpb25faWQiOiBudWxsLCAiZmllbGRfbmFtZSI6ICJhcnRpZmFj\ndC50eXBlIiwgIm1ldGhvZCI6ICJlcXVhbHMiLCAidHlwZSI6IG51bGwsICJ2YWx1ZSI6ICJTdHJp\nbmcifV0sICJlbmFibGVkIjogdHJ1ZSwgImV4cG9ydF9rZXkiOiAiRXhhbXBsZTogRWxhc3RpY1Nl\nYXJjaCBRdWVyeSBmcm9tIEFydGlmYWN0IiwgImlkIjogMzksICJsb2dpY190eXBlIjogImFsbCIs\nICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdLCAibmFtZSI6ICJFeGFtcGxlOiBFbGFzdGljU2Vh\ncmNoIFF1ZXJ5IGZyb20gQXJ0aWZhY3QiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAidGFn\ncyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ0eXBlIjogMSwgInV1aWQiOiAiMWQw\nZDUwZTMtMmJmYy00NWRlLTkwZWMtZjJmNDRhNzE5OWY0IiwgInZpZXdfaXRlbXMiOiBbXSwgIndv\ncmtmbG93cyI6IFsiZXhhbXBsZV9lbGFzdGljc2VhcmNoX3F1ZXJ5X2Zyb21fYXJ0aWZhY3QiXX0s\nIHsiYXV0b21hdGlvbnMiOiBbXSwgImNvbmRpdGlvbnMiOiBbXSwgImVuYWJsZWQiOiB0cnVlLCAi\nZXhwb3J0X2tleSI6ICJFeGFtcGxlOiBFbGFzdGljU2VhcmNoIFF1ZXJ5IGZyb20gSW5jaWRlbnQi\nLCAiaWQiOiA0MCwgImxvZ2ljX3R5cGUiOiAiYWxsIiwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjog\nW10sICJuYW1lIjogIkV4YW1wbGU6IEVsYXN0aWNTZWFyY2ggUXVlcnkgZnJvbSBJbmNpZGVudCIs\nICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJ0YWdzIjogW10sICJ0aW1lb3V0X3NlY29uZHMi\nOiA4NjQwMCwgInR5cGUiOiAxLCAidXVpZCI6ICI1NjI1MjI2YS0zOTE5LTRlMTYtOTY2ZS0yZDg5\nZmEyMWQyMTciLCAidmlld19pdGVtcyI6IFtdLCAid29ya2Zsb3dzIjogWyJleGFtcGxlX2VsYXN0\naWNzZWFyY2hfcXVlcnlfZnJvbV9pbmNpZGVudCJdfV0sICJhcHBzIjogW10sICJhdXRvbWF0aWNf\ndGFza3MiOiBbXSwgImV4cG9ydF9kYXRlIjogMTYwMjA3ODAzMzM0MSwgImV4cG9ydF9mb3JtYXRf\ndmVyc2lvbiI6IDIsICJmaWVsZHMiOiBbeyJhbGxvd19kZWZhdWx0X3ZhbHVlIjogZmFsc2UsICJi\nbGFua19vcHRpb24iOiBmYWxzZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0\ncnVlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwg\nImRlcHJlY2F0ZWQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9lc19kb2NfdHlw\nZSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiaWQiOiAzODYsICJpbnB1dF90eXBlIjog\nInRleHQiLCAiaW50ZXJuYWwiOiBmYWxzZSwgImlzX3RyYWNrZWQiOiBmYWxzZSwgIm5hbWUiOiAi\nZXNfZG9jX3R5cGUiLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJvcGVyYXRpb25zIjogW10sICJw\nbGFjZWhvbGRlciI6ICIiLCAicHJlZml4IjogbnVsbCwgInJlYWRfb25seSI6IGZhbHNlLCAicmlj\naF90ZXh0IjogZmFsc2UsICJ0YWdzIjogW10sICJ0ZW1wbGF0ZXMiOiBbXSwgInRleHQiOiAiZXNf\nZG9jX3R5cGUiLCAidG9vbHRpcCI6ICJUaGUgZG9jdW1lbnQgdHlwZSB0aGF0IHdpbGwgYmUgc2Vh\ncmNoLiIsICJ0eXBlX2lkIjogMTEsICJ1dWlkIjogIjFlMDQxNzc1LWM5YmEtNDNhZS1hOGNmLTNm\nNGRkYTlhMDY4MSIsICJ2YWx1ZXMiOiBbXX0sIHsiYWxsb3dfZGVmYXVsdF92YWx1ZSI6
{ "docid": "d69cb165a05bb17ca856b1a14334383a", "score": "0.48557425", "text": "def customization_data(client=None):\n\n    yield ImportDefinition(u\"\"\"\n<base64-encoded ImportDefinition payload elided; it decodes to a Resilient export JSON that defines the fn_elasticsearch message destination, the es_query, es_index and es_doc_type function inputs, the 'ElasticSearch Utilities: Query' function, and the 'Example: ElasticSearch Query from Artifact' and 'Example: ElasticSearch Query from Incident' workflows and rules>\n\"\"\")", "title": "" },
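Both customization_data generators in these passages ship their Resilient export as a single opaque base64 string. Decoding the payload back to JSON makes such a passage inspectable; a minimal sketch, where payload is a hypothetical stand-in for the base64 text passed to ImportDefinition:

import base64
import json

# Illustrative stand-in for the base64 text embedded in the passages above.
payload = "eyJhY3Rpb25fb3JkZXIiOiBbXX0="

# ImportDefinition payloads are base64-encoded JSON export definitions.
definition = json.loads(base64.b64decode(payload))
print(json.dumps(definition, indent=2))  # {"action_order": []}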
{ "docid": "90559a1bd5251060fa2d5f7b02bc9741", "score": "0.48470083", "text": "def customization_data(client=None):\n\n    yield ImportDefinition(u\"\"\"\n<base64-encoded ImportDefinition payload elided; it is truncated in the source, and the decodable portion is a Resilient export JSON that defines the fn_geocoding message destination, the geocoding_source and geocoding_data function inputs, the 'Geocoding' function, and the 'Example: Geocoding Get Address' and 'Example: Geocoding Get Coordinates' workflows and rules>\n\"\"\")", "title": "" }
RrUk1PSTFGWW8tZ2ZBJyxcXG4gICAgICAgIHUncGx1c19jb2RlJzoge1xc\nbiAgICAgICAgICB1J2dsb2JhbF9jb2RlJzogdSc4N0pDOVc4Qys1MicsXFxuICAgICAgICAgIHUn\nY29tcG91bmRfY29kZSc6IHUnOVc4Qys1MiBDYW1icmlkZ2UsIE1hc3NhY2h1c2V0dHMsIFVuaXRl\nZCBTdGF0ZXMnXFxuICAgICAgICB9LFxcbiAgICAgICAgdSdhZGRyZXNzX2NvbXBvbmVudHMnOiBb\nXFxuICAgICAgICAgIHtcXG4gICAgICAgICAgICB1J2xvbmdfbmFtZSc6IHUnQmlubmV5IFN0L1Nl\nY29uZCcsXFxuICAgICAgICAgICAgdSd0eXBlcyc6IFtcXG4gICAgICAgICAgICAgIHUnYnVzX3N0\nYXRpb24nLFxcbiAgICAgICAgICAgICAgdSdlc3RhYmxpc2htZW50JyxcXG4gICAgICAgICAgICAg\nIHUncG9pbnRfb2ZfaW50ZXJlc3QnLFxcbiAgICAgICAgICAgICAgdSd0cmFuc2l0X3N0YXRpb24n\nXFxuICAgICAgICAgICAgXSxcXG4gICAgICAgICAgICB1J3Nob3J0X25hbWUnOiB1J0Jpbm5leSBT\ndC9TZWNvbmQnXFxuICAgICAgICAgIH0sXFxuICAgICAgICAgIHtcXG4gICAgICAgICAgICB1J2xv\nbmdfbmFtZSc6IHUnVW5pdGVkIFN0YXRlcycsXFxuICAgICAgICAgICAgdSd0eXBlcyc6IFtcXG4g\nICAgICAgICAgICAgIHUnY291bnRyeScsXFxuICAgICAgICAgICAgICB1J3BvbGl0aWNhbCdcXG4g\nICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnVVMnXFxuICAgICAg\nICAgIH1cXG4gICAgICAgIF0sXFxuICAgICAgICB1J2Zvcm1hdHRlZF9hZGRyZXNzJzogdSdCaW5u\nZXkgU3QvU2Vjb25kLCBVbml0ZWQgU3RhdGVzJyxcXG4gICAgICAgIHUndHlwZXMnOiBbXFxuICAg\nICAgICAgIHUnYnVzX3N0YXRpb24nLFxcbiAgICAgICAgICB1J2VzdGFibGlzaG1lbnQnLFxcbiAg\nICAgICAgICB1J3BvaW50X29mX2ludGVyZXN0JyxcXG4gICAgICAgICAgdSd0cmFuc2l0X3N0YXRp\nb24nXFxuICAgICAgICBdXFxuICAgICAgfSxcXG4gICAgICB7XFxuICAgICAgICB1J2dlb21ldHJ5\nJzoge1xcbiAgICAgICAgICB1J2xvY2F0aW9uJzoge1xcbiAgICAgICAgICAgIHUnbGF0JzogNDIu\nMzY1NTAwNyxcXG4gICAgICAgICAgICB1J2xuZyc6IC03MS4wODA2MTAyXFxuICAgICAgICAgIH0s\nXFxuICAgICAgICAgIHUndmlld3BvcnQnOiB7XFxuICAgICAgICAgICAgdSdub3J0aGVhc3QnOiB7\nXFxuICAgICAgICAgICAgICB1J2xhdCc6IDQyLjM2Njg0OTY4MDI5MTUsXFxuICAgICAgICAgICAg\nICB1J2xuZyc6IC03MS4wNzkyNjEyMTk3MDg0OFxcbiAgICAgICAgICAgIH0sXFxuICAgICAgICAg\nICAgdSdzb3V0aHdlc3QnOiB7XFxuICAgICAgICAgICAgICB1J2xhdCc6IDQyLjM2NDE1MTcxOTcw\nODUsXFxuICAgICAgICAgICAgICB1J2xuZyc6IC03MS4wODE5NTkxODAyOTE1XFxuICAgICAgICAg\nICAgfVxcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB1J2xvY2F0aW9uX3R5cGUnOiB1J1JBTkdF\nX0lOVEVSUE9MQVRFRCdcXG4gICAgICAgIH0sXFxuICAgICAgICB1J2FkZHJlc3NfY29tcG9uZW50\ncyc6IFtcXG4gICAgICAgICAge1xcbiAgICAgICAgICAgIHUnbG9uZ19uYW1lJzogdSc3NScsXFxu\nICAgICAgICAgICAgdSd0eXBlcyc6IFtcXG4gICAgICAgICAgICAgIHUnc3RyZWV0X251bWJlcidc\nXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnNzUnXFxuICAg\nICAgICAgIH0sXFxuICAgICAgICAgIHtcXG4gICAgICAgICAgICB1J2xvbmdfbmFtZSc6IHUnQmlu\nbmV5IFN0cmVldCcsXFxuICAgICAgICAgICAgdSd0eXBlcyc6IFtcXG4gICAgICAgICAgICAgIHUn\ncm91dGUnXFxuICAgICAgICAgICAgXSxcXG4gICAgICAgICAgICB1J3Nob3J0X25hbWUnOiB1J0Jp\nbm5leSBTdCdcXG4gICAgICAgICAgfSxcXG4gICAgICAgICAge1xcbiAgICAgICAgICAgIHUnbG9u\nZ19uYW1lJzogdSdFYXN0IENhbWJyaWRnZScsXFxuICAgICAgICAgICAgdSd0eXBlcyc6IFtcXG4g\nICAgICAgICAgICAgIHUnbmVpZ2hib3Job29kJyxcXG4gICAgICAgICAgICAgIHUncG9saXRpY2Fs\nJ1xcbiAgICAgICAgICAgIF0sXFxuICAgICAgICAgICAgdSdzaG9ydF9uYW1lJzogdSdFYXN0IENh\nbWJyaWRnZSdcXG4gICAgICAgICAgfSxcXG4gICAgICAgICAge1xcbiAgICAgICAgICAgIHUnbG9u\nZ19uYW1lJzogdSdDYW1icmlkZ2UnLFxcbiAgICAgICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAg\nICAgICAgICB1J2xvY2FsaXR5JyxcXG4gICAgICAgICAgICAgIHUncG9saXRpY2FsJ1xcbiAgICAg\nICAgICAgIF0sXFxuICAgICAgICAgICAgdSdzaG9ydF9uYW1lJzogdSdDYW1icmlkZ2UnXFxuICAg\nICAgICAgIH0sXFxuICAgICAgICAgIHtcXG4gICAgICAgICAgICB1J2xvbmdfbmFtZSc6IHUnTWlk\nZGxlc2V4IENvdW50eScsXFxuICAgICAgICAgICAgdSd0eXBlcyc6IFtcXG4gICAgICAgICAgICAg\nIHUnYWRtaW5pc3RyYXRpdmVfYXJlYV9sZXZlbF8yJyxcXG4gICAgICAgICAgICAgIHUncG9saXRp\nY2FsJ1xcbiAgICAgICAgICAgIF0sXFxuICAgICAgICAgICAgdSdzaG9ydF9uYW1lJzogdSdNaWRk\nbGVzZXggQ291bnR5J1xcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB7XFxuICA
gICAgICAgICAg\ndSdsb25nX25hbWUnOiB1J01hc3NhY2h1c2V0dHMnLFxcbiAgICAgICAgICAgIHUndHlwZXMnOiBb\nXFxuICAgICAgICAgICAgICB1J2FkbWluaXN0cmF0aXZlX2FyZWFfbGV2ZWxfMScsXFxuICAgICAg\nICAgICAgICB1J3BvbGl0aWNhbCdcXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hv\ncnRfbmFtZSc6IHUnTUEnXFxuICAgICAgICAgIH0sXFxuICAgICAgICAgIHtcXG4gICAgICAgICAg\nICB1J2xvbmdfbmFtZSc6IHUnVW5pdGVkIFN0YXRlcycsXFxuICAgICAgICAgICAgdSd0eXBlcyc6\nIFtcXG4gICAgICAgICAgICAgIHUnY291bnRyeScsXFxuICAgICAgICAgICAgICB1J3BvbGl0aWNh\nbCdcXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnVVMnXFxu\nICAgICAgICAgIH0sXFxuICAgICAgICAgIHtcXG4gICAgICAgICAgICB1J2xvbmdfbmFtZSc6IHUn\nMDIxNDInLFxcbiAgICAgICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgICAgICB1J3Bvc3Rh\nbF9jb2RlJ1xcbiAgICAgICAgICAgIF0sXFxuICAgICAgICAgICAgdSdzaG9ydF9uYW1lJzogdScw\nMjE0MidcXG4gICAgICAgICAgfSxcXG4gICAgICAgICAge1xcbiAgICAgICAgICAgIHUnbG9uZ19u\nYW1lJzogdScxMTIzJyxcXG4gICAgICAgICAgICB1J3R5cGVzJzogW1xcbiAgICAgICAgICAgICAg\ndSdwb3N0YWxfY29kZV9zdWZmaXgnXFxuICAgICAgICAgICAgXSxcXG4gICAgICAgICAgICB1J3No\nb3J0X25hbWUnOiB1JzExMjMnXFxuICAgICAgICAgIH1cXG4gICAgICAgIF0sXFxuICAgICAgICB1\nJ3BsYWNlX2lkJzogdSdFaVkzTlNCQ2FXNXVaWGtnVTNRc0lFTmhiV0p5YVdSblpTd2dUVUVnTURJ\neE5ESXNJRlZUUVNJYUVoZ0tGQW9TQ1ZVdkNiT2tjT09KRWJiVHIxX2ZwT3JGRUVzJyxcXG4gICAg\nICAgIHUnZm9ybWF0dGVkX2FkZHJlc3MnOiB1Jzc1IEJpbm5leSBTdCwgQ2FtYnJpZGdlLCBNQSAw\nMjE0MiwgVVNBJyxcXG4gICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgIHUnc3RyZWV0X2Fk\nZHJlc3MnXFxuICAgICAgICBdXFxuICAgICAgfSxcXG4gICAgICB7XFxuICAgICAgICB1J2dlb21l\ndHJ5Jzoge1xcbiAgICAgICAgICB1J2xvY2F0aW9uX3R5cGUnOiB1J0FQUFJPWElNQVRFJyxcXG4g\nICAgICAgICAgdSdib3VuZHMnOiB7XFxuICAgICAgICAgICAgdSdub3J0aGVhc3QnOiB7XFxuICAg\nICAgICAgICAgICB1J2xhdCc6IDQyLjM2ODcxODAwMDAwMDAxLFxcbiAgICAgICAgICAgICAgdSds\nbmcnOiAtNzEuMDcxNjI3OVxcbiAgICAgICAgICAgIH0sXFxuICAgICAgICAgICAgdSdzb3V0aHdl\nc3QnOiB7XFxuICAgICAgICAgICAgICB1J2xhdCc6IDQyLjM1NDIzMzEsXFxuICAgICAgICAgICAg\nICB1J2xuZyc6IC03MS4wOTQ4NTE5XFxuICAgICAgICAgICAgfVxcbiAgICAgICAgICB9LFxcbiAg\nICAgICAgICB1J3ZpZXdwb3J0Jzoge1xcbiAgICAgICAgICAgIHUnbm9ydGhlYXN0Jzoge1xcbiAg\nICAgICAgICAgICAgdSdsYXQnOiA0Mi4zNjg3MTgwMDAwMDAwMSxcXG4gICAgICAgICAgICAgIHUn\nbG5nJzogLTcxLjA3MTYyNzlcXG4gICAgICAgICAgICB9LFxcbiAgICAgICAgICAgIHUnc291dGh3\nZXN0Jzoge1xcbiAgICAgICAgICAgICAgdSdsYXQnOiA0Mi4zNTQyMzMxLFxcbiAgICAgICAgICAg\nICAgdSdsbmcnOiAtNzEuMDk0ODUxOVxcbiAgICAgICAgICAgIH1cXG4gICAgICAgICAgfSxcXG4g\nICAgICAgICAgdSdsb2NhdGlvbic6IHtcXG4gICAgICAgICAgICB1J2xhdCc6IDQyLjM2MzU4NDcs\nXFxuICAgICAgICAgICAgdSdsbmcnOiAtNzEuMDgyNDYxMjk5OTk5OTlcXG4gICAgICAgICAgfVxc\nbiAgICAgICAgfSxcXG4gICAgICAgIHUnYWRkcmVzc19jb21wb25lbnRzJzogW1xcbiAgICAgICAg\nICB7XFxuICAgICAgICAgICAgdSdsb25nX25hbWUnOiB1JzAyMTQyJyxcXG4gICAgICAgICAgICB1\nJ3R5cGVzJzogW1xcbiAgICAgICAgICAgICAgdSdwb3N0YWxfY29kZSdcXG4gICAgICAgICAgICBd\nLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnMDIxNDInXFxuICAgICAgICAgIH0sXFxu\nICAgICAgICAgIHtcXG4gICAgICAgICAgICB1J2xvbmdfbmFtZSc6IHUnQ2FtYnJpZGdlJyxcXG4g\nICAgICAgICAgICB1J3R5cGVzJzogW1xcbiAgICAgICAgICAgICAgdSdsb2NhbGl0eScsXFxuICAg\nICAgICAgICAgICB1J3BvbGl0aWNhbCdcXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUn\nc2hvcnRfbmFtZSc6IHUnQ2FtYnJpZGdlJ1xcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB7XFxu\nICAgICAgICAgICAgdSdsb25nX25hbWUnOiB1J01pZGRsZXNleCBDb3VudHknLFxcbiAgICAgICAg\nICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgICAgICB1J2FkbWluaXN0cmF0aXZlX2FyZWFfbGV2\nZWxfMicsXFxuICAgICAgICAgICAgICB1J3BvbGl0aWNhbCdcXG4gICAgICAgICAgICBdLFxcbiAg\nICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnTWlkZGxlc2V4IENvdW50eSdcXG4gICAgICAgICAg\nfSxcXG4gICAgICAgICAge1xcbiAgICAgICAgICAgIHUnbG9uZ19uYW1lJzogdSdNYXNzYWNodXNl\ndHRzJyxcXG4gICAgICAgICAgICB1J3
R5cGVzJzogW1xcbiAgICAgICAgICAgICAgdSdhZG1pbmlz\ndHJhdGl2ZV9hcmVhX2xldmVsXzEnLFxcbiAgICAgICAgICAgICAgdSdwb2xpdGljYWwnXFxuICAg\nICAgICAgICAgXSxcXG4gICAgICAgICAgICB1J3Nob3J0X25hbWUnOiB1J01BJ1xcbiAgICAgICAg\nICB9LFxcbiAgICAgICAgICB7XFxuICAgICAgICAgICAgdSdsb25nX25hbWUnOiB1J1VuaXRlZCBT\ndGF0ZXMnLFxcbiAgICAgICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgICAgICB1J2NvdW50\ncnknLFxcbiAgICAgICAgICAgICAgdSdwb2xpdGljYWwnXFxuICAgICAgICAgICAgXSxcXG4gICAg\nICAgICAgICB1J3Nob3J0X25hbWUnOiB1J1VTJ1xcbiAgICAgICAgICB9XFxuICAgICAgICBdLFxc\nbiAgICAgICAgdSdwbGFjZV9pZCc6IHUnQ2hJSkp3N1ZzcVZ3NDRrUkh4U09qWkpEWXU0JyxcXG4g\nICAgICAgIHUnZm9ybWF0dGVkX2FkZHJlc3MnOiB1J0NhbWJyaWRnZSwgTUEgMDIxNDIsIFVTQScs\nXFxuICAgICAgICB1J3R5cGVzJzogW1xcbiAgICAgICAgICB1J3Bvc3RhbF9jb2RlJ1xcbiAgICAg\nICAgXVxcbiAgICAgIH0sXFxuICAgICAge1xcbiAgICAgICAgdSdnZW9tZXRyeSc6IHtcXG4gICAg\nICAgICAgdSdsb2NhdGlvbl90eXBlJzogdSdBUFBST1hJTUFURScsXFxuICAgICAgICAgIHUnYm91\nbmRzJzoge1xcbiAgICAgICAgICAgIHUnbm9ydGhlYXN0Jzoge1xcbiAgICAgICAgICAgICAgdSds\nYXQnOiA0Mi4zNzUyOTQxLFxcbiAgICAgICAgICAgICAgdSdsbmcnOiAtNzEuMDY0NTI4Nzk5OTk5\nOTlcXG4gICAgICAgICAgICB9LFxcbiAgICAgICAgICAgIHUnc291dGh3ZXN0Jzoge1xcbiAgICAg\nICAgICAgICAgdSdsYXQnOiA0Mi4zNjE1OTcsXFxuICAgICAgICAgICAgICB1J2xuZyc6IC03MS4w\nODk3MjU1XFxuICAgICAgICAgICAgfVxcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB1J3ZpZXdw\nb3J0Jzoge1xcbiAgICAgICAgICAgIHUnbm9ydGhlYXN0Jzoge1xcbiAgICAgICAgICAgICAgdSds\nYXQnOiA0Mi4zNzUyOTQxLFxcbiAgICAgICAgICAgICAgdSdsbmcnOiAtNzEuMDY0NTI4Nzk5OTk5\nOTlcXG4gICAgICAgICAgICB9LFxcbiAgICAgICAgICAgIHUnc291dGh3ZXN0Jzoge1xcbiAgICAg\nICAgICAgICAgdSdsYXQnOiA0Mi4zNjE1OTcsXFxuICAgICAgICAgICAgICB1J2xuZyc6IC03MS4w\nODk3MjU1XFxuICAgICAgICAgICAgfVxcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB1J2xvY2F0\naW9uJzoge1xcbiAgICAgICAgICAgIHUnbGF0JzogNDIuMzY4MzM1MyxcXG4gICAgICAgICAgICB1\nJ2xuZyc6IC03MS4wODI0NjEyOTk5OTk5OVxcbiAgICAgICAgICB9XFxuICAgICAgICB9LFxcbiAg\nICAgICAgdSdhZGRyZXNzX2NvbXBvbmVudHMnOiBbXFxuICAgICAgICAgIHtcXG4gICAgICAgICAg\nICB1J2xvbmdfbmFtZSc6IHUnRWFzdCBDYW1icmlkZ2UnLFxcbiAgICAgICAgICAgIHUndHlwZXMn\nOiBbXFxuICAgICAgICAgICAgICB1J25laWdoYm9yaG9vZCcsXFxuICAgICAgICAgICAgICB1J3Bv\nbGl0aWNhbCdcXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUn\nRWFzdCBDYW1icmlkZ2UnXFxuICAgICAgICAgIH0sXFxuICAgICAgICAgIHtcXG4gICAgICAgICAg\nICB1J2xvbmdfbmFtZSc6IHUnQ2FtYnJpZGdlJyxcXG4gICAgICAgICAgICB1J3R5cGVzJzogW1xc\nbiAgICAgICAgICAgICAgdSdsb2NhbGl0eScsXFxuICAgICAgICAgICAgICB1J3BvbGl0aWNhbCdc\nXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnQ2FtYnJpZGdl\nJ1xcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB7XFxuICAgICAgICAgICAgdSdsb25nX25hbWUn\nOiB1J01pZGRsZXNleCBDb3VudHknLFxcbiAgICAgICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAg\nICAgICAgICB1J2FkbWluaXN0cmF0aXZlX2FyZWFfbGV2ZWxfMicsXFxuICAgICAgICAgICAgICB1\nJ3BvbGl0aWNhbCdcXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6\nIHUnTWlkZGxlc2V4IENvdW50eSdcXG4gICAgICAgICAgfSxcXG4gICAgICAgICAge1xcbiAgICAg\nICAgICAgIHUnbG9uZ19uYW1lJzogdSdNYXNzYWNodXNldHRzJyxcXG4gICAgICAgICAgICB1J3R5\ncGVzJzogW1xcbiAgICAgICAgICAgICAgdSdhZG1pbmlzdHJhdGl2ZV9hcmVhX2xldmVsXzEnLFxc\nbiAgICAgICAgICAgICAgdSdwb2xpdGljYWwnXFxuICAgICAgICAgICAgXSxcXG4gICAgICAgICAg\nICB1J3Nob3J0X25hbWUnOiB1J01BJ1xcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB7XFxuICAg\nICAgICAgICAgdSdsb25nX25hbWUnOiB1J1VuaXRlZCBTdGF0ZXMnLFxcbiAgICAgICAgICAgIHUn\ndHlwZXMnOiBbXFxuICAgICAgICAgICAgICB1J2NvdW50cnknLFxcbiAgICAgICAgICAgICAgdSdw\nb2xpdGljYWwnXFxuICAgICAgICAgICAgXSxcXG4gICAgICAgICAgICB1J3Nob3J0X25hbWUnOiB1\nJ1VTJ1xcbiAgICAgICAgICB9XFxuICAgICAgICBdLFxcbiAgICAgICAgdSdwbGFjZV9pZCc6IHUn\nQ2hJSlgwXzUzN3R3NDRrUlFfcGxNaVk0T2hvJyxcXG4gICAgICAgIHUnZm9ybWF0dGVkX2FkZHJ
l\nc3MnOiB1J0Vhc3QgQ2FtYnJpZGdlLCBDYW1icmlkZ2UsIE1BLCBVU0EnLFxcbiAgICAgICAgdSd0\neXBlcyc6IFtcXG4gICAgICAgICAgdSduZWlnaGJvcmhvb2QnLFxcbiAgICAgICAgICB1J3BvbGl0\naWNhbCdcXG4gICAgICAgIF1cXG4gICAgICB9LFxcbiAgICAgIHtcXG4gICAgICAgIHUnZ2VvbWV0\ncnknOiB7XFxuICAgICAgICAgIHUnbG9jYXRpb25fdHlwZSc6IHUnQVBQUk9YSU1BVEUnLFxcbiAg\nICAgICAgICB1J2JvdW5kcyc6IHtcXG4gICAgICAgICAgICB1J25vcnRoZWFzdCc6IHtcXG4gICAg\nICAgICAgICAgIHUnbGF0JzogNDIuNDA0MzgzOSxcXG4gICAgICAgICAgICAgIHUnbG5nJzogLTcx\nLjA2NTQ4NzE5OTk5OTk5XFxuICAgICAgICAgICAgfSxcXG4gICAgICAgICAgICB1J3NvdXRod2Vz\ndCc6IHtcXG4gICAgICAgICAgICAgIHUnbGF0JzogNDIuMzUyMzk2OCxcXG4gICAgICAgICAgICAg\nIHUnbG5nJzogLTcxLjE2MDMzMjA5OTk5OTk5XFxuICAgICAgICAgICAgfVxcbiAgICAgICAgICB9\nLFxcbiAgICAgICAgICB1J3ZpZXdwb3J0Jzoge1xcbiAgICAgICAgICAgIHUnbm9ydGhlYXN0Jzog\ne1xcbiAgICAgICAgICAgICAgdSdsYXQnOiA0Mi40MDQzODM5LFxcbiAgICAgICAgICAgICAgdSds\nbmcnOiAtNzEuMDY1NDg3MTk5OTk5OTlcXG4gICAgICAgICAgICB9LFxcbiAgICAgICAgICAgIHUn\nc291dGh3ZXN0Jzoge1xcbiAgICAgICAgICAgICAgdSdsYXQnOiA0Mi4zNTIzOTY4LFxcbiAgICAg\nICAgICAgICAgdSdsbmcnOiAtNzEuMTYwMzMyMDk5OTk5OTlcXG4gICAgICAgICAgICB9XFxuICAg\nICAgICAgIH0sXFxuICAgICAgICAgIHUnbG9jYXRpb24nOiB7XFxuICAgICAgICAgICAgdSdsYXQn\nOiA0Mi4zNzM2MTU4LFxcbiAgICAgICAgICAgIHUnbG5nJzogLTcxLjEwOTczMzQ5OTk5OTk5XFxu\nICAgICAgICAgIH1cXG4gICAgICAgIH0sXFxuICAgICAgICB1J2FkZHJlc3NfY29tcG9uZW50cyc6\nIFtcXG4gICAgICAgICAge1xcbiAgICAgICAgICAgIHUnbG9uZ19uYW1lJzogdSdDYW1icmlkZ2Un\nLFxcbiAgICAgICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgICAgICB1J2xvY2FsaXR5Jyxc\nXG4gICAgICAgICAgICAgIHUncG9saXRpY2FsJ1xcbiAgICAgICAgICAgIF0sXFxuICAgICAgICAg\nICAgdSdzaG9ydF9uYW1lJzogdSdDYW1icmlkZ2UnXFxuICAgICAgICAgIH0sXFxuICAgICAgICAg\nIHtcXG4gICAgICAgICAgICB1J2xvbmdfbmFtZSc6IHUnTWlkZGxlc2V4IENvdW50eScsXFxuICAg\nICAgICAgICAgdSd0eXBlcyc6IFtcXG4gICAgICAgICAgICAgIHUnYWRtaW5pc3RyYXRpdmVfYXJl\nYV9sZXZlbF8yJyxcXG4gICAgICAgICAgICAgIHUncG9saXRpY2FsJ1xcbiAgICAgICAgICAgIF0s\nXFxuICAgICAgICAgICAgdSdzaG9ydF9uYW1lJzogdSdNaWRkbGVzZXggQ291bnR5J1xcbiAgICAg\nICAgICB9LFxcbiAgICAgICAgICB7XFxuICAgICAgICAgICAgdSdsb25nX25hbWUnOiB1J01hc3Nh\nY2h1c2V0dHMnLFxcbiAgICAgICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgICAgICB1J2Fk\nbWluaXN0cmF0aXZlX2FyZWFfbGV2ZWxfMScsXFxuICAgICAgICAgICAgICB1J3BvbGl0aWNhbCdc\nXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnTUEnXFxuICAg\nICAgICAgIH0sXFxuICAgICAgICAgIHtcXG4gICAgICAgICAgICB1J2xvbmdfbmFtZSc6IHUnVW5p\ndGVkIFN0YXRlcycsXFxuICAgICAgICAgICAgdSd0eXBlcyc6IFtcXG4gICAgICAgICAgICAgIHUn\nY291bnRyeScsXFxuICAgICAgICAgICAgICB1J3BvbGl0aWNhbCdcXG4gICAgICAgICAgICBdLFxc\nbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnVVMnXFxuICAgICAgICAgIH1cXG4gICAgICAg\nIF0sXFxuICAgICAgICB1J3BsYWNlX2lkJzogdSdDaElKWDh3d3k2Vnc0NGtSaDJ4b2lXU09Pc1Un\nLFxcbiAgICAgICAgdSdmb3JtYXR0ZWRfYWRkcmVzcyc6IHUnQ2FtYnJpZGdlLCBNQSwgVVNBJyxc\nXG4gICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgIHUnbG9jYWxpdHknLFxcbiAgICAgICAg\nICB1J3BvbGl0aWNhbCdcXG4gICAgICAgIF1cXG4gICAgICB9LFxcbiAgICAgIHtcXG4gICAgICAg\nIHUnZ2VvbWV0cnknOiB7XFxuICAgICAgICAgIHUnbG9jYXRpb25fdHlwZSc6IHUnQVBQUk9YSU1B\nVEUnLFxcbiAgICAgICAgICB1J2JvdW5kcyc6IHtcXG4gICAgICAgICAgICB1J25vcnRoZWFzdCc6\nIHtcXG4gICAgICAgICAgICAgIHUnbGF0JzogNDIuNzM2MTAyLFxcbiAgICAgICAgICAgICAgdSds\nbmcnOiAtNzEuMDIwMDk1XFxuICAgICAgICAgICAgfSxcXG4gICAgICAgICAgICB1J3NvdXRod2Vz\ndCc6IHtcXG4gICAgICAgICAgICAgIHUnbGF0JzogNDIuMTU2NjksXFxuICAgICAgICAgICAgICB1\nJ2xuZyc6IC03MS44OTg3MTU5XFxuICAgICAgICAgICAgfVxcbiAgICAgICAgICB9LFxcbiAgICAg\nICAgICB1J3ZpZXdwb3J0Jzoge1xcbiAgICAgICAgICAgIHUnbm9ydGhlYXN0Jzoge1xcbiAgICAg\nICAgICAgICAgdSdsYXQnOiA0Mi43MzYxMDIsXFxuICAgICAgICAgICAgICB1J2xuZyc6IC03MS4w\nMjAwOTVcXG4gICAgICAgICAgICB9LFxcbiAgICAgIC
AgICAgIHUnc291dGh3ZXN0Jzoge1xcbiAg\nICAgICAgICAgICAgdSdsYXQnOiA0Mi4xNTY2OSxcXG4gICAgICAgICAgICAgIHUnbG5nJzogLTcx\nLjg5ODcxNTlcXG4gICAgICAgICAgICB9XFxuICAgICAgICAgIH0sXFxuICAgICAgICAgIHUnbG9j\nYXRpb24nOiB7XFxuICAgICAgICAgICAgdSdsYXQnOiA0Mi40NjcyMDYsXFxuICAgICAgICAgICAg\ndSdsbmcnOiAtNzEuMjg3NDIwOVxcbiAgICAgICAgICB9XFxuICAgICAgICB9LFxcbiAgICAgICAg\ndSdhZGRyZXNzX2NvbXBvbmVudHMnOiBbXFxuICAgICAgICAgIHtcXG4gICAgICAgICAgICB1J2xv\nbmdfbmFtZSc6IHUnTWlkZGxlc2V4IENvdW50eScsXFxuICAgICAgICAgICAgdSd0eXBlcyc6IFtc\nXG4gICAgICAgICAgICAgIHUnYWRtaW5pc3RyYXRpdmVfYXJlYV9sZXZlbF8yJyxcXG4gICAgICAg\nICAgICAgIHUncG9saXRpY2FsJ1xcbiAgICAgICAgICAgIF0sXFxuICAgICAgICAgICAgdSdzaG9y\ndF9uYW1lJzogdSdNaWRkbGVzZXggQ291bnR5J1xcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB7\nXFxuICAgICAgICAgICAgdSdsb25nX25hbWUnOiB1J01hc3NhY2h1c2V0dHMnLFxcbiAgICAgICAg\nICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgICAgICB1J2FkbWluaXN0cmF0aXZlX2FyZWFfbGV2\nZWxfMScsXFxuICAgICAgICAgICAgICB1J3BvbGl0aWNhbCdcXG4gICAgICAgICAgICBdLFxcbiAg\nICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnTUEnXFxuICAgICAgICAgIH0sXFxuICAgICAgICAg\nIHtcXG4gICAgICAgICAgICB1J2xvbmdfbmFtZSc6IHUnVW5pdGVkIFN0YXRlcycsXFxuICAgICAg\nICAgICAgdSd0eXBlcyc6IFtcXG4gICAgICAgICAgICAgIHUnY291bnRyeScsXFxuICAgICAgICAg\nICAgICB1J3BvbGl0aWNhbCdcXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRf\nbmFtZSc6IHUnVVMnXFxuICAgICAgICAgIH1cXG4gICAgICAgIF0sXFxuICAgICAgICB1J3BsYWNl\nX2lkJzogdSdDaElKbVF3dFFlNEw1SWtSQ0xEeWsxTVZjT0knLFxcbiAgICAgICAgdSdmb3JtYXR0\nZWRfYWRkcmVzcyc6IHUnTWlkZGxlc2V4IENvdW50eSwgTUEsIFVTQScsXFxuICAgICAgICB1J3R5\ncGVzJzogW1xcbiAgICAgICAgICB1J2FkbWluaXN0cmF0aXZlX2FyZWFfbGV2ZWxfMicsXFxuICAg\nICAgICAgIHUncG9saXRpY2FsJ1xcbiAgICAgICAgXVxcbiAgICAgIH0sXFxuICAgICAge1xcbiAg\nICAgICAgdSdnZW9tZXRyeSc6IHtcXG4gICAgICAgICAgdSdsb2NhdGlvbl90eXBlJzogdSdBUFBS\nT1hJTUFURScsXFxuICAgICAgICAgIHUnYm91bmRzJzoge1xcbiAgICAgICAgICAgIHUnbm9ydGhl\nYXN0Jzoge1xcbiAgICAgICAgICAgICAgdSdsYXQnOiA0Mi44ODY3OSxcXG4gICAgICAgICAgICAg\nIHUnbG5nJzogLTY5Ljg1ODg2MDk5OTk5OTk5XFxuICAgICAgICAgICAgfSxcXG4gICAgICAgICAg\nICB1J3NvdXRod2VzdCc6IHtcXG4gICAgICAgICAgICAgIHUnbGF0JzogNDEuMTg3MDUzMDAwMDAw\nMDEsXFxuICAgICAgICAgICAgICB1J2xuZyc6IC03My41MDgxNDE5XFxuICAgICAgICAgICAgfVxc\nbiAgICAgICAgICB9LFxcbiAgICAgICAgICB1J3ZpZXdwb3J0Jzoge1xcbiAgICAgICAgICAgIHUn\nbm9ydGhlYXN0Jzoge1xcbiAgICAgICAgICAgICAgdSdsYXQnOiA0Mi44ODY3OSxcXG4gICAgICAg\nICAgICAgIHUnbG5nJzogLTY5Ljg1ODg2MDk5OTk5OTk5XFxuICAgICAgICAgICAgfSxcXG4gICAg\nICAgICAgICB1J3NvdXRod2VzdCc6IHtcXG4gICAgICAgICAgICAgIHUnbGF0JzogNDEuMTg3MDUz\nMDAwMDAwMDEsXFxuICAgICAgICAgICAgICB1J2xuZyc6IC03My41MDgxNDE5XFxuICAgICAgICAg\nICAgfVxcbiAgICAgICAgICB9LFxcbiAgICAgICAgICB1J2xvY2F0aW9uJzoge1xcbiAgICAgICAg\nICAgIHUnbGF0JzogNDIuNDA3MjEwNyxcXG4gICAgICAgICAgICB1J2xuZyc6IC03MS4zODI0Mzc0\nXFxuICAgICAgICAgIH1cXG4gICAgICAgIH0sXFxuICAgICAgICB1J2FkZHJlc3NfY29tcG9uZW50\ncyc6IFtcXG4gICAgICAgICAge1xcbiAgICAgICAgICAgIHUnbG9uZ19uYW1lJzogdSdNYXNzYWNo\ndXNldHRzJyxcXG4gICAgICAgICAgICB1J3R5cGVzJzogW1xcbiAgICAgICAgICAgICAgdSdhZG1p\nbmlzdHJhdGl2ZV9hcmVhX2xldmVsXzEnLFxcbiAgICAgICAgICAgICAgdSdwb2xpdGljYWwnXFxu\nICAgICAgICAgICAgXSxcXG4gICAgICAgICAgICB1J3Nob3J0X25hbWUnOiB1J01BJ1xcbiAgICAg\nICAgICB9LFxcbiAgICAgICAgICB7XFxuICAgICAgICAgICAgdSdsb25nX25hbWUnOiB1J1VuaXRl\nZCBTdGF0ZXMnLFxcbiAgICAgICAgICAgIHUndHlwZXMnOiBbXFxuICAgICAgICAgICAgICB1J2Nv\ndW50cnknLFxcbiAgICAgICAgICAgICAgdSdwb2xpdGljYWwnXFxuICAgICAgICAgICAgXSxcXG4g\nICAgICAgICAgICB1J3Nob3J0X25hbWUnOiB1J1VTJ1xcbiAgICAgICAgICB9XFxuICAgICAgICBd\nLFxcbiAgICAgICAgdSdwbGFjZV9pZCc6IHUnQ2hJSl9iOXo2VzFsNDRrUkhBMkRWVGJReGtVJyxc\nXG4gICAgICAgIHUnZm9ybWF0dGVkX2FkZHJlc3MnOiB1J01hc3NhY2h1c2V0dHMsIFVTQScsXFxu\nICAgICAgI
CB1J3R5cGVzJzogW1xcbiAgICAgICAgICB1J2FkbWluaXN0cmF0aXZlX2FyZWFfbGV2\nZWxfMScsXFxuICAgICAgICAgIHUncG9saXRpY2FsJ1xcbiAgICAgICAgXVxcbiAgICAgIH0sXFxu\nICAgICAge1xcbiAgICAgICAgdSdnZW9tZXRyeSc6IHtcXG4gICAgICAgICAgdSdsb2NhdGlvbl90\neXBlJzogdSdBUFBST1hJTUFURScsXFxuICAgICAgICAgIHUnYm91bmRzJzoge1xcbiAgICAgICAg\nICAgIHUnbm9ydGhlYXN0Jzoge1xcbiAgICAgICAgICAgICAgdSdsYXQnOiA3MS41Mzg4MDAxLFxc\nbiAgICAgICAgICAgICAgdSdsbmcnOiAtNjYuODg1NDE3XFxuICAgICAgICAgICAgfSxcXG4gICAg\nICAgICAgICB1J3NvdXRod2VzdCc6IHtcXG4gICAgICAgICAgICAgIHUnbGF0JzogMTguNzc2Myxc\nXG4gICAgICAgICAgICAgIHUnbG5nJzogMTcwLjU5NTdcXG4gICAgICAgICAgICB9XFxuICAgICAg\nICAgIH0sXFxuICAgICAgICAgIHUndmlld3BvcnQnOiB7XFxuICAgICAgICAgICAgdSdub3J0aGVh\nc3QnOiB7XFxuICAgICAgICAgICAgICB1J2xhdCc6IDcxLjUzODgwMDEsXFxuICAgICAgICAgICAg\nICB1J2xuZyc6IC02Ni44ODU0MTdcXG4gICAgICAgICAgICB9LFxcbiAgICAgICAgICAgIHUnc291\ndGh3ZXN0Jzoge1xcbiAgICAgICAgICAgICAgdSdsYXQnOiAxOC43NzYzLFxcbiAgICAgICAgICAg\nICAgdSdsbmcnOiAxNzAuNTk1N1xcbiAgICAgICAgICAgIH1cXG4gICAgICAgICAgfSxcXG4gICAg\nICAgICAgdSdsb2NhdGlvbic6IHtcXG4gICAgICAgICAgICB1J2xhdCc6IDM3LjA5MDI0LFxcbiAg\nICAgICAgICAgIHUnbG5nJzogLTk1LjcxMjg5MVxcbiAgICAgICAgICB9XFxuICAgICAgICB9LFxc\nbiAgICAgICAgdSdhZGRyZXNzX2NvbXBvbmVudHMnOiBbXFxuICAgICAgICAgIHtcXG4gICAgICAg\nICAgICB1J2xvbmdfbmFtZSc6IHUnVW5pdGVkIFN0YXRlcycsXFxuICAgICAgICAgICAgdSd0eXBl\ncyc6IFtcXG4gICAgICAgICAgICAgIHUnY291bnRyeScsXFxuICAgICAgICAgICAgICB1J3BvbGl0\naWNhbCdcXG4gICAgICAgICAgICBdLFxcbiAgICAgICAgICAgIHUnc2hvcnRfbmFtZSc6IHUnVVMn\nXFxuICAgICAgICAgIH1cXG4gICAgICAgIF0sXFxuICAgICAgICB1J3BsYWNlX2lkJzogdSdDaElK\nQ3pZeTVJUzE2bFFSUXJmZVE1SzVPeHcnLFxcbiAgICAgICAgdSdmb3JtYXR0ZWRfYWRkcmVzcyc6\nIHUnVW5pdGVkIFN0YXRlcycsXFxuICAgICAgICB1J3R5cGVzJzogW1xcbiAgICAgICAgICB1J2Nv\ndW50cnknLFxcbiAgICAgICAgICB1J3BvbGl0aWNhbCdcXG4gICAgICAgIF1cXG4gICAgICB9XFxu\nICAgIF1cXG4gIH1cXG59XFxuICAgICcnJ1wiLFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJp\nbnB1dHMuZ2VvY29kaW5nX2RhdGEgPSBhcnRpZmFjdC52YWx1ZVwifTwvcmVzaWxpZW50OmZ1bmN0\naW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18wOHR6Ym53PC9p\nbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzByMGphMnA8L291dGdvaW5nPjwvc2Vydmlj\nZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wOHR6Ym53XCIgc291cmNlUmVm\nPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMWdsbmw4M1wi\nLz48ZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8wc2M4Mm1tXCI+PGluY29taW5nPlNlcXVlbmNlRmxv\nd18wcjBqYTJwPC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5j\nZUZsb3dfMHIwamEycFwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzFnbG5sODNcIiB0YXJnZXRS\nZWY9XCJFbmRFdmVudF8wc2M4Mm1tXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0\naW9uXzFreHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0\nQW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNl\nUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4\neGl5dFwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8waXowd28wXCI+PHRl\neHQ+Y3JlYXRlcyBhIG5vdGUgd2l0aCByZXN1bHRzPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFz\nc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXBxdDR1NFwiIHNvdXJjZVJlZj1cIlNlcnZpY2VU\nYXNrXzFnbG5sODNcIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8waXowd28wXCIvPjx0ZXh0\nQW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzA3cnM5eHBcIj48dGV4dD5JbnB1dCBsYXRp\ndHVkZS9sb25naXR1ZGU8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJB\nc3NvY2lhdGlvbl8xY3k0ZHlwXCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tfMWdsbmw4M1wiIHRh\ncmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzA3cnM5eHBcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBN\nTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1l\nbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW
5kaTpCUE1OU2hhcGUgYnBt\nbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9k\naVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9\nXCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lk\ndGg9XCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5k\naTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlv\nbl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5k\ncyBoZWlnaHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5k\naTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFz\nZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1c\nIjE2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2lu\ndCB4PVwiMTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6\nQlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18xZ2xu\nbDgzXCIgaWQ9XCJTZXJ2aWNlVGFza18xZ2xubDgzX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9\nXCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjI1MFwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNo\nYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMDh0emJud1wi\nIGlkPVwiU2VxdWVuY2VGbG93XzA4dHpibndfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwi\nIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwi\nMjUwXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFi\nZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyMjRcIiB5PVwi\nMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5T\naGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzBzYzgybW1cIiBpZD1cIkVuZEV2ZW50XzBzYzgy\nbW1fZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI0MjNc\nIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNc\nIiB3aWR0aD1cIjBcIiB4PVwiNDQxXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9i\ncG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZs\nb3dfMHIwamEycFwiIGlkPVwiU2VxdWVuY2VGbG93XzByMGphMnBfZGlcIj48b21nZGk6d2F5cG9p\nbnQgeD1cIjM1MFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3\nYXlwb2ludCB4PVwiNDIzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJw\nbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9\nXCIzODYuNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdl\nPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMGl6MHdvMFwi\nIGlkPVwiVGV4dEFubm90YXRpb25fMGl6MHdvMF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwi\nMzBcIiB3aWR0aD1cIjEwMFwiIHg9XCIzNTZcIiB5PVwiODFcIi8+PC9icG1uZGk6QlBNTlNoYXBl\nPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8xcHF0NHU0XCIgaWQ9\nXCJBc3NvY2lhdGlvbl8xcHF0NHU0X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIzMzlcIiB4c2k6\ndHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjE2NlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjM5Mlwi\nIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTExXCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxi\ncG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMDdyczl4cFwiIGlk\nPVwiVGV4dEFubm90YXRpb25fMDdyczl4cF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiNDhc\nIiB3aWR0aD1cIjEzNFwiIHg9XCIxMzhcIiB5PVwiNzJcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxi\ncG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8xY3k0ZHlwXCIgaWQ9XCJB\nc3NvY2lhdGlvbl8xY3k0ZHlwX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIyNjVcIiB4c2k6dHlw\nZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjE2NlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjIyNlwiIHhz\naTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTIwXCIvPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBt\nbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiJ9LCAiY29u\ndGVudF92ZXJzaW9uIjogM
SwgImNyZWF0b3JfaWQiOiAiYUBleGFtcGxlLmNvbSIsICJkZXNjcmlw\ndGlvbiI6ICJSZXR1cm4gYWRkcmVzcyBmcm9tIGxhdGl0dWRlL2xvbmdpdHVkZSBpbnB1dCIsICJl\neHBvcnRfa2V5IjogImV4YW1wbGVfZ2VvY29kaW5nX2dldF9hZGRyZXNzIiwgImxhc3RfbW9kaWZp\nZWRfYnkiOiAiYUBleGFtcGxlLmNvbSIsICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNjAwNjE1Njg1\nNTg5LCAibmFtZSI6ICJFeGFtcGxlOiBHZW9jb2RpbmcgR2V0IEFkZHJlc3MiLCAib2JqZWN0X3R5\ncGUiOiAiYXJ0aWZhY3QiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9nZW9jb2Rpbmdf\nZ2V0X2FkZHJlc3MiLCAidGFncyI6IFtdLCAidXVpZCI6ICI4NjM0NGJkOC04ZWNjLTRjNjAtYjc5\nMC0zMTUxMDRkY2NkMjUiLCAid29ya2Zsb3dfaWQiOiA5NX1dLCAid29ya3NwYWNlcyI6IFtdfQ==\n\"\"\")", "title": "" }, { "docid": "22a6de565b7f950fe725794957498846", "score": "0.48302647", "text": "def convert(language='c'):", "title": "" }, { "docid": "93533628ab23979412e286c40dc02a01", "score": "0.48167065", "text": "def tamper(payload, **kwargs):\n result=[]\n result.append(re.sub(r\"[\\s+]\",'&#x09;',payload))\n result.append(re.sub(r\"[\\s+]\",'\\t',payload))\n result.append(re.sub(r\"[\\s+]\",'\\v',payload))\n\n# all string to comments(this out first brackets)\n if re.search('^\\w',payload):\n result.append('/*!'+payload+'*/')\n elif re.search('\\W',payload):\n i=0\n string=''\n while re.search('\\W',payload[i]):\n string=string+payload[i]\n i+=1\n string=string+'/*!'+payload[i:]+'*/'\n result.append(string)\n \n\n# all words to comments\n string=re.sub(r\"\\w*\",convert_this,str(payload))\n result.append(string)\n\n\n return result if payload else payload", "title": "" }, { "docid": "110df644fd97f4a58322df43e3f01ac0", "score": "0.48162887", "text": "def test_javascript_vocabulary():\n vocabulary = javascript.vocabulary\n LENGTH = 101 # includes <UNK>, <s>, </s>\n assert len(vocabulary) == LENGTH\n assert vocabulary.to_text(0) == vocabulary.unk_token\n assert vocabulary.to_text(1) == vocabulary.start_token\n assert vocabulary.to_text(2) == vocabulary.end_token", "title": "" }, { "docid": "5666f18c6d62a986fd7be1f9db44f054", "score": "0.48156017", "text": "def json_to_escpos(json_data):\n\n data = json.loads(json_data)\n data = json.loads(data['data']) ## XXX: This bug needs to be fixed in mKassa.\n\n escpos = []\n escpos.append(const.INIT)\n escpos.append(const.BOLD['true'])\n escpos.append(const.ALIGN['center'])\n escpos.append(const.FONT_SIZE['2x'])\n escpos.append(b'WonderLAN')\n escpos.append(const.LINE_BREAK)\n escpos.append(const.FONT_SIZE['1x'])\n escpos.append(const.BOLD['false'])\n escpos.append(str.encode(data['date']))\n escpos.append(const.LINE_BREAK)\n escpos.append(str.encode(data['method']))\n escpos.append(const.LINE_BREAK)\n escpos.append(const.LINE_BREAK)\n escpos.append(const.HORIZONTAL_RULE)\n escpos.append(const.LINE_BREAK)\n for e in products(data['products']):\n escpos.append(e)\n\n escpos.append(const.LINE_BREAK)\n escpos.append(const.HORIZONTAL_RULE)\n escpos.append(const.ALIGN['right'])\n escpos.append(str.encode(str(data['sum'])))\n escpos.append(b'kr')\n escpos.append(const.LINE_BREAK)\n escpos.append(const.LINE_BREAK)\n escpos.append(b'Org-nummer: 802460-7155')\n escpos.append(const.LINE_BREAK)\n escpos.append(const.LINE_BREAK)\n escpos.append(b\"Tack for ditt kop!\")\n escpos.append(const.LINE_BREAK)\n escpos.append(const.LINE_BREAK)\n escpos.append(const.LINE_BREAK)\n escpos.append(const.LINE_BREAK)\n escpos.append(const.CUT_PAPER)\n\n return escpos", "title": "" }, { "docid": "0e0482fa89a28cbb151ae091f5ca078a", "score": "0.4803234", "text": "def get_citations_js(self):\n\n def unquote(s):\n \"\"\"Change quites (\") to fancy quotes (“)\n Change new lines 
to '¤' symbol\n \"\"\"\n return s.replace('\"', \"“\").replace(\"\\n\", \"¤\")\n\n notes = []\n js = \"var citations = {};\\nvar sources = {};\\n\"\n js += \"var repositories = {};\\nvar notes = {};\\n\"\n for o in self.obj_catalog.values():\n if isinstance(o, Citation):\n page = unquote(o.page)\n js += f\"citations[{o.uniq_id}] = {{ \"\n js += f'confidence:\"{o.confidence}\", dates:\"{o.dates}\", '\n js += f'id:\"{o.id}\", note_ref:{o.note_ref}, '\n js += f'page:\"{page}\", source_id:{o.source_id}, uuid:\"{o.uuid}\" '\n js += \"};\\n\"\n notes.extend(o.note_ref)\n\n elif isinstance(o, SourceBl):\n sauthor = unquote(o.sauthor)\n spubinfo = unquote(o.spubinfo)\n stitle = unquote(o.stitle)\n js += f\"sources[{o.uniq_id}] = {{ \"\n js += f'id:\"{o.id}\", note_ref:{o.note_ref}, '\n js += f'repositories:{o.repositories}, sauthor:\"{sauthor}\", '\n js += f'spubinfo:\"{spubinfo}\", stitle:\"{stitle}\", '\n js += f'uuid:\"{o.uuid}\" '\n js += \"};\\n\"\n notes.extend(o.note_ref)\n\n elif isinstance(o, Repository):\n medium = translate(o.medium, \"medium\")\n atype = translate(o.type, \"rept\")\n js += f\"repositories[{o.uniq_id}] = {{ \"\n js += (\n f'uuid:\"{o.uuid}\", id:\"{o.id}\", type:\"{atype}\", rname:\"{o.rname}\", '\n )\n # Media type\n js += f'medium:\"{medium}\", notes:{o.notes}, sources:{o.sources}'\n js += \"};\\n\"\n notes.extend(o.notes)\n\n else:\n continue\n\n # Find referenced Notes; conversion to set removes duplicates\n for uniq_id in set(notes):\n o = self.obj_catalog[uniq_id]\n text = unquote(o.text)\n url = unquote(o.url)\n js += f\"notes[{o.uniq_id}] = {{ \"\n js += f'uuid:\"{o.uuid}\", id:\"{o.id}\", type:\"{o.type}\", '\n js += f'priv:\"{o.priv}\", text:\"{text}\", url:\"{url}\" '\n js += \"};\\n\"\n\n return js", "title": "" }, { "docid": "40a1ca6c76eb103fbf0b2c57eeab48de", "score": "0.47883597", "text": "def essen(self):", "title": "" }, { "docid": "40a1ca6c76eb103fbf0b2c57eeab48de", "score": "0.47883597", "text": "def essen(self):", "title": "" }, { "docid": "9ffff73dec3fbbffa11076d6038f6218", "score": "0.47873056", "text": "def counter2json(counter):", "title": "" }, { "docid": "0148de7d81c66dbd7d5b33218cf6e3db", "score": "0.47867513", "text": "def get_lhs(json):\n pass # PLACEHOLDER. 
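Aside: get_citations_js above assembles the JavaScript assignments by manual string concatenation and escapes quotes by substituting fancy quotes and a '¤' newline marker. A minimal sketch of the same idea that delegates escaping to json.dumps instead; the helper name and signature here are illustrative, not taken from the snippet:

import json

def objects_to_js(var_name, objects):
    # Hypothetical helper: emit the same kind of 'var X = {}; X[id] = {...};'
    # assignments, with json.dumps handling quote/newline escaping instead of
    # the fancy-quote and '¤' substitutions in unquote().
    lines = ["var %s = {};" % var_name]
    for uniq_id, fields in objects.items():
        lines.append("%s[%s] = %s;" % (var_name, uniq_id, json.dumps(fields)))
    return "\n".join(lines)

# e.g. objects_to_js("citations", {17: {"id": "C0001", "page": 'He said "hi"'}})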
FIRST PUT TEST CASES IN a2test.py, THEN WRITE THE BODY", "title": "" }, { "docid": "d7816de778fa4179b54be557536e691c", "score": "0.4781393", "text": "def get_json_string(self):\n\t\tshomate_dict = self.__dict__\n\t\tfor key, phase in self.phases.items():\n\t\t\tphase_dict = phase.__dict__\n\t\t\tphase_dict['a'] = phase.a.tolist()\n\t\t\tshomate_dict['phases'][key] = phase_dict\n\t\treturn json.dumps(shomate_dict)", "title": "" }, { "docid": "31afa117baab91b38cb7db8a1dfe1a87", "score": "0.47766036", "text": "def format_jq_security_code(code) -> str:\r\n\r\n code = str(code)\r\n\r\n # eg: 600511 300807\r\n pure_num_pattern = re.compile(r'(\\d+)')\r\n\r\n # eg: 600511.sh 300807.sz\r\n two_bit_suffix_pattern = re.compile(r'(\\d{6}).(sh|sz)')\r\n # eg sh600511 sz300807\r\n two_bit_prefix_pattern = re.compile(r'(sh|sz)(\\d{6})')\r\n\r\n num_mat = pure_num_pattern.match(code)\r\n tbs_mat = two_bit_suffix_pattern.match(code)\r\n tbp_mat = two_bit_prefix_pattern.match(code)\r\n\r\n num_code = None\r\n if num_mat:\r\n num_code = num_mat.group(0)\r\n elif tbs_mat:\r\n num_code = tbs_mat.group(0)\r\n elif tbp_mat:\r\n num_code = tbs_mat.group(1)\r\n else:\r\n return code\r\n\r\n num_code = num_code.zfill(6)\r\n\r\n if num_code[0] == '6' or num_code[0] == '5':\r\n return num_code + '.XSHG'\r\n else:\r\n return num_code + '.XSHE'", "title": "" }, { "docid": "be8f9e02417dc321555d64ff40abe4c9", "score": "0.47698426", "text": "def thg_encode(self, args):\n arg_mensage = args.split(\" \")\n if arg_mensage[0] == \"\":\n print(\n \"\"\"suporte encode:\nEste módulo fornece funções para codificar dados binários em caracteres ASCII \nimprimíveis e decodificar essas codificações de volta para dados binários.\nEle fornece funções de codificação e decodificação para as codificações \nespecificadas em RFC 3548 ,que define os algoritmos Base16, Base32 e Base64,\ne para as codificações Ascii85 e Base85 padrão de fato.\na2b_uu\nb2a_uu\na2b_base64\nb2a_base64\na2b_qp\nb2a_qp\na2b_hqx\nrledecode_hqx\nrlecode_hqx\nb2a_hqx\ncrc_hqx\ncrc32\nb2a_hex\na2b_hex\nhexlify\nunhexlify\nCharcode\nbinary\nbase62\nbasen\nbcd\nur\nunicode_normalize\nqp_encoding\n encode type[2,16,32,64] str\n\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n\n elif arg_mensage[0] == \"64\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n base64_bytes = base64.b64encode(arg_mensage[1])\n by_to_st(base64_bytes)\n elif arg_mensage[0] == \"32\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n b32encode_bytes = base64.b32encode(arg_mensage[1])\n by_to_st(b32encode_bytes)\n elif arg_mensage[0] == \"16\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n b16encode_bytes = base64.b16encode(arg_mensage[1])\n by_to_st(b16encode_bytes)\n elif arg_mensage[0] == \"a85encode\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n a85encode_bytes = base64.a85encode(arg_mensage[1])\n by_to_st(a85encode_bytes)\n elif arg_mensage[0] == \"b85encode\":\n arg_mensage[1] = arg_mensage[1].encode(\"ascii\")\n b85encode_bytes = base64.b85encode(arg_mensage[1])\n by_to_st(b85encode_bytes)\n elif arg_mensage[0] == \"a2b_uu\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta uma única linha de dados uuencodificados de volta em binários e retorne os dados binários. As linhas normalmente contêm 45 bytes (binários), exceto a última linha. 
Os dados da linha podem ser seguidos de espaços em branco.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.a2b_uu(arg_mensage[1])))\n elif arg_mensage[0] == \"a2b_base64\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados binários em uma linha de caracteres ASCII na codificação base64. O valor de retorno é a linha convertida, incluindo um caractere de nova linha. O comprimento dos dados deve ser de no máximo 57 para aderir ao padrão base64.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.a2b_base64(arg_mensage[1]))\n elif arg_mensage[0] == \"b2a_base64\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta dados binários em uma linha de caracteres ASCII na codificação base64. O valor de retorno é a linha convertida, incluindo um caractere de nova linha. O comprimento dos dados deve ser de no máximo 57 para aderir ao padrão base64.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.b2a_base64(b\"arg_mensage[1]\"))\n elif arg_mensage[0] == \"a2b_qp\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta um bloco de dados imprimíveis entre aspas de volta em binários e retorne os dados binários. Mais de uma linha pode ser passada por vez. Se o cabeçalho do argumento opcional estiver presente e verdadeiro, os sublinhados serão decodificados como espaços.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.a2b_qp(arg_mensage[1]))\n elif arg_mensage[0] == \"b2a_qp\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados binários em uma (s) linha (s) de caracteres ASCII em codificação imprimível entre aspas. O valor de retorno é a (s) linha (s) convertida (s). Se o argumento opcional quotetabs estiver presente e verdadeiro, todas as tabulações e espaços serão codificados. Se o argumento opcional istext estiver presente e verdadeiro, as novas linhas não serão codificadas, mas os espaços em branco finais serão codificados. Se o cabeçalho do argumento opcional estiver presente e verdadeiro, os espaços serão codificados como sublinhados de acordo com RFC1522. Se o cabeçalho do argumento opcional estiver presente e for falso, os caracteres de nova linha também serão codificados; caso contrário, a conversão de alimentação de linha pode corromper o fluxo de dados binários.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.a2b_qp(arg_mensage[1].encode()))\n elif arg_mensage[0] == \"a2b_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados ASCII formatados de binhex4 em binários, sem fazer a descompressão RLE. A string deve conter um número completo de bytes binários ou (no caso da última parte dos dados binhex4) ter os bits restantes zero.\n\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(binascii.a2b_hqx(arg_mensage[1]))\n elif arg_mensage[0] == \"rledecode_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a descompressão RLE nos dados, de acordo com o padrão binhex4. O algoritmo usa 0x90 após um byte como um indicador de repetição, seguido por uma contagem. 
Uma contagem de 0 especifica um valor de byte de 0x90 . A rotina retorna os dados descompactados, a menos que os dados de entrada de dados terminem em um indicador de repetição órfão, caso em que a exceção Incompleta é levantada.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.rledecode_hqx(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"rlecode_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a compactação RLE no estilo binhex4 nos dados e retorne o resultado.\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.rlecode_hqx(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"b2a_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a conversão hexbin4 binário para ASCII e retorne a string resultante. O argumento já deve ser codificado por RLE e ter um comprimento divisível por 3 (exceto possivelmente o último fragmento).\n\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.b2a_hqx(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"crc_hqx\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Calcule o valor binhex4 crc dos dados , começando com um crc inicial e retornando o resultado.\n\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(\n (binascii.crc_hqx(arg_mensage[1].encode(), int(arg_mensage[2])))\n )\n elif arg_mensage[0] == \"crc32\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Calcule CRC-32, a soma de verificação de dados de \n 32 bits, começando com um crc inicial. Isso é consistente com a soma de verificação do arquivo ZIP. \n Uma vez que o algoritmo é projetado para uso como um algoritmo de soma de verificação, não é adequado \n para uso como um algoritmo de hash geral. \n{YELLOW}Nota{YELLOW}{RED} Para gerar o mesmo valor numérico em todas as versões e plataformas Python, {RED}{BLUE}use crc32 (dados) & 0xffffffff{BLUE}{RED}. Se você estiver usando apenas a soma de verificação no formato binário compactado, isso não é necessário, pois o valor de retorno é a representação binária correta de 32 bits, independentemente do sinal.\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.crc32(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"hexlify\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Retorna a representação hexadecimal dos dados \n binários . Cada byte de dados é convertido na representação hexadecimal de 2 dígitos correspondente. \n A string resultante é, portanto, o dobro do comprimento dos dados . \n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(\n (binascii.hexlify(arg_mensage[1].encode(), arg_mensage[2].encode()))\n )\n elif arg_mensage[0] == \"b2a_hex\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} hex\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(\n (binascii.b2a_hex(arg_mensage[1].encode(), int(arg_mensage[2])))\n )\n elif arg_mensage[0] == \"unhexlify\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Retorna os dados binários representados pela string hexadecimal hexstr . 
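Aside: the help strings in thg_encode describe the standard-library base64 and binascii codecs; a short, self-contained sketch of the round trips, and of the CRC-32 masking note above, assuming only documented standard-library behaviour:

import base64, binascii

data = b"hello world"
# Each encoder has a matching decoder, so round trips are lossless.
assert base64.b64decode(base64.b64encode(data)) == data
assert base64.b32decode(base64.b32encode(data)) == data
assert base64.b16decode(base64.b16encode(data)) == data
assert binascii.a2b_uu(binascii.b2a_uu(data)) == data      # input must be <= 45 bytes
assert binascii.unhexlify(binascii.hexlify(data)) == data

# Portability note from the crc32 help: mask with 0xffffffff to get the same
# unsigned 32-bit checksum on every Python version and platform.
checksum = binascii.crc32(data) & 0xffffffff
print("%08x" % checksum)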
Esta função é o inverso de b2a_hex () . hexstr deve conter um número par de dígitos hexadecimais (que podem ser maiúsculas ou minúsculas), caso contrário, um TypeError é gerado.\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st((binascii.unhexlify(arg_mensage[1].encode())))\n elif arg_mensage[0] == \"b2a_uu\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta dados binários em uma linha de caracteres ASCII, o valor de retorno é a linha convertida, incluindo um caractere de nova linha. O comprimento dos dados deve ser de no máximo 45.\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n by_to_st(\n (binascii.b2a_uu(arg_mensage[1].encode(), int(arg_mensage[2])))\n )\n elif arg_mensage[0] == \"charcode\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}charcode{YELLOW}{BLUE} =>{BLUE}{RED}converte string em charcode\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(ord(arg_mensage[1].encode()))\n elif arg_mensage[0] == \"binary\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}binary{YELLOW}{BLUE} =>{BLUE}{RED}converte string em binary\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(\" \".join(format(ord(x), \"b\") for x in arg_mensage[1]))\n elif arg_mensage[0] == \"base62\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}base62{YELLOW}{BLUE} =>{BLUE}{RED}converte string em base62\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(decode62(arg_mensage[1]))\n elif arg_mensage[0] == \"basen\":\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}basen{YELLOW}{BLUE} =>{BLUE}{RED}converte decimal em basen\n \"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(numpy.base_repr(int(arg_mensage[1]), base=int(arg_mensage[2])))\n elif arg_mensage[0] == \"url\":\n try:\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}url_encode{YELLOW}{BLUE} =>{BLUE}{RED}encode personalidado para url\\nencode url_encode safa[] encoding\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(\n quote(\n arg_mensage[1], safe=arg_mensage[2], encoding=arg_mensage[3]\n )\n )\n except IndexError:\n print(\n \"digite a sintaxe correta\\nncode url_encode safa[] encoding\\n ou use o comando help\"\n )\n elif arg_mensage[0] == \"unicode_normalize\":\n try:\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}unicode_normalize{YELLOW}{BLUE} =>{BLUE}{RED}Transforme caracteres Unicode em uma das formas de normalização['NFC', 'NFKC', 'NFD','NFKD']\\n \n{YELLOW}NFD{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Canonical Decomposition\n{YELLOW}NFC{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Canonical Composition\n{YELLOW}NFKD{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Compatibility Decomposition\n{YELLOW}NFKC{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Compatibility Composition \nencode unicode_normalize str encoding['NFC', 'NFKC', 'NFD','NFKD']\\n\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(unicodedata.normalize(arg_mensage[1], arg_mensage[2]))\n except IndexError:\n print(\n \"digite a sintaxe correta\\nncode url_encode safa[] encoding\\n ou use o comando help\"\n )\n elif arg_mensage[0] == \"qp_encoding\":\n try:\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}qp_encoding{YELLOW}{BLUE} =>{BLUE}{RED}\n 
Quoted-Printable, ou QP encoding, \n é uma codificação que usa caracteres ASCII imprimíveis (alfanuméricos e o sinal de igual '=') \n para transmitir dados de 8 bits em um caminho de dados de 7 bits ou, geralmente, em um meio que não é 8- um pouco limpo. \n É definido como uma codificação de transferência de conteúdo MIME para uso em e-mail.\n QP funciona usando o sinal de igual '=' como um caractere de escape. Ele também limita o comprimento da linha a 76, pois alguns softwares têm limites no comprimento da linha\\nencode qp_encoding TXT encode\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n encoded = quopri.encodestring(arg_mensage[1].encode(arg_mensage[2]))\n print(encoded.decode())\n except IndexError:\n print(\n \"digite a sintaxe correta\\nencode qp_encoding é utf-16\\n ou use o comando help\"\n )\n elif arg_mensage[0] == \"idna\":\n try:\n if arg_mensage[1] == \"help\":\n print(\n \"\"\"{YELLOW}idna{YELLOW}{BLUE} =>{BLUE}{RED}encode personalidado para url\\nencode url_encode safa[] encoding\"\"\".format(\n YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED\n )\n )\n else:\n print(idna.encode(arg_mensage[1]).decode(arg_mensage[2]))\n except IndexError:\n print(\n \"digite a sintaxe correta\\nncode idna string encoding\\n ou use o comando help\"\n )\n\n else:\n pass\n try:\n pass\n\n except IndexError:\n print(\"verificar a saida\")", "title": "" }, { "docid": "d34d4887da34f69ae6c2227db92600cc", "score": "0.47648984", "text": "def js_output(self, attrs):\n\t\tpass", "title": "" }, { "docid": "d34d4887da34f69ae6c2227db92600cc", "score": "0.47648984", "text": "def js_output(self, attrs):\n\t\tpass", "title": "" }, { "docid": "6350f548860f8bb4e2e8e65b14df0bff", "score": "0.4738176", "text": "def perl_string_to_python(s):\n s = s.replace(\"=>\",\":\").replace(\"\\\\$\",\"$\").replace(\"\\\\@\",\"@\")\n try:\n res = json.loads(s)\n except json.JSONDecodeError as e:\n beg = max(0, e.pos - 25)\n end = min(len(e.doc), e.pos + 25)\n raise ValueError('Invalid JSON: {}. --> {} <--'.format(e, e.doc[beg:end]))\n return res", "title": "" }, { "docid": "cd2b669992fdc1f3cadf5ac6b47341bd", "score": "0.47359172", "text": "def analyse_script(js, jsr, java='/usr/bin/java', feature_extractor=\"/home/acas/src/extract-features.jar\", desired_encoding = 'utf-8'):\n is_bytes = isinstance(js, bytes)\n if is_bytes:\n tmpfile, fname = save_temp_file(js)\n else:\n fname = js\n tmpfile = None\n\n # TODO FIXME add support for packed JS here... with autounpacking (just changes the feature vectors but NOT the script content in Mongo)\n try:\n is_packed, unpacked_byte_content = is_artefact_packed(fname, js if is_bytes else None)\n if is_packed:\n if tmpfile is not None:\n os.unlink(tmpfile.name)\n assert len(unpacked_byte_content) > 0\n tmpfile, fname = save_temp_file(unpacked_byte_content)\n # FALLTHRU... although it will probably fail since unpacker is VERY unreliable... 
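Aside: a minimal quoted-printable round trip matching the qp_encoding branch described above; utf-8 is shown here, while the snippet's own error message suggests utf-16 was intended for its encoding argument:

import quopri

raw = "café = latte\n".encode("utf-8")
encoded = quopri.encodestring(raw)   # b'caf=C3=A9 =3D latte\n'
assert quopri.decodestring(encoded) == raw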
and often produces syntax errors\n except Exception as e:\n # if we get an exception, we assume that its bad packed artefact and reject the whole analysis\n print(\"WARNING: unpack failed for {} - ignored.\".format(jsr))\n return (None, True, str(e)) # (no bytes, failed, stderr == str(e))\n\n # ensure we get lossless output using recommendations reported at: https://bugs.python.org/issue34618\n environ = os.environ.copy()\n environ['PYTHONIOENCODING'] = desired_encoding\n\n # save to file and run extract-features.jar to identify the javascript features\n process = subprocess.run([java, \"-jar\", feature_extractor, fname, jsr.url],\n env=environ, capture_output=True, encoding=desired_encoding, errors='strict')\n\n # turn process stdout into something we can save\n ret = process.stdout\n assert isinstance(ret, str)\n bytes_content = ret.encode(desired_encoding, errors='strict') # any errors will hopefully get caught, despite double-encoding\n assert isinstance(bytes_content, bytes)\n\n # cleanup\n if tmpfile is not None:\n os.unlink(tmpfile.name)\n\n return (bytes_content, process.returncode != 0, process.stderr) # JSON (iff successful else None), failed (boolean), stderr capture", "title": "" }, { "docid": "9829d3f36bd9e55d84e34daf14124ce6", "score": "0.47320876", "text": "def pretty_print(js):\n try:\n return json.dumps(js, indent=4, separators=(\",\", \":\"), sort_keys=True)\n except Exception as e:\n return \"%s\" % js", "title": "" }, { "docid": "ef5703aafb8042ce7b312a88d8ad7a8f", "score": "0.4720636", "text": "def jsescape(string):\n return string.replace('<script','$BEGINSCRIPT').replace('</script>','$ENDSCRIPT').replace('\\n', '$NEWLINE').replace('\\r','')", "title": "" }, { "docid": "c56f2c8adb19d80726784c6edcbb15a7", "score": "0.47205964", "text": "def js_obfuscated_text(text):\r\n return \"\"\"<noscript>(%s)</noscript>\r\n <script type=\"text/javascript\">\r\n <!--\r\n document.write(\"%s\".replace(/[a-zA-Z]/g,\r\n function(c){\r\n return String.fromCharCode(\r\n (c<=\"Z\"?90:122)>=(c=c.charCodeAt(0)+13)?c:c-26);}));\r\n -->\r\n </script>\"\"\" % (UIStr.MAIL_HIDDEN_BY_JAVASCRIPT, rot_13_encrypt(text))", "title": "" }, { "docid": "4ae8680ecc927e6942b8cf605537eba1", "score": "0.47175834", "text": "def encode(self):\n \n \n return None", "title": "" }, { "docid": "9e0bc16de900323f27cc8d8e057410b5", "score": "0.47157207", "text": "def code(self) -> str:", "title": "" }, { "docid": "52fd56ee2487a001b6122c2dd32ac677", "score": "0.47033927", "text": "def encoded(self):\n ...", "title": "" }, { "docid": "5edf5cfc50a39f9d90660cbd5d02af11", "score": "0.46959898", "text": "def mogrify(filter, jsn):\n return filter + ' ' + json.dumps(jsn)", "title": "" }, { "docid": "caade81f3fed9e668b2aa018118514e2", "score": "0.46923295", "text": "def pack(data):\n return json.dumps(data, sort_keys=True, indent=2)", "title": "" }, { "docid": "995a15a633fc2090a41d644abe14a56f", "score": "0.4691845", "text": "def code():", "title": "" }, { "docid": "4fc2e502294232055b5910d8454dff8b", "score": "0.46836093", "text": "def _boilerplate_to_python(indent):\n indent_str = \" \" * indent\n boilerplate = indent_str + \"import core.vba_library\\n\"\n boilerplate = indent_str + \"import core.vba_context\\n\"\n boilerplate += indent_str + \"from core.utils import safe_print\\n\"\n boilerplate += indent_str + \"from core.utils import plus\\n\"\n boilerplate += indent_str + \"from core.utils import eq\\n\"\n boilerplate += indent_str + \"from core.utils import neq\\n\"\n boilerplate += indent_str + \"import 
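Aside: analyse_script above pins the child process's output encoding before capturing it; a self-contained sketch of that pattern, where the command is a stand-in for the extract-features.jar invocation:

import os
import subprocess

env = os.environ.copy()
env["PYTHONIOENCODING"] = "utf-8"   # force predictable, lossless child output
proc = subprocess.run(
    ["python3", "-c", "print('ok')"],
    env=env, capture_output=True, encoding="utf-8", errors="strict",
)
# errors="strict" makes undecodable bytes raise instead of being silently replaced.
print(proc.returncode, proc.stdout.strip())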
core.utils\\n\"\n boilerplate += indent_str + \"from core.vba_object import update_array\\n\"\n boilerplate += indent_str + \"from core.vba_object import coerce_to_num\\n\"\n boilerplate += indent_str + \"from core.vba_object import coerce_to_int\\n\"\n boilerplate += indent_str + \"from core.vba_object import coerce_to_str\\n\"\n boilerplate += indent_str + \"from core.vba_object import coerce_to_int_list\\n\\n\"\n boilerplate += indent_str + \"try:\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context\\n\"\n boilerplate += indent_str + \"except (NameError, UnboundLocalError):\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context = context\\n\"\n return boilerplate", "title": "" }, { "docid": "b68680d87b784b5bfbb1dfbc349c380e", "score": "0.46822724", "text": "def minify(javascript: str) -> str:\n url = \"https://javascript-minifier.com/raw\"\n return requests.post(url, {\"input\": javascript}).text", "title": "" }, { "docid": "254f2c0584f421371f98afbd79b2c8b5", "score": "0.46597657", "text": "def encode(self):\n ...", "title": "" }, { "docid": "43ecf4bb4df024ba608e16802e4367f7", "score": "0.4654132", "text": "def _repr_javascript_(self):\n result = super(Javascript, self)._repr_javascript_()\n if hasattr(self, 'metadata') and self.metadata:\n metadata = self.metadata\n else:\n metadata = {}\n orig_result = result\n if not isinstance(result, basestring):\n try:\n if len(result) > 1:\n metadata.update(result[1])\n result = result[0]\n except:\n pass # do nothing\n\n if metadata:\n return result, metadata\n else:\n return orig_result", "title": "" }, { "docid": "694d93dd679acf8eb07ad1cc764386b9", "score": "0.4633443", "text": "def to_json(self) -> str:", "title": "" }, { "docid": "c3e875c92f808a4be9263af62565b9d0", "score": "0.46261075", "text": "def deserialize(script):\n\n start = \"CScript([\"\n end = \"])\"\n\n ps = CScript(unhexlify(script)).__repr__()\n ps = ps[ps.index(start) + len(start): ps.index(end)].split(\", \")\n\n for i in range(len(ps)):\n if ps[i].startswith('x('):\n ps[i] = ps[i][3:-2]\n ps[i] = '<' + ps[i] + '>'\n\n return \" \".join(ps)", "title": "" }, { "docid": "0d30a77f8b7a2dd0b93861ba34797ae2", "score": "0.4625253", "text": "def generate_to_json(self, dict_: str, use_self: bool = True) -> str:\n self_ref = \"self.\" if use_self else \"\"\n assign = f\"{dict_}[\\\"{self.name}\\\"] = \"\n if self.items:\n if self.items.ref:\n assign += f\"[i.to_json() for i in {self_ref}{self.py_name}]\"\n else:\n assign += f\"list({self_ref}{self.py_name})\"\n else:\n if self.ref:\n assign += f\"{self_ref}{self.py_name}.to_json()\"\n else:\n assign += f\"{self_ref}{self.py_name}\"\n if self.optional:\n code = dedent(f\"\"\"\\\n if {self_ref}{self.py_name} is not None:\n {assign}\"\"\")\n else:\n code = assign\n return code", "title": "" }, { "docid": "54d8a4a777ad669f0527508fd20099a4", "score": "0.46206263", "text": "def json(self):\n json = ''\n json += '{'\n json += '\"accession\" : \"%s\",' % self.accession\n json += '\"date\" : \"%s\",' % self.getDate() # TODO / consider doing this on client-side\n json += '\"time\" : \"%s\",' % self.getTime() # TODO / consider doing this on client-side\n json += '\"status\" : \"%s\",' % self.getStatus()\n json += '\"codepairs\" : ['\n \n for i in range(len(self.codepairs)):\n json += self.codepairs[i].json()\n if i == len(self.codepairs) - 1:\n json += ''\n else:\n json += ','\n \n json += ']}'\n \n return json", "title": "" }, { "docid": "a539436a9b836a144ad290affd00a1d5", "score": "0.4604182", "text": "def _serialize(\n body\n) -> 
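Aside: a usage sketch for the minify() helper above. It assumes the snippet's definition is in scope and that requests has been imported (the snippet itself omits that import), and it needs network access to the javascript-minifier.com service:

compact = minify("function add(a, b) {\n    return a + b;\n}")
print(compact)   # roughly: function add(a,b){return a+b}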
Dict:\n return body", "title": "" }, { "docid": "296475ed766d8ae1d913c5b66aa3f2bb", "score": "0.46024898", "text": "def create_access_code_jwe(self) -> str:\n from fidesops.util.oauth_util import generate_jwe\n\n payload = {\n # client id may not be necessary\n JWE_PAYLOAD_CLIENT_ID: self.id,\n JWE_PAYLOAD_SCOPES: self.scopes,\n JWE_ISSUED_AT: datetime.now().isoformat(),\n }\n return generate_jwe(json.dumps(payload))", "title": "" }, { "docid": "f30ed682e247df597ac9dc0781baf1b5", "score": "0.45967463", "text": "def serialize():", "title": "" }, { "docid": "2a555c2e6aebf868a07e7638f8880f16", "score": "0.45900583", "text": "def list_to_eslist(pylist):\n eslist = '[ '\n \n for item in pylist[:-1]:\n eslist += '\"' + item + '\", '\n \n eslist += '\"' + pylist[-1] + '\" ]'\n \n return eslist", "title": "" }, { "docid": "41535ceb4b9b507ef72a57618c9af6b3", "score": "0.45861098", "text": "def to_json(struc):\n\tencoder = json.JSONEncoder()\n\t# TODO format the struc in the way we want to sent to solar\n\treturn encoder.encode(struc)", "title": "" }, { "docid": "014ceaac5889a7c84cbf531ee5963951", "score": "0.4582217", "text": "def string_from_interwebs(input_value):\n \n return escape(unquote(input_value))", "title": "" }, { "docid": "014ceaac5889a7c84cbf531ee5963951", "score": "0.4582217", "text": "def string_from_interwebs(input_value):\n \n return escape(unquote(input_value))", "title": "" }, { "docid": "39d7c194e30acb49120ea1b39d9d1341", "score": "0.45488468", "text": "def convert_js2py(m):\n\n full_widget = widgets.VBox(layout=widgets.Layout(width=\"465px\", height=\"350px\"))\n\n text_widget = widgets.Textarea(\n placeholder=\"Paste your Earth Engine JavaScript into this textbox and click the Convert button below to convert the Javascript to Python\",\n layout=widgets.Layout(width=\"455px\", height=\"310px\"),\n )\n\n buttons = widgets.ToggleButtons(\n value=None,\n options=[\"Convert\", \"Clear\", \"Close\"],\n tooltips=[\"Convert\", \"Clear\", \"Close\"],\n button_style=\"primary\",\n )\n buttons.style.button_width = \"142px\"\n\n def button_clicked(change):\n if change[\"new\"] == \"Convert\":\n from .conversion import js_snippet_to_py, create_new_cell\n\n if len(text_widget.value) > 0:\n out_lines = js_snippet_to_py(\n text_widget.value,\n add_new_cell=False,\n import_ee=False,\n import_geemap=False,\n show_map=False,\n )\n if len(out_lines) > 0 and len(out_lines[0].strip()) == 0:\n out_lines = out_lines[1:]\n text_widget.value = \"\".join(out_lines)\n create_code_cell(text_widget.value)\n\n elif change[\"new\"] == \"Clear\":\n text_widget.value = \"\"\n elif change[\"new\"] == \"Close\":\n m.toolbar_reset()\n if m.convert_ctrl is not None and m.convert_ctrl in m.controls:\n m.remove_control(m.convert_ctrl)\n full_widget.close()\n buttons.value = None\n\n buttons.observe(button_clicked, \"value\")\n\n full_widget.children = [text_widget, buttons]\n widget_control = WidgetControl(widget=full_widget, position=\"topright\")\n m.add_control(widget_control)\n m.convert_ctrl = widget_control", "title": "" }, { "docid": "0d86e76fafa112c5416ff86992a6f6ee", "score": "0.4547814", "text": "def payload():\n return {\"iss\": \"jeff\", \"exp\": utc_timestamp() + 15, \"claim\": \"insanity\"}", "title": "" }, { "docid": "5e2d86130e0015cefd68604c38af6d96", "score": "0.45460388", "text": "def source(variables, parts, functions, fragment, gles):\n\n rv = [ ]\n\n if gles:\n rv.append(\"\"\"\n#version 100 es\n\"\"\")\n\n if fragment:\n rv.append(\"\"\"\nprecision mediump float;\n\"\"\")\n\n else:\n 
rv.append(\"\"\"\n#version 120\n\"\"\")\n\n rv.extend(functions)\n\n for storage, type_, name in sorted(variables):\n rv.append(\"{} {} {};\\n\".format(storage, type_, name))\n\n rv.append(\"\\nvoid main() {\\n\")\n\n parts.sort()\n\n for _, part in parts:\n rv.append(part)\n\n rv.append(\"}\\n\")\n\n return \"\".join(rv)", "title": "" }, { "docid": "600e0e4af94009454a97684da0ee4965", "score": "0.45280161", "text": "def customization_data(client=None):\n\n # This import data contains:\n # Action fields:\n # netwitness_end_time\n # netwitness_query\n # netwitness_start_time\n # Function inputs:\n # incident_id\n # nw_data_format\n # nw_end_time\n # nw_event_session_ids\n # nw_meta_id1\n # nw_meta_id2\n # nw_query\n # nw_results_size\n # nw_session_id1\n # nw_session_id2\n # nw_start_time\n # Message Destinations:\n # rsa_netwitness_message_destination\n # Functions:\n # netwitness_get_meta_id_ranges\n # netwitness_get_meta_values\n # netwitness_query\n # netwitness_retrieve_log_data\n # netwitness_retrieve_pcap_data\n # Workflows:\n # example_netwitness_get_meta_values\n # example_netwitness_retrieve_log_file\n # example_netwitness_retrieve_pcap_file\n # example_netwitness_retrieve_pcap_file_time\n # Rules:\n # (Example) NetWitness Get Meta Values\n # (Example) NetWitness Retrieve Log File\n # (Example) NetWitness Retrieve PCAP File\n # (Example) NetWitness Retrieve PCAP File (Time)\n\n\n yield ImportDefinition(u\"\"\"\neyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ1dWlkIjogImE3Y2FhYmE3LTVmNWIt\nNDlmNy04NTM0LWRlZmM3ZmNlOWZhNiIsICJkZXNjcmlwdGlvbiI6ICJBbiBleGFtcGxlIHRoYXQg\ndXNlcyBOZXRXaXRuZXNzIFJldHJpZXZlIExvZyBEYXRhIGZ1bmN0aW9uIHRvIHJldHVybiBsb2cg\nZGF0YSBkdXJpbmcgYSBzcGVjaWZpYyB0aW1lIGZyYW1lIGFuZCBhdHRhY2hlcyBpdCB0byB0aGUg\naW5jaWRlbnQuIiwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgImV4cG9ydF9rZXkiOiAiZXhh\nbXBsZV9uZXR3aXRuZXNzX3JldHJpZXZlX2xvZ19maWxlIiwgIndvcmtmbG93X2lkIjogMTM5LCAi\nbGFzdF9tb2RpZmllZF9ieSI6ICJhZG1pbkBjbzNzeXMuY29tIiwgImNvbnRlbnQiOiB7InhtbCI6\nICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMg\neG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1s\nbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHht\nbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxu\nczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6\ncmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJo\ndHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cu\ndzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDov\nL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJleGFtcGxlX25ldHdpdG5lc3Nf\ncmV0cmlldmVfbG9nX2ZpbGVcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFtZT1cIihFeGFtcGxl\nKSBOZXRXaXRuZXNzIFJldHJpZXZlIExvZyBGaWxlXCI+PGRvY3VtZW50YXRpb24+QW4gZXhhbXBs\nZSB0aGF0IHVzZXMgTmV0V2l0bmVzcyBSZXRyaWV2ZSBMb2cgRGF0YSBmdW5jdGlvbiB0byByZXR1\ncm4gbG9nIGRhdGEgZHVyaW5nIGEgc3BlY2lmaWMgdGltZSBmcmFtZSBhbmQgYXR0YWNoZXMgaXQg\ndG8gdGhlIGluY2lkZW50LjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0RXZl\nbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMDYzdHFzOTwvb3V0Z29pbmc+PC9z\ndGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzFlMGNldXJcIiBuYW1lPVwi\nTmV0V2l0bmVzcyBSZXRyaWV2ZSBMb2cgRGF0YVwiIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25c\nIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVudDpmdW5jdGlvbiB1dWlkPVwiNDEwYzAwZTEt\nZDBlZS00ZjEzLTllZjYtNTNjOWU5NWRkMTE0XCI+e1wiaW5wdXRzXCI6e1wiZTExMjM3MTEtOTEw\nMy00OGQxLTg0ZDctMzY3MzNmZjc5ZGVkXCI6e1wiaW5wdXRfdHlwZVwiOlwic3RhdGljXCIsXCJz\ndGF0aW
NfaW5wdXRcIjp7XCJtdWx0aXNlbGVjdF92YWx1ZVwiOltdLFwic2VsZWN0X3ZhbHVlXCI6\nXCI2NTk1ZDUxYy0xMjhlLTQ1OTctODNjOC1iY2RlN2UxN2VkOGZcIn19fSxcInByZV9wcm9jZXNz\naW5nX3NjcmlwdFwiOlwiaW5wdXRzLm53X3N0YXJ0X3RpbWUgPSBydWxlLnByb3BlcnRpZXMubmV0\nd2l0bmVzc19zdGFydF90aW1lXFxuaW5wdXRzLm53X2VuZF90aW1lID0gcnVsZS5wcm9wZXJ0aWVz\nLm5ldHdpdG5lc3NfZW5kX3RpbWVcIixcInJlc3VsdF9uYW1lXCI6XCJud19sb2dfZmlsZVwifTwv\ncmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNl\nRmxvd18wNjN0cXM5PC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzB6ZXhwMjE8L291\ndGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wNjN0\ncXM5XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2Vydmlj\nZVRhc2tfMWUwY2V1clwiLz48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNlVGFza18wYzQ2cXNjXCIg\nbmFtZT1cIlV0aWxpdGllczogU3RyaW5nIHRvIEF0dGFjaG1lbnRcIiByZXNpbGllbnQ6dHlwZT1c\nImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6ZnVuY3Rpb24gdXVpZD1c\nImNkOGEyM2NlLTYzYmEtNDJjZi04ZWJhLThiNjNkNWU3Yzg3MlwiPntcImlucHV0c1wiOnt9LFwi\ncHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpbnB1dHMuaW5jaWRlbnRfaWQgPSBpbmNpZGVudC5p\nZFxcbmlucHV0cy5hdHRhY2htZW50X25hbWUgPSBcXFwiTG9nIGZpbGUgZm9yIHt9IC0ge31cXFwi\nLmZvcm1hdCh3b3JrZmxvdy5wcm9wZXJ0aWVzLm53X2xvZ19maWxlLmlucHV0cy5ud19zdGFydF90\naW1lLCB3b3JrZmxvdy5wcm9wZXJ0aWVzLm53X2xvZ19maWxlLmlucHV0cy5ud19lbmRfdGltZSlc\nXG5pbnB1dHMuc3RyaW5nX3RvX2NvbnZlcnRfdG9fYXR0YWNobWVudCA9IHN0cih3b3JrZmxvdy5w\ncm9wZXJ0aWVzLm53X2xvZ19maWxlLmNvbnRlbnQpXCJ9PC9yZXNpbGllbnQ6ZnVuY3Rpb24+PC9l\neHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VGbG93XzE4dTZucWI8L2luY29taW5n\nPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMTFoc2VuZjwvb3V0Z29pbmc+PC9zZXJ2aWNlVGFzaz48\nZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8wbjVzNXlxXCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18w\nM2xwYmhsPC9pbmNvbWluZz48L2VuZEV2ZW50PjxleGNsdXNpdmVHYXRld2F5IGlkPVwiRXhjbHVz\naXZlR2F0ZXdheV8xOGIzNWtsXCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18wemV4cDIxPC9pbmNv\nbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzE4dTZucWI8L291dGdvaW5nPjxvdXRnb2luZz5T\nZXF1ZW5jZUZsb3dfMHhoc2I1Zzwvb3V0Z29pbmc+PC9leGNsdXNpdmVHYXRld2F5PjxzZXF1ZW5j\nZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMHpleHAyMVwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNr\nXzFlMGNldXJcIiB0YXJnZXRSZWY9XCJFeGNsdXNpdmVHYXRld2F5XzE4YjM1a2xcIi8+PHNlcXVl\nbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xOHU2bnFiXCIgbmFtZT1cIkxvZyBmaWxlIGlzIHBy\nZXNlbnRcIiBzb3VyY2VSZWY9XCJFeGNsdXNpdmVHYXRld2F5XzE4YjM1a2xcIiB0YXJnZXRSZWY9\nXCJTZXJ2aWNlVGFza18wYzQ2cXNjXCI+PGNvbmRpdGlvbkV4cHJlc3Npb24gbGFuZ3VhZ2U9XCJy\nZXNpbGllbnQtY29uZGl0aW9uc1wiIHhzaTp0eXBlPVwidEZvcm1hbEV4cHJlc3Npb25cIj48IVtD\nREFUQVt7XCJjb25kaXRpb25zXCI6W3tcImV2YWx1YXRpb25faWRcIjoxLFwiZmllbGRfbmFtZVwi\nOm51bGwsXCJtZXRob2RcIjpcInNjcmlwdFwiLFwidHlwZVwiOm51bGwsXCJ2YWx1ZVwiOntcInNj\ncmlwdF90ZXh0XCI6XCIjRW50ZXIgc3VwcGxlbWVudGFsIHNjcmlwdFxcbiNWYXJpYWJsZXMgaW5z\ndGFudGlhdGVkIGluIHRoaXMgZWRpdG9yIGFyZSBhdmFpbGFibGVcXG4jZm9yIHVzZSBpbiB0aGUg\nZXhwcmVzc2lvbiBhYm92ZVwiLFwiZmluYWxfZXhwcmVzc2lvbl90ZXh0XCI6XCJ3b3JrZmxvdy5w\ncm9wZXJ0aWVzLm53X2xvZ19maWxlLmNvbnRlbnQgaXMgbm90IFxcXCJcXFwiXCIsXCJsYW5ndWFn\nZVwiOlwicHl0aG9uXCJ9fV0sXCJjdXN0b21fY29uZGl0aW9uXCI6XCJcIixcImxvZ2ljX3R5cGVc\nIjpcImFsbFwifV1dPjwvY29uZGl0aW9uRXhwcmVzc2lvbj48L3NlcXVlbmNlRmxvdz48ZXhjbHVz\naXZlR2F0ZXdheSBpZD1cIkV4Y2x1c2l2ZUdhdGV3YXlfMTBsa3FwaVwiPjxpbmNvbWluZz5TZXF1\nZW5jZUZsb3dfMHhoc2I1ZzwvaW5jb21pbmc+PGluY29taW5nPlNlcXVlbmNlRmxvd18xMWhzZW5m\nPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzAzbHBiaGw8L291dGdvaW5nPjwvZXhj\nbHVzaXZlR2F0ZXdheT48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzB4aHNiNWdcIiBz\nb3VyY2VSZWY9XCJFeGNsdXNpdmVHYXRld2F5XzE4YjM1a2xcIiB0YXJnZXRSZWY9XCJFeGNsdXNp\ndmVHYXRld2F5XzEwbGtxcGlcIi8+PHNlcXVlbmNlRmxvdyBpZD1
cIlNlcXVlbmNlRmxvd18xMWhz\nZW5mXCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tfMGM0NnFzY1wiIHRhcmdldFJlZj1cIkV4Y2x1\nc2l2ZUdhdGV3YXlfMTBsa3FwaVwiLz48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzAz\nbHBiaGxcIiBzb3VyY2VSZWY9XCJFeGNsdXNpdmVHYXRld2F5XzEwbGtxcGlcIiB0YXJnZXRSZWY9\nXCJFbmRFdmVudF8wbjVzNXlxXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9u\nXzFreHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5u\nb3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVm\nPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5\ndFwiLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48\nYnBtbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5l\nXzFcIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwi\nIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwi\nIHdpZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21n\nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIyMjNcIi8+\nPC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBi\ncG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0aW9u\nXzFreHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4\nPVwiOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJw\nbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0\nOF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwi\nIHk9XCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBv\naW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1u\nRWxlbWVudD1cIlNlcnZpY2VUYXNrXzFlMGNldXJcIiBpZD1cIlNlcnZpY2VUYXNrXzFlMGNldXJf\nZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMjk2XCIg\neT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVu\ndD1cIlNlcXVlbmNlRmxvd18wNjN0cXM5XCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMDYzdHFzOV9kaVwi\nPjxvbWdkaTp3YXlwb2ludCB4PVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIy\nMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIyOTZcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIg\neT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIg\nd2lkdGg9XCIwXCIgeD1cIjI0N1wiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBt\nbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU2VydmljZVRhc2tf\nMGM0NnFzY1wiIGlkPVwiU2VydmljZVRhc2tfMGM0NnFzY19kaVwiPjxvbWdkYzpCb3VuZHMgaGVp\nZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCI1NTIuOTEwODgwODI5MDE1NlwiIHk9XCIxMDVc\nIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5k\nRXZlbnRfMG41czV5cVwiIGlkPVwiRW5kRXZlbnRfMG41czV5cV9kaVwiPjxvbWdkYzpCb3VuZHMg\naGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjgyOC45MTA4ODA4MjkwMTU2XCIgeT1cIjE4\nOFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9\nXCIwXCIgeD1cIjg0Ni45MTA4ODA4MjkwMTU2XCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFi\nZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRXhj\nbHVzaXZlR2F0ZXdheV8xOGIzNWtsXCIgaWQ9XCJFeGNsdXNpdmVHYXRld2F5XzE4YjM1a2xfZGlc\nIiBpc01hcmtlclZpc2libGU9XCJ0cnVlXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI1MFwiIHdp\nZHRoPVwiNTBcIiB4PVwiNDM3Ljc5MTEwMjUxNDUwNjc1XCIgeT1cIjE4MVwiLz48YnBtbmRpOkJQ\nTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjQ2Mi43\nOTExMDI1MTQ1MDY3NVwiIHk9XCIyMzRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQ\nTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzB6ZXhw\nMjFcIiBpZD1cIlNlcXVlbmNlRmxvd18wemV4cDIxX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIz\nOTZcIiB4c2k6dHlwZT
1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQg\neD1cIjQzOFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBN\nTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNDE3XCIg\neT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpC\nUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xOHU2bnFiXCIgaWQ9XCJTZXF1ZW5j\nZUZsb3dfMTh1Nm5xYl9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNDYzXCIgeHNpOnR5cGU9XCJv\nbWdkYzpQb2ludFwiIHk9XCIxODFcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI0NjNcIiB4c2k6dHlw\nZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjE0NVwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjU1M1wiIHhz\naTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTQ1XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdk\nYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjg3XCIgeD1cIjQxOVwiIHk9XCIxMTZcIi8+\nPC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJw\nbW5FbGVtZW50PVwiRXhjbHVzaXZlR2F0ZXdheV8xMGxrcXBpXCIgaWQ9XCJFeGNsdXNpdmVHYXRl\nd2F5XzEwbGtxcGlfZGlcIiBpc01hcmtlclZpc2libGU9XCJ0cnVlXCI+PG9tZ2RjOkJvdW5kcyBo\nZWlnaHQ9XCI1MFwiIHdpZHRoPVwiNTBcIiB4PVwiNzEwLjc5MTEwMjUxNDUwNjdcIiB5PVwiMTgx\nXCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1c\nIjBcIiB4PVwiNzM1Ljc5MTEwMjUxNDUwNjdcIiB5PVwiMjM0XCIvPjwvYnBtbmRpOkJQTU5MYWJl\nbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVl\nbmNlRmxvd18weGhzYjVnXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMHhoc2I1Z19kaVwiPjxvbWdkaTp3\nYXlwb2ludCB4PVwiNDg4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9t\nZ2RpOndheXBvaW50IHg9XCI3MTFcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwi\nLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIw\nXCIgeD1cIjU5OS41XCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBN\nTkVkZ2U+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xMWhzZW5m\nXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMTFoc2VuZl9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNjUz\nXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxNDVcIi8+PG9tZ2RpOndheXBvaW50IHg9\nXCI3MzZcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjE0NVwiLz48b21nZGk6d2F5cG9p\nbnQgeD1cIjczNlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMTgxXCIvPjxicG1uZGk6\nQlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiNjk0\nLjVcIiB5PVwiMTIzLjVcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxi\ncG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMDNscGJobFwiIGlkPVwi\nU2VxdWVuY2VGbG93XzAzbHBiaGxfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjc2MVwiIHhzaTp0\neXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiODI5XCIg\neHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9t\nZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI3OTVcIiB5PVwiMTg0XCIv\nPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+\nPC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4iLCAid29ya2Zsb3dfaWQiOiAiZXhh\nbXBsZV9uZXR3aXRuZXNzX3JldHJpZXZlX2xvZ19maWxlIiwgInZlcnNpb24iOiAxMn0sICJsYXN0\nX21vZGlmaWVkX3RpbWUiOiAxNTU0MTQ2NjEzNzYxLCAiY3JlYXRvcl9pZCI6ICJhZG1pbkBjbzNz\neXMuY29tIiwgImFjdGlvbnMiOiBbXSwgInByb2dyYW1tYXRpY19uYW1lIjogImV4YW1wbGVfbmV0\nd2l0bmVzc19yZXRyaWV2ZV9sb2dfZmlsZSIsICJuYW1lIjogIihFeGFtcGxlKSBOZXRXaXRuZXNz\nIFJldHJpZXZlIExvZyBGaWxlIn0sIHsidXVpZCI6ICJhM2ZjZDA5MS01NTU5LTQ0ZDktYWRiYS04\nNjllMmMxY2RiMDQiLCAiZGVzY3JpcHRpb24iOiAiQW4gZXhhbXBsZSB0aGF0IHJldHVybnMgYSBQ\nQ0FQIGZpbGUgb2YgcGFja2V0IGRhdGEgd2l0aGluIHRoZSBnaXZlbiBzZXNzaW9uIElEIHJhbmdl\nIGFuZCBhdHRhY2hlcyBpdCB0byB0aGUgaW5jaWRlbnQuIiwgIm9iamVjdF90eXBlIjogImluY2lk\nZW50IiwgImV4cG9ydF9rZXkiOiAiZXhhbXBsZV9uZXR3aXRuZXNzX3JldHJpZXZlX3BjYXBfZmls\nZSIsICJ3b3JrZmxvd19pZCI6IDEzNSwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYWR
taW5AY28zc3lz\nLmNvbSIsICJjb250ZW50IjogeyJ4bWwiOiAiPD94bWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5n\nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMv\nQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9z\ncGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9z\ncGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3Bl\nYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJt\nLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWFc\nIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwi\nIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNz\nIGlkPVwiZXhhbXBsZV9uZXR3aXRuZXNzX3JldHJpZXZlX3BjYXBfZmlsZVwiIGlzRXhlY3V0YWJs\nZT1cInRydWVcIiBuYW1lPVwiKEV4YW1wbGUpIE5ldFdpdG5lc3MgUmV0cmlldmUgUENBUCBGaWxl\nXCI+PGRvY3VtZW50YXRpb24+QW4gZXhhbXBsZSB0aGF0IHJldHVybnMgYSBQQ0FQIGZpbGUgb2Yg\ncGFja2V0IGRhdGEgd2l0aGluIHRoZSBnaXZlbiBzZXNzaW9uIElEIHJhbmdlIGFuZCBhdHRhY2hl\ncyBpdCB0byB0aGUgaW5jaWRlbnQuPC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3Rh\ncnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xNHN4N3NwPC9vdXRnb2lu\nZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMDRsZzB3MFwiIG5h\nbWU9XCJOZXRXaXRuZXNzIFF1ZXJ5XCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRl\nbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCIwMWRhYTYxYy1kYzIyLTRi\nMWEtOWMwMC0yNDgzNmY2MWU2ODdcIj57XCJpbnB1dHNcIjp7XCJiZDRkYzg5OC0yYjdlLTQ5YjIt\nODg1MS1mZDEwMzU5OTUxZWFcIjp7XCJpbnB1dF90eXBlXCI6XCJzdGF0aWNcIixcInN0YXRpY19p\nbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJudW1iZXJfdmFsdWVcIjoxMDB9fX0s\nXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImlucHV0cy5ud19xdWVyeSA9IHJ1bGUucHJvcGVy\ndGllcy5uZXR3aXRuZXNzX3F1ZXJ5XCIsXCJyZXN1bHRfbmFtZVwiOlwibndfcXVlcnlcIn08L3Jl\nc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZs\nb3dfMTRzeDdzcDwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xa2FnY3lxPC9vdXRn\nb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMTRzeDdz\ncFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VU\nYXNrXzA0bGcwdzBcIi8+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMGd2YmpwZFwiIG5h\nbWU9XCJOZXRXaXRuZXNzIFJldHJpZXZlIFBDQVAgRGF0YVwiIHJlc2lsaWVudDp0eXBlPVwiZnVu\nY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVudDpmdW5jdGlvbiB1dWlkPVwiMWU2\nNjE5ZjUtY2E4OS00YTQzLTg3NDEtY2NjZmM2Yzc4Mjk5XCI+e1wiaW5wdXRzXCI6e30sXCJwb3N0\nX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpbmNpZGVudC5hZGROb3RlKFxcXCJBZGRlZCBQQ0FQIGZp\nbGUgZm9yIHNlc3Npb25zIHt9IGFzIGFuIGF0dGFjaG1lbnQgdG8gdGhlIEluY2lkZW50LlxcXCIu\nZm9ybWF0KHJlc3VsdHMuaW5wdXRzLm53X2V2ZW50X3Nlc3Npb25faWRzKSlcIixcInByZV9wcm9j\nZXNzaW5nX3NjcmlwdFwiOlwiZmllbGRzID0gd29ya2Zsb3cucHJvcGVydGllcy5ud19xdWVyeS5j\nb250ZW50LnJlc3VsdHMuZmllbGRzXFxuXFxuIyBQdWxsIHNlc3Npb24gSURzIG91dCBvZiByZXN1\nbHRzIGFuZCBjb252ZXJ0IGludG8gY29tbWEgc2VwZXJhdGVkIHN0cmluZ1xcbnNlc3Npb25faWRf\nbGlzdCA9IFtdXFxuZm9yIHNlc3Npb25faWQgaW4gZmllbGRzOlxcbiAgc2Vzc2lvbl9pZF9saXN0\nLmFwcGVuZChzdHIoc2Vzc2lvbl9pZC5nZXQoXFxcImdyb3VwXFxcIikpKVxcbnNlc3Npb25faWRf\nc3RyID0gXFxcIiwgXFxcIi5qb2luKHNlc3Npb25faWRfbGlzdClcXG5cXG5pbnB1dHMubndfZXZl\nbnRfc2Vzc2lvbl9pZHMgPSBzZXNzaW9uX2lkX3N0clxcbmlucHV0cy5pbmNpZGVudF9pZCA9IGlu\nY2lkZW50LmlkXCIsXCJyZXN1bHRfbmFtZVwiOlwiXCJ9PC9yZXNpbGllbnQ6ZnVuY3Rpb24+PC9l\neHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VGbG93XzFrYWdjeXE8L2luY29taW5n\nPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMGJmc3Fmajwvb3V0Z29pbmc+PC9zZXJ2aWNlVGFzaz48\nc2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFrYWdjeXFcIiBzb3VyY2VSZWY9XCJTZXJ2\naWNlVGFza18wNGxnMHcwXCIgdGFyZ2
V0UmVmPVwiU2VydmljZVRhc2tfMGd2YmpwZFwiLz48ZW5k\nRXZlbnQgaWQ9XCJFbmRFdmVudF8wNWhqMWd1XCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18wYmZz\ncWZqPC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3df\nMGJmc3FmalwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzBndmJqcGRcIiB0YXJnZXRSZWY9XCJF\nbmRFdmVudF8wNWhqMWd1XCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFr\neHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5ub3Rh\ndGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwi\nU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwi\nLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBt\nbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFc\nIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlk\nPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdp\nZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6\nQm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIyMjNcIi8+PC9i\ncG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1u\nRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFr\neHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwi\nOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5F\nbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OF9k\naVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9\nXCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50\nXCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxl\nbWVudD1cIlNlcnZpY2VUYXNrXzA0bGcwdzBcIiBpZD1cIlNlcnZpY2VUYXNrXzA0bGcwdzBfZGlc\nIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMjg1XCIgeT1c\nIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1c\nIlNlcXVlbmNlRmxvd18xNHN4N3NwXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMTRzeDdzcF9kaVwiPjxv\nbWdkaTp3YXlwb2ludCB4PVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZc\nIi8+PG9tZ2RpOndheXBvaW50IHg9XCIyODVcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1c\nIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lk\ndGg9XCIwXCIgeD1cIjI0MS41XCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1u\nZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18w\nZ3ZianBkXCIgaWQ9XCJTZXJ2aWNlVGFza18wZ3ZianBkX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWln\naHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjUxNlwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBN\nTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWthZ2N5\ncVwiIGlkPVwiU2VxdWVuY2VGbG93XzFrYWdjeXFfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM4\nNVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4\nPVwiNTE2XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1O\nTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI0NTAuNVwi\nIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6\nQlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMDVoajFndVwiIGlkPVwiRW5kRXZlbnRf\nMDVoajFndV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1c\nIjc2MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9\nXCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI3ODBcIiB5PVwiMjI3XCIvPjwvYnBtbmRpOkJQTU5MYWJl\nbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVl\nbmNlRmxvd18wYmZzcWZqXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMGJmc3Fmal9kaVwiPjxvbWdkaTp3\nYXlwb2ludCB4PVwiNjE2XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9
t\nZ2RpOndheXBvaW50IHg9XCI3NjJcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwi\nLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIw\nXCIgeD1cIjY4OVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5F\nZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25z\nPiIsICJ3b3JrZmxvd19pZCI6ICJleGFtcGxlX25ldHdpdG5lc3NfcmV0cmlldmVfcGNhcF9maWxl\nIiwgInZlcnNpb24iOiA1fSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NTQxMzgyMDM0MzMsICJj\ncmVhdG9yX2lkIjogImFkbWluQGNvM3N5cy5jb20iLCAiYWN0aW9ucyI6IFtdLCAicHJvZ3JhbW1h\ndGljX25hbWUiOiAiZXhhbXBsZV9uZXR3aXRuZXNzX3JldHJpZXZlX3BjYXBfZmlsZSIsICJuYW1l\nIjogIihFeGFtcGxlKSBOZXRXaXRuZXNzIFJldHJpZXZlIFBDQVAgRmlsZSJ9LCB7InV1aWQiOiAi\nZTgwZDQxYTgtYmJkOC00NmMwLTliN2ItMzMyYjIyZDc1MjY4IiwgImRlc2NyaXB0aW9uIjogIkFu\nIGV4YW1wbGUgdGhhdCByZXR1cm5zIGEgUENBUCBmaWxlIG9mIHBhY2tldCBkYXRhIHdpdGhpbiB0\naGUgZ2l2ZW4gdGltZSBmcmFtZSBhbmQgYXR0YWNoZXMgaXQgdG8gdGhlIGluY2lkZW50LiIsICJv\nYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJleHBvcnRfa2V5IjogImV4YW1wbGVfbmV0d2l0bmVz\nc19yZXRyaWV2ZV9wY2FwX2ZpbGVfdGltZSIsICJ3b3JrZmxvd19pZCI6IDEzOCwgImxhc3RfbW9k\naWZpZWRfYnkiOiAiYWRtaW5AY28zc3lzLmNvbSIsICJjb250ZW50IjogeyJ4bWwiOiAiPD94bWwg\ndmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwi\naHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5k\naT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdk\nYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9\nXCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVu\ndD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3\ndy53My5vcmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8y\nMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2Ft\ndW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZXhhbXBsZV9uZXR3aXRuZXNzX3JldHJpZXZl\nX3BjYXBfZmlsZV90aW1lXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9XCIoRXhhbXBsZSkg\nTmV0V2l0bmVzcyBSZXRyaWV2ZSBQQ0FQIEZpbGUgKFRpbWUpXCI+PGRvY3VtZW50YXRpb24+QW4g\nZXhhbXBsZSB0aGF0IHJldHVybnMgYSBQQ0FQIGZpbGUgb2YgcGFja2V0IGRhdGEgd2l0aGluIHRo\nZSBnaXZlbiB0aW1lIGZyYW1lIGFuZCBhdHRhY2hlcyBpdCB0byB0aGUgaW5jaWRlbnQuPC9kb2N1\nbWVudGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5n\nPlNlcXVlbmNlRmxvd18xNWhubzhpPC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNr\nIGlkPVwiU2VydmljZVRhc2tfMGtmNzEwblwiIG5hbWU9XCJOZXRXaXRuZXNzIFJldHJpZXZlIFBD\nQVAgRGF0YVwiIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+\nPHJlc2lsaWVudDpmdW5jdGlvbiB1dWlkPVwiMWU2NjE5ZjUtY2E4OS00YTQzLTg3NDEtY2NjZmM2\nYzc4Mjk5XCI+e1wiaW5wdXRzXCI6e30sXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImlucHV0\ncy5ud19zdGFydF90aW1lID0gcnVsZS5wcm9wZXJ0aWVzLm5ldHdpdG5lc3Nfc3RhcnRfdGltZVxc\nbmlucHV0cy5ud19lbmRfdGltZSA9IHJ1bGUucHJvcGVydGllcy5uZXR3aXRuZXNzX2VuZF90aW1l\nXFxuaW5wdXRzLmluY2lkZW50X2lkID0gaW5jaWRlbnQuaWRcIixcInJlc3VsdF9uYW1lXCI6XCJc\nIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1\nZW5jZUZsb3dfMTVobm84aTwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xbTB2dXZq\nPC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3df\nMTVobm84aVwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNl\ncnZpY2VUYXNrXzBrZjcxMG5cIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMTByN205bVwiPjxp\nbmNvbWluZz5TZXF1ZW5jZUZsb3dfMW0wdnV2ajwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVu\nY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFtMHZ1dmpcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFz\na18wa2Y3MTBuXCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMTByN205bVwiLz48dGV4dEFubm90YXRp\nb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XC
I+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxv\ndyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRp\nb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1c\nIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0g\naWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRl\nZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9\nXCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdk\nYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+\nPGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwi\nIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hh\ncGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0\nXCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9\nXCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hh\ncGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBp\nZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhz\naTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUz\nXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+\nPGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18wa2Y3MTBuXCIgaWQ9\nXCJTZXJ2aWNlVGFza18wa2Y3MTBuX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdp\nZHRoPVwiMTAwXCIgeD1cIjI4NlwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1u\nZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMTVobm84aVwiIGlkPVwiU2Vx\ndWVuY2VGbG93XzE1aG5vOGlfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBl\nPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjg2XCIgeHNp\nOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2Rj\nOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyNDJcIiB5PVwiMTg0XCIvPjwv\nYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1u\nRWxlbWVudD1cIkVuZEV2ZW50XzEwcjdtOW1cIiBpZD1cIkVuZEV2ZW50XzEwcjdtOW1fZGlcIj48\nb21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI1NDVcIiB5PVwiMTg4\nXCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1c\nIjBcIiB4PVwiNTYzXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBN\nTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMW0wdnV2\nalwiIGlkPVwiU2VxdWVuY2VGbG93XzFtMHZ1dmpfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM4\nNlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4\nPVwiNTQ1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1O\nTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI0NjUuNVwi\nIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRp\nOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIsICJ3b3JrZmxv\nd19pZCI6ICJleGFtcGxlX25ldHdpdG5lc3NfcmV0cmlldmVfcGNhcF9maWxlX3RpbWUiLCAidmVy\nc2lvbiI6IDh9LCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU1NDE0Njc4MDQwMSwgImNyZWF0b3Jf\naWQiOiAiYWRtaW5AY28zc3lzLmNvbSIsICJhY3Rpb25zIjogW10sICJwcm9ncmFtbWF0aWNfbmFt\nZSI6ICJleGFtcGxlX25ldHdpdG5lc3NfcmV0cmlldmVfcGNhcF9maWxlX3RpbWUiLCAibmFtZSI6\nICIoRXhhbXBsZSkgTmV0V2l0bmVzcyBSZXRyaWV2ZSBQQ0FQIEZpbGUgKFRpbWUpIn0sIHsidXVp\nZCI6ICI5Y2Q2NTQ0ZC1hYWNjLTQ1OGQtOTMyOS1mNzE3OTU1MDAxODUiLCAiZGVzY3JpcHRpb24i\nOiAiQW4gZXhhbXBsZSB0aGF0IHJldHVybnMgdGhlIG1ldGEgdmFsdWVzIGJhc2VkIG9uIHNlc3Np\nb24gbWV0YSBJRCByYW5nZXMuIiwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgImV4cG9ydF9r\nZXkiOiAiZXhhbXBsZV9uZXR3aXRuZXNzX2dldF9tZXRhX3ZhbHVlcyIsICJ3b3JrZmxvd19pZCI6\nIDEzNywgI
mxhc3RfbW9kaWZpZWRfYnkiOiAiYWRtaW5AY28zc3lzLmNvbSIsICJjb250ZW50Ijog\neyJ4bWwiOiAiPD94bWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmlu\naXRpb25zIHhtbG5zPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RF\nTFwiIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQv\nRElcIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RD\nXCIgeG1sbnM6b21nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwi\nIHhtbG5zOnJlc2lsaWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6\neHNkPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRw\nOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1c\nImh0dHA6Ly93d3cuY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZXhhbXBsZV9uZXR3\naXRuZXNzX2dldF9tZXRhX3ZhbHVlc1wiIGlzRXhlY3V0YWJsZT1cInRydWVcIiBuYW1lPVwiKEV4\nYW1wbGUpIE5ldFdpdG5lc3MgR2V0IE1ldGEgVmFsdWVzXCI+PGRvY3VtZW50YXRpb24+QW4gZXhh\nbXBsZSB0aGF0IHJldHVybnMgdGhlIG1ldGEgdmFsdWVzIGJhc2VkIG9uIHNlc3Npb24gbWV0YSBJ\nRCByYW5nZXMuPC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRFdmVudF8xNTVh\nc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18wenRjNTRqPC9vdXRnb2luZz48L3N0YXJ0RXZl\nbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMHNram9iYlwiIG5hbWU9XCJOZXRXaXRu\nZXNzIFF1ZXJ5XCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50\ncz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCIwMWRhYTYxYy1kYzIyLTRiMWEtOWMwMC0yNDgz\nNmY2MWU2ODdcIj57XCJpbnB1dHNcIjp7XCJiZDRkYzg5OC0yYjdlLTQ5YjItODg1MS1mZDEwMzU5\nOTUxZWFcIjp7XCJpbnB1dF90eXBlXCI6XCJzdGF0aWNcIixcInN0YXRpY19pbnB1dFwiOntcIm11\nbHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJudW1iZXJfdmFsdWVcIjo1MH19fSxcInByZV9wcm9jZXNz\naW5nX3NjcmlwdFwiOlwiaW5wdXRzLm53X3F1ZXJ5ID0gcnVsZS5wcm9wZXJ0aWVzLm5ldHdpdG5l\nc3NfcXVlcnlcIixcInJlc3VsdF9uYW1lXCI6XCJud19xdWVyeVwifTwvcmVzaWxpZW50OmZ1bmN0\naW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18wenRjNTRqPC9p\nbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzBlYmw0dWo8L291dGdvaW5nPjwvc2Vydmlj\nZVRhc2s+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMG5uZWlnNVwiPjxpbmNvbWluZz5TZXF1ZW5j\nZUZsb3dfMGp2M2wxNjwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2\naWNlVGFza18xYWp6dHNqXCIgbmFtZT1cIk5ldFdpdG5lc3MgR2V0IE1ldGEgSUQgcmFuZ2VzXCIg\ncmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50\nOmZ1bmN0aW9uIHV1aWQ9XCI5NTE3ZjZhOS1iODczLTQwMTYtYTVmMy1hN2FiYTUzMzI5NzRcIj57\nXCJpbnB1dHNcIjp7fSxcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiaW5wdXRzLm53X3Nlc3Np\nb25faWQxID0gd29ya2Zsb3cucHJvcGVydGllcy5ud19xdWVyeS5jb250ZW50LnJlc3VsdHMuaWQx\nXFxuaW5wdXRzLm53X3Nlc3Npb25faWQyID0gd29ya2Zsb3cucHJvcGVydGllcy5ud19xdWVyeS5j\nb250ZW50LnJlc3VsdHMuaWQyXFxuXCIsXCJyZXN1bHRfbmFtZVwiOlwibndfbWV0YV9pZF9yYW5n\nZXNcIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5T\nZXF1ZW5jZUZsb3dfMGVibDR1ajwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18wamZo\nbjRsPC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZs\nb3dfMGVibDR1alwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzBza2pvYmJcIiB0YXJnZXRSZWY9\nXCJTZXJ2aWNlVGFza18xYWp6dHNqXCIvPjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzFj\nMDd4ZzBcIiBuYW1lPVwiTmV0V2l0bmVzcyBHZXQgTWV0YSBWYWx1ZXNcIiByZXNpbGllbnQ6dHlw\nZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6ZnVuY3Rpb24gdXVp\nZD1cIjE1MzVkNTE4LTIyMmItNDQ3Yi05OWI0LTBjYWYwMjAwZDZlYlwiPntcImlucHV0c1wiOntc\nImJkNGRjODk4LTJiN2UtNDliMi04ODUxLWZkMTAzNTk5NTFlYVwiOntcImlucHV0X3R5cGVcIjpc\nInN0YXRpY1wiLFwic3RhdGljX2lucHV0XCI6e1wibXVsdGlzZWxlY3RfdmFsdWVcIjpbXSxcIm51\nbWJlcl92YWx1ZVwiOjEwfX19LFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiaW5jaWRlbnQu\nYWRkTm90ZShoZWxwZXIuY3JlYXRlUmljaFRleHQoXFxcIk1ldGEgdm
FsdWVzIGJldHdlZW4ge30g\nYW5kIHt9IGFyZSBsaXN0ZWQgYmVsb3cuJmx0O2JyLyZndDsge31cXFwiLmZvcm1hdChyZXN1bHRz\nLmlucHV0cy5ud19tZXRhX2lkMSwgcmVzdWx0cy5pbnB1dHMubndfbWV0YV9pZDIsIHN0cihyZXN1\nbHRzLmNvbnRlbnQucmVzdWx0cykpKSlcIixcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiaW5w\ndXRzLm53X21ldGFfaWQxID0gd29ya2Zsb3cucHJvcGVydGllcy5ud19tZXRhX2lkX3Jhbmdlcy5j\nb250ZW50LnBhcmFtcy5maWVsZDFcXG5pbnB1dHMubndfbWV0YV9pZDIgPSB3b3JrZmxvdy5wcm9w\nZXJ0aWVzLm53X21ldGFfaWRfcmFuZ2VzLmNvbnRlbnQucGFyYW1zLmZpZWxkMlwifTwvcmVzaWxp\nZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18w\namZobjRsPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzBqdjNsMTY8L291dGdvaW5n\nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wamZobjRsXCIg\nc291cmNlUmVmPVwiU2VydmljZVRhc2tfMWFqenRzalwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNr\nXzFjMDd4ZzBcIi8+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wanYzbDE2XCIgc291\ncmNlUmVmPVwiU2VydmljZVRhc2tfMWMwN3hnMFwiIHRhcmdldFJlZj1cIkVuZEV2ZW50XzBubmVp\nZzVcIi8+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wenRjNTRqXCIgc291cmNlUmVm\nPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMHNram9iYlwi\nLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3Rh\ncnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9u\nIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4\nbVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1u\nZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBt\nbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hh\ncGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1\nYXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2\nMlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIw\nXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48\nL2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5u\nb3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2Rj\nOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48\nL2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0\naW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9p\nbnQgeD1cIjE2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3\nYXlwb2ludCB4PVwiMTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9i\ncG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFz\na18wc2tqb2JiXCIgaWQ9XCJTZXJ2aWNlVGFza18wc2tqb2JiX2RpXCI+PG9tZ2RjOkJvdW5kcyBo\nZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjI3OVwiIHk9XCIxNjZcIi8+PC9icG1uZGk6\nQlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMG5uZWln\nNVwiIGlkPVwiRW5kRXZlbnRfMG5uZWlnNV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZc\nIiB3aWR0aD1cIjM2XCIgeD1cIjg0MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9t\nZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI4NjBcIiB5PVwiMjI3XCIv\nPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUg\nYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18xYWp6dHNqXCIgaWQ9XCJTZXJ2aWNlVGFza18xYWp6\ndHNqX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjQ2\nN1wiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVs\nZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMGVibDR1alwiIGlkPVwiU2VxdWVuY2VGbG93XzBlYmw0dWpf\nZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM3OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5\nPVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDY3XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu\ndFwiIHk9XCIyMDZcIi8+P
GJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIx\nM1wiIHdpZHRoPVwiMFwiIHg9XCI0MjNcIiB5PVwiMTg0LjVcIi8+PC9icG1uZGk6QlBNTkxhYmVs\nPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU2Vydmlj\nZVRhc2tfMWMwN3hnMFwiIGlkPVwiU2VydmljZVRhc2tfMWMwN3hnMF9kaVwiPjxvbWdkYzpCb3Vu\nZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCI2NTAuNTAzMzQ1NzI0OTA3XCIgeT1c\nIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1c\nIlNlcXVlbmNlRmxvd18wamZobjRsXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMGpmaG40bF9kaVwiPjxv\nbWdkaTp3YXlwb2ludCB4PVwiNTY3XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZc\nIi8+PG9tZ2RpOndheXBvaW50IHg9XCI2NTFcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1c\nIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lk\ndGg9XCIwXCIgeD1cIjYwOVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRp\nOkJQTU5FZGdlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMGp2\nM2wxNlwiIGlkPVwiU2VxdWVuY2VGbG93XzBqdjNsMTZfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1c\nIjc1MVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2lu\ndCB4PVwiODQyXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpC\nUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI3OTYu\nNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1u\nZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMHp0YzU0alwiIGlkPVwiU2Vx\ndWVuY2VGbG93XzB6dGM1NGpfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBl\nPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjc5XCIgeHNp\nOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2Rj\nOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyMzguNVwiIHk9XCIxODRcIi8+\nPC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48\nL2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIsICJ3b3JrZmxvd19pZCI6ICJleGFt\ncGxlX25ldHdpdG5lc3NfZ2V0X21ldGFfdmFsdWVzIiwgInZlcnNpb24iOiAxNX0sICJsYXN0X21v\nZGlmaWVkX3RpbWUiOiAxNTU0MTM4MDk5NzA5LCAiY3JlYXRvcl9pZCI6ICJhZG1pbkBjbzNzeXMu\nY29tIiwgImFjdGlvbnMiOiBbXSwgInByb2dyYW1tYXRpY19uYW1lIjogImV4YW1wbGVfbmV0d2l0\nbmVzc19nZXRfbWV0YV92YWx1ZXMiLCAibmFtZSI6ICIoRXhhbXBsZSkgTmV0V2l0bmVzcyBHZXQg\nTWV0YSBWYWx1ZXMifV0sICJhY3Rpb25zIjogW3sibG9naWNfdHlwZSI6ICJhbGwiLCAibmFtZSI6\nICIoRXhhbXBsZSkgTmV0V2l0bmVzcyBHZXQgTWV0YSBWYWx1ZXMiLCAidmlld19pdGVtcyI6IFt7\nInNob3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJhY3Rpb25pbnZvY2F0aW9uIiwgInNob3df\nbGlua19oZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50Ijog\nImI2YmE5YWFiLWE3YmUtNDFiNS05ZjA5LTU1MzU4MzcyZWU5OCIsICJzdGVwX2xhYmVsIjogbnVs\nbH1dLCAidHlwZSI6IDEsICJ3b3JrZmxvd3MiOiBbImV4YW1wbGVfbmV0d2l0bmVzc19nZXRfbWV0\nYV92YWx1ZXMiXSwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgInRpbWVvdXRfc2Vjb25kcyI6\nIDg2NDAwLCAidXVpZCI6ICI1OWIzODUzZS0yNTQ0LTQwZWEtYWFhMS1lMGZkZDhiNmJhNDYiLCAi\nYXV0b21hdGlvbnMiOiBbXSwgImV4cG9ydF9rZXkiOiAiKEV4YW1wbGUpIE5ldFdpdG5lc3MgR2V0\nIE1ldGEgVmFsdWVzIiwgImNvbmRpdGlvbnMiOiBbXSwgImlkIjogMTYzLCAibWVzc2FnZV9kZXN0\naW5hdGlvbnMiOiBbXX0sIHsibG9naWNfdHlwZSI6ICJhbGwiLCAibmFtZSI6ICIoRXhhbXBsZSkg\nTmV0V2l0bmVzcyBSZXRyaWV2ZSBMb2cgRmlsZSIsICJ2aWV3X2l0ZW1zIjogW3sic2hvd19pZiI6\nIG51bGwsICJmaWVsZF90eXBlIjogImFjdGlvbmludm9jYXRpb24iLCAic2hvd19saW5rX2hlYWRl\nciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiZTY4ZTEyMmQt\nYzI4OS00NzA2LTlkZmUtZjUwODBiYzllZGQxIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93\nX2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiYWN0aW9uaW52b2NhdGlvbiIsICJzaG93X2xpbmtf\naGVhZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICIyZGEx\nMDk3Yy1hMDhhLTQ4MDgtODYxOS04NjJjNWE1NGJlMTIiLCAic3RlcF9sYWJlbCI6IG
51bGx9XSwg\nInR5cGUiOiAxLCAid29ya2Zsb3dzIjogWyJleGFtcGxlX25ldHdpdG5lc3NfcmV0cmlldmVfbG9n\nX2ZpbGUiXSwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgInRpbWVvdXRfc2Vjb25kcyI6IDg2\nNDAwLCAidXVpZCI6ICJhOGQ2YTJiMi01OTYzLTQ1NTctYTQ2Yy1mYjE0MmRiZWRiMjciLCAiYXV0\nb21hdGlvbnMiOiBbXSwgImV4cG9ydF9rZXkiOiAiKEV4YW1wbGUpIE5ldFdpdG5lc3MgUmV0cmll\ndmUgTG9nIEZpbGUiLCAiY29uZGl0aW9ucyI6IFtdLCAiaWQiOiAxNjUsICJtZXNzYWdlX2Rlc3Rp\nbmF0aW9ucyI6IFtdfSwgeyJsb2dpY190eXBlIjogImFsbCIsICJuYW1lIjogIihFeGFtcGxlKSBO\nZXRXaXRuZXNzIFJldHJpZXZlIFBDQVAgRmlsZSIsICJ2aWV3X2l0ZW1zIjogW3sic2hvd19pZiI6\nIG51bGwsICJmaWVsZF90eXBlIjogImFjdGlvbmludm9jYXRpb24iLCAic2hvd19saW5rX2hlYWRl\nciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiYjZiYTlhYWIt\nYTdiZS00MWI1LTlmMDktNTUzNTgzNzJlZTk4IiwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJ0eXBl\nIjogMSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9uZXR3aXRuZXNzX3JldHJpZXZlX3BjYXBfZmls\nZSJdLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAs\nICJ1dWlkIjogImQ2ODczMTUwLTNiM2ItNDZjNC05M2U4LWZkMTI3NDQ2Yzk3MCIsICJhdXRvbWF0\naW9ucyI6IFtdLCAiZXhwb3J0X2tleSI6ICIoRXhhbXBsZSkgTmV0V2l0bmVzcyBSZXRyaWV2ZSBQ\nQ0FQIEZpbGUiLCAiY29uZGl0aW9ucyI6IFtdLCAiaWQiOiAxNDksICJtZXNzYWdlX2Rlc3RpbmF0\naW9ucyI6IFtdfSwgeyJsb2dpY190eXBlIjogImFsbCIsICJuYW1lIjogIihFeGFtcGxlKSBOZXRX\naXRuZXNzIFJldHJpZXZlIFBDQVAgRmlsZSAoVGltZSkiLCAidmlld19pdGVtcyI6IFt7InNob3df\naWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJhY3Rpb25pbnZvY2F0aW9uIiwgInNob3dfbGlua19o\nZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogImU2OGUx\nMjJkLWMyODktNDcwNi05ZGZlLWY1MDgwYmM5ZWRkMSIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsi\nc2hvd19pZiI6IG51bGwsICJmaWVsZF90eXBlIjogImFjdGlvbmludm9jYXRpb24iLCAic2hvd19s\naW5rX2hlYWRlciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAi\nMmRhMTA5N2MtYTA4YS00ODA4LTg2MTktODYyYzVhNTRiZTEyIiwgInN0ZXBfbGFiZWwiOiBudWxs\nfV0sICJ0eXBlIjogMSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9uZXR3aXRuZXNzX3JldHJpZXZl\nX3BjYXBfZmlsZV90aW1lIl0sICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJ0aW1lb3V0X3Nl\nY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiYjEzN2IxNTEtNzYzMS00MzcxLThhZjctMjc5MjJkNzNl\nNDZjIiwgImF1dG9tYXRpb25zIjogW10sICJleHBvcnRfa2V5IjogIihFeGFtcGxlKSBOZXRXaXRu\nZXNzIFJldHJpZXZlIFBDQVAgRmlsZSAoVGltZSkiLCAiY29uZGl0aW9ucyI6IFtdLCAiaWQiOiAx\nNjQsICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdfV0sICJsYXlvdXRzIjogW10sICJleHBvcnRf\nZm9ybWF0X3ZlcnNpb24iOiAyLCAiaWQiOiA2MiwgImluZHVzdHJpZXMiOiBudWxsLCAicGhhc2Vz\nIjogW10sICJhY3Rpb25fb3JkZXIiOiBbXSwgImdlb3MiOiBudWxsLCAibG9jYWxlIjogbnVsbCwg\nInNlcnZlcl92ZXJzaW9uIjogeyJtYWpvciI6IDMxLCAidmVyc2lvbiI6ICIzMS4wLjQyNTQiLCAi\nYnVpbGRfbnVtYmVyIjogNDI1NCwgIm1pbm9yIjogMH0sICJ0aW1lZnJhbWVzIjogbnVsbCwgIndv\ncmtzcGFjZXMiOiBbXSwgImF1dG9tYXRpY190YXNrcyI6IFtdLCAiZnVuY3Rpb25zIjogW3siZGlz\ncGxheV9uYW1lIjogIk5ldFdpdG5lc3MgR2V0IE1ldGEgSUQgcmFuZ2VzIiwgImRlc2NyaXB0aW9u\nIjogeyJjb250ZW50IjogIlJldHVybnMgdGhlIG1ldGEgSUQgcmFuZ2VzIGdpdmVuIHRoZSBzdGFy\ndCBhbmQgZW5kIHNlc3Npb24gSURzLiIsICJmb3JtYXQiOiAidGV4dCJ9LCAiY3JlYXRvciI6IHsi\nZGlzcGxheV9uYW1lIjogIlJlc2lsaWVudCBTeXNhZG1pbiIsICJ0eXBlIjogInVzZXIiLCAiaWQi\nOiAxLCAibmFtZSI6ICJhZG1pbkBjbzNzeXMuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19p\nZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6\nIGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiMjMwYTcwYTMtNDg4\nNi00YmE5LWEwNGItZDRjNTBjODFkODM0IiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lm\nIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjog\nZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICIyY2RjMTkxZS02YWIz\nLTQzNGEtOWI3OS0zNWM5NWY0MmRjM2IiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNob3dfaWYi\nOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX
2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBm\nYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogImJkNGRjODk4LTJiN2Ut\nNDliMi04ODUxLWZkMTAzNTk5NTFlYSIsICJzdGVwX2xhYmVsIjogbnVsbH1dLCAiZXhwb3J0X2tl\neSI6ICJuZXR3aXRuZXNzX2dldF9tZXRhX2lkX3JhbmdlcyIsICJ1dWlkIjogIjk1MTdmNmE5LWI4\nNzMtNDAxNi1hNWYzLWE3YWJhNTMzMjk3NCIsICJsYXN0X21vZGlmaWVkX2J5IjogeyJkaXNwbGF5\nX25hbWUiOiAiUmVzaWxpZW50IFN5c2FkbWluIiwgInR5cGUiOiAidXNlciIsICJpZCI6IDEsICJu\nYW1lIjogImFkbWluQGNvM3N5cy5jb20ifSwgInZlcnNpb24iOiA0LCAid29ya2Zsb3dzIjogW3si\nZGVzY3JpcHRpb24iOiBudWxsLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAiYWN0aW9ucyI6\nIFtdLCAibmFtZSI6ICIoRXhhbXBsZSkgTmV0V2l0bmVzcyBHZXQgTWV0YSBWYWx1ZXMiLCAid29y\na2Zsb3dfaWQiOiAxMzcsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX25ldHdpdG5lc3Nf\nZ2V0X21ldGFfdmFsdWVzIiwgInV1aWQiOiBudWxsfV0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAx\nNTU0MTM4MjUxMzYxLCAiZGVzdGluYXRpb25faGFuZGxlIjogInJzYV9uZXR3aXRuZXNzX21lc3Nh\nZ2VfZGVzdGluYXRpb24iLCAiaWQiOiA4NiwgIm5hbWUiOiAibmV0d2l0bmVzc19nZXRfbWV0YV9p\nZF9yYW5nZXMifSwgeyJkaXNwbGF5X25hbWUiOiAiTmV0V2l0bmVzcyBHZXQgTWV0YSBWYWx1ZXMi\nLCAiZGVzY3JpcHRpb24iOiB7ImNvbnRlbnQiOiAiUmV0dXJucyB0aGUgbWV0YSB2YWx1ZXMgZ2l2\nZW4gdGhlIHN0YXJ0IGFuZCBlbmQgbWV0YSBJRHMuIiwgImZvcm1hdCI6ICJ0ZXh0In0sICJjcmVh\ndG9yIjogeyJkaXNwbGF5X25hbWUiOiAiUmVzaWxpZW50IFN5c2FkbWluIiwgInR5cGUiOiAidXNl\nciIsICJpZCI6IDEsICJuYW1lIjogImFkbWluQGNvM3N5cy5jb20ifSwgInZpZXdfaXRlbXMiOiBb\neyJzaG93X2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtf\naGVhZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICI0Zjg4\nYzZmYS04YTdlLTRhZGMtYWM2Mi03OWU0OGMwNDhmYzUiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7\nInNob3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19o\nZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogIjFjMjUw\nNWY5LTQ3OTQtNDM1YS1hMWYzLTVlNjA4Yjc2OWNiOSIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsi\nc2hvd19pZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hl\nYWRlciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiYmQ0ZGM4\nOTgtMmI3ZS00OWIyLTg4NTEtZmQxMDM1OTk1MWVhIiwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJl\neHBvcnRfa2V5IjogIm5ldHdpdG5lc3NfZ2V0X21ldGFfdmFsdWVzIiwgInV1aWQiOiAiMTUzNWQ1\nMTgtMjIyYi00NDdiLTk5YjQtMGNhZjAyMDBkNmViIiwgImxhc3RfbW9kaWZpZWRfYnkiOiB7ImRp\nc3BsYXlfbmFtZSI6ICJSZXNpbGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2VyIiwgImlkIjog\nMSwgIm5hbWUiOiAiYWRtaW5AY28zc3lzLmNvbSJ9LCAidmVyc2lvbiI6IDMsICJ3b3JrZmxvd3Mi\nOiBbeyJkZXNjcmlwdGlvbiI6IG51bGwsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJhY3Rp\nb25zIjogW10sICJuYW1lIjogIihFeGFtcGxlKSBOZXRXaXRuZXNzIEdldCBNZXRhIFZhbHVlcyIs\nICJ3b3JrZmxvd19pZCI6IDEzNywgInByb2dyYW1tYXRpY19uYW1lIjogImV4YW1wbGVfbmV0d2l0\nbmVzc19nZXRfbWV0YV92YWx1ZXMiLCAidXVpZCI6IG51bGx9XSwgImxhc3RfbW9kaWZpZWRfdGlt\nZSI6IDE1NTQxMzgyNzYwNDEsICJkZXN0aW5hdGlvbl9oYW5kbGUiOiAicnNhX25ldHdpdG5lc3Nf\nbWVzc2FnZV9kZXN0aW5hdGlvbiIsICJpZCI6IDg3LCAibmFtZSI6ICJuZXR3aXRuZXNzX2dldF9t\nZXRhX3ZhbHVlcyJ9LCB7ImRpc3BsYXlfbmFtZSI6ICJOZXRXaXRuZXNzIFF1ZXJ5IiwgImRlc2Ny\naXB0aW9uIjogeyJjb250ZW50IjogIlF1ZXJpZXMgTmV0V2l0bmVzcyBhbmQgcmV0dXJucyBtZXRh\nZGF0YSByZWxhdGVkIHRvIHRoZSBxdWVyeS4iLCAiZm9ybWF0IjogInRleHQifSwgImNyZWF0b3Ii\nOiB7ImRpc3BsYXlfbmFtZSI6ICJSZXNpbGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2VyIiwg\nImlkIjogMSwgIm5hbWUiOiAiYWRtaW5AY28zc3lzLmNvbSJ9LCAidmlld19pdGVtcyI6IFt7InNo\nb3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFk\nZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogIjBmMzM1MWM4\nLWFjZDEtNGFlNC05ODQwLWI3ZTkzMTkyZDU2NiIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsic2hv\nd19pZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRl\n
ciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiYmQ0ZGM4OTgt\nMmI3ZS00OWIyLTg4NTEtZmQxMDM1OTk1MWVhIiwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJleHBv\ncnRfa2V5IjogIm5ldHdpdG5lc3NfcXVlcnkiLCAidXVpZCI6ICIwMWRhYTYxYy1kYzIyLTRiMWEt\nOWMwMC0yNDgzNmY2MWU2ODciLCAibGFzdF9tb2RpZmllZF9ieSI6IHsiZGlzcGxheV9uYW1lIjog\nIlJlc2lsaWVudCBTeXNhZG1pbiIsICJ0eXBlIjogInVzZXIiLCAiaWQiOiAxLCAibmFtZSI6ICJh\nZG1pbkBjbzNzeXMuY29tIn0sICJ2ZXJzaW9uIjogMywgIndvcmtmbG93cyI6IFt7ImRlc2NyaXB0\naW9uIjogbnVsbCwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgImFjdGlvbnMiOiBbXSwgIm5h\nbWUiOiAiKEV4YW1wbGUpIE5ldFdpdG5lc3MgR2V0IE1ldGEgVmFsdWVzIiwgIndvcmtmbG93X2lk\nIjogMTM3LCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9uZXR3aXRuZXNzX2dldF9tZXRh\nX3ZhbHVlcyIsICJ1dWlkIjogbnVsbH0sIHsiZGVzY3JpcHRpb24iOiBudWxsLCAib2JqZWN0X3R5\ncGUiOiAiaW5jaWRlbnQiLCAiYWN0aW9ucyI6IFtdLCAibmFtZSI6ICIoRXhhbXBsZSkgTmV0V2l0\nbmVzcyBSZXRyaWV2ZSBQQ0FQIEZpbGUiLCAid29ya2Zsb3dfaWQiOiAxMzUsICJwcm9ncmFtbWF0\naWNfbmFtZSI6ICJleGFtcGxlX25ldHdpdG5lc3NfcmV0cmlldmVfcGNhcF9maWxlIiwgInV1aWQi\nOiBudWxsfSwgeyJkZXNjcmlwdGlvbiI6IG51bGwsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIs\nICJhY3Rpb25zIjogW10sICJuYW1lIjogIk5ldFdpdG5lc3MgUXVlcnkiLCAid29ya2Zsb3dfaWQi\nOiAxMjIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJuZXR3aXRuZXNzX3F1ZXJ5IiwgInV1aWQiOiBu\ndWxsfV0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTUwMjA0NTA1NjQwLCAiZGVzdGluYXRpb25f\naGFuZGxlIjogInJzYV9uZXR3aXRuZXNzX21lc3NhZ2VfZGVzdGluYXRpb24iLCAiaWQiOiA3Nywg\nIm5hbWUiOiAibmV0d2l0bmVzc19xdWVyeSJ9LCB7ImRpc3BsYXlfbmFtZSI6ICJOZXRXaXRuZXNz\nIFJldHJpZXZlIExvZyBEYXRhIiwgImRlc2NyaXB0aW9uIjogeyJjb250ZW50IjogIlJldHVybnMg\nbG9nIGZpbGUgZnJvbSBOZXRXaXRuZXNzIGluIHRoZSBzcGVjaWZpZWQgZm9ybWF0IGJhc2VkIG9u\nIHRoZSBnaXZlbiB0aW1lIGZyYW1lLiIsICJmb3JtYXQiOiAidGV4dCJ9LCAiY3JlYXRvciI6IHsi\nZGlzcGxheV9uYW1lIjogIlJlc2lsaWVudCBTeXNhZG1pbiIsICJ0eXBlIjogInVzZXIiLCAiaWQi\nOiAxLCAibmFtZSI6ICJhZG1pbkBjbzNzeXMuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19p\nZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6\nIGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiY2JkY2RlMGEtYzA5\nOS00ZTM3LTgzYWMtYzRiZGJiZjNjZGRlIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lm\nIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjog\nZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICJhYjNmZTczNy00OWRh\nLTRlMjgtYWU1Mi1iNGVlZjJhYWUwOTUiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNob3dfaWYi\nOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBm\nYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogImUxMTIzNzExLTkxMDMt\nNDhkMS04NGQ3LTM2NzMzZmY3OWRlZCIsICJzdGVwX2xhYmVsIjogbnVsbH1dLCAiZXhwb3J0X2tl\neSI6ICJuZXR3aXRuZXNzX3JldHJpZXZlX2xvZ19kYXRhIiwgInV1aWQiOiAiNDEwYzAwZTEtZDBl\nZS00ZjEzLTllZjYtNTNjOWU5NWRkMTE0IiwgImxhc3RfbW9kaWZpZWRfYnkiOiB7ImRpc3BsYXlf\nbmFtZSI6ICJSZXNpbGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2VyIiwgImlkIjogMSwgIm5h\nbWUiOiAiYWRtaW5AY28zc3lzLmNvbSJ9LCAidmVyc2lvbiI6IDksICJ3b3JrZmxvd3MiOiBbeyJk\nZXNjcmlwdGlvbiI6IG51bGwsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJhY3Rpb25zIjog\nW10sICJuYW1lIjogIihFeGFtcGxlKSBOZXRXaXRuZXNzIFJldHJpZXZlIExvZyBGaWxlIiwgIndv\ncmtmbG93X2lkIjogMTM5LCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9uZXR3aXRuZXNz\nX3JldHJpZXZlX2xvZ19maWxlIiwgInV1aWQiOiBudWxsfV0sICJsYXN0X21vZGlmaWVkX3RpbWUi\nOiAxNTU0MTQ2NzM3NjczLCAiZGVzdGluYXRpb25faGFuZGxlIjogInJzYV9uZXR3aXRuZXNzX21l\nc3NhZ2VfZGVzdGluYXRpb24iLCAiaWQiOiA4OCwgIm5hbWUiOiAibmV0d2l0bmVzc19yZXRyaWV2\nZV9sb2dfZGF0YSJ9LCB7ImRpc3BsYXlfbmFtZSI6ICJOZXRXaXRuZXNzIFJldHJpZXZlIFBDQVAg\nRGF0YSIsICJkZXNjcmlwdGlvbiI6IHsiY29udGVudCI6ICJSZXR1cm5zIGEgUENBUCBmaWxlIGZy\nb20gTmV0V2l0bmVzcyBiYXNlZCBvbiBzZXNzaW9uIElEc
yBvciBhIHRpbWUgZnJhbWUgYW5kIGF0\ndGFjaGVzIHRvIGFuIGluY2lkZW50LiIsICJmb3JtYXQiOiAidGV4dCJ9LCAiY3JlYXRvciI6IHsi\nZGlzcGxheV9uYW1lIjogIlJlc2lsaWVudCBTeXNhZG1pbiIsICJ0eXBlIjogInVzZXIiLCAiaWQi\nOiAxLCAibmFtZSI6ICJhZG1pbkBjbzNzeXMuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19p\nZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6\nIGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiZTVkZDBhYjEtOGU2\nMi00YTAzLTk2ZDItYmIyMDcwMzM4MDBiIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lm\nIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjog\nZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICI4MTFlOTlkNy1kMTk0\nLTRjZTgtODZjYy1hZmY1ZTAxYWI4NWMiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNob3dfaWYi\nOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBm\nYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogImNiZGNkZTBhLWMwOTkt\nNGUzNy04M2FjLWM0YmRiYmYzY2RkZSIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsic2hvd19pZiI6\nIG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZh\nbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiYWIzZmU3MzctNDlkYS00\nZTI4LWFlNTItYjRlZWYyYWFlMDk1IiwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJleHBvcnRfa2V5\nIjogIm5ldHdpdG5lc3NfcmV0cmlldmVfcGNhcF9kYXRhIiwgInV1aWQiOiAiMWU2NjE5ZjUtY2E4\nOS00YTQzLTg3NDEtY2NjZmM2Yzc4Mjk5IiwgImxhc3RfbW9kaWZpZWRfYnkiOiB7ImRpc3BsYXlf\nbmFtZSI6ICJSZXNpbGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2VyIiwgImlkIjogMSwgIm5h\nbWUiOiAiYWRtaW5AY28zc3lzLmNvbSJ9LCAidmVyc2lvbiI6IDE1LCAid29ya2Zsb3dzIjogW3si\nZGVzY3JpcHRpb24iOiBudWxsLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAiYWN0aW9ucyI6\nIFtdLCAibmFtZSI6ICIoRXhhbXBsZSkgTmV0V2l0bmVzcyBSZXRyaWV2ZSBQQ0FQIEZpbGUiLCAi\nd29ya2Zsb3dfaWQiOiAxMzUsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX25ldHdpdG5l\nc3NfcmV0cmlldmVfcGNhcF9maWxlIiwgInV1aWQiOiBudWxsfSwgeyJkZXNjcmlwdGlvbiI6IG51\nbGwsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJhY3Rpb25zIjogW10sICJuYW1lIjogIihF\neGFtcGxlKSBOZXRXaXRuZXNzIFJldHJpZXZlIFBDQVAgRmlsZSAoVGltZSkiLCAid29ya2Zsb3df\naWQiOiAxMzgsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX25ldHdpdG5lc3NfcmV0cmll\ndmVfcGNhcF9maWxlX3RpbWUiLCAidXVpZCI6IG51bGx9XSwgImxhc3RfbW9kaWZpZWRfdGltZSI6\nIDE1NTQxNDY3MjYyNjAsICJkZXN0aW5hdGlvbl9oYW5kbGUiOiAicnNhX25ldHdpdG5lc3NfbWVz\nc2FnZV9kZXN0aW5hdGlvbiIsICJpZCI6IDc2LCAibmFtZSI6ICJuZXR3aXRuZXNzX3JldHJpZXZl\nX3BjYXBfZGF0YSJ9XSwgIm5vdGlmaWNhdGlvbnMiOiBudWxsLCAicmVndWxhdG9ycyI6IG51bGws\nICJpbmNpZGVudF90eXBlcyI6IFt7ImNyZWF0ZV9kYXRlIjogMTU1NDE0NzgzMTgwMCwgImRlc2Ny\naXB0aW9uIjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJleHBvcnRfa2V5\nIjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJpZCI6IDAsICJuYW1lIjog\nIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJ1cGRhdGVfZGF0ZSI6IDE1NTQx\nNDc4MzE4MDAsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1hZDM5LTRhMDAwNDA0NGFhMCIs\nICJlbmFibGVkIjogZmFsc2UsICJzeXN0ZW0iOiBmYWxzZSwgInBhcmVudF9pZCI6IG51bGwsICJo\naWRkZW4iOiBmYWxzZX1dLCAic2NyaXB0cyI6IFtdLCAidHlwZXMiOiBbXSwgIm1lc3NhZ2VfZGVz\ndGluYXRpb25zIjogW3sidXVpZCI6ICJiYzc4NmFmYS1iMzA1LTQ3YWUtOTkwMy0zYjU0MDBiMjY5\nYzYiLCAiZXhwb3J0X2tleSI6ICJyc2FfbmV0d2l0bmVzc19tZXNzYWdlX2Rlc3RpbmF0aW9uIiwg\nIm5hbWUiOiAiUlNBIE5ldFdpdG5lc3MgTWVzc2FnZSBEZXN0aW5hdGlvbiIsICJkZXN0aW5hdGlv\nbl90eXBlIjogMCwgInByb2dyYW1tYXRpY19uYW1lIjogInJzYV9uZXR3aXRuZXNzX21lc3NhZ2Vf\nZGVzdGluYXRpb24iLCAiZXhwZWN0X2FjayI6IHRydWUsICJ1c2VycyI6IFsiYWRtaW5AY28zc3lz\nLmNvbSJdfV0sICJpbmNpZGVudF9hcnRpZmFjdF90eXBlcyI6IFtdLCAicm9sZXMiOiBbXSwgImZp\nZWxkcyI6IFt7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAwLCAib3BlcmF0aW9uX3Blcm1z\nIjoge30sICJ0ZXh0IjogIlNpbXVsYXRpb24iLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVm\naXgiOiBudWxs
LCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDM4LCAicmVhZF9vbmx5IjogdHJ1\nZSwgInV1aWQiOiAiYzNmMGUzZWQtMjFlMS00ZDUzLWFmZmItZmU1Y2EzMzA4Y2NhIiwgImNob3Nl\nbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgInRvb2x0aXAiOiAiV2hldGhlciB0\naGUgaW5jaWRlbnQgaXMgYSBzaW11bGF0aW9uIG9yIGEgcmVndWxhciBpbmNpZGVudC4gIFRoaXMg\nZmllbGQgaXMgcmVhZC1vbmx5LiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFs\nc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvaW5jX3RyYWluaW5n\nIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJuYW1lIjogImluY190cmFpbmluZyIsICJk\nZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZh\nbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogNiwgIm9wZXJhdGlvbl9w\nZXJtcyI6IHt9LCAidGV4dCI6ICJOZXRXaXRuZXNzIEVuZCBUaW1lIiwgImJsYW5rX29wdGlvbiI6\nIGZhbHNlLCAicHJlZml4IjogInByb3BlcnRpZXMiLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6\nIDQxNywgInJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICIyZGExMDk3Yy1hMDhhLTQ4MDgtODYx\nOS04NjJjNWE1NGJlMTIiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogImRhdGV0aW1l\ncGlja2VyIiwgInRvb2x0aXAiOiAiIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBm\nYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJhY3Rpb25pbnZvY2F0aW9uL25l\ndHdpdG5lc3NfZW5kX3RpbWUiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgInBsYWNlaG9s\nZGVyIjogIiIsICJuYW1lIjogIm5ldHdpdG5lc3NfZW5kX3RpbWUiLCAiZGVwcmVjYXRlZCI6IGZh\nbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXX0sIHsi\nb3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDYsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRl\neHQiOiAiTmV0V2l0bmVzcyBTdGFydCBUaW1lIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJl\nZml4IjogInByb3BlcnRpZXMiLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDQxNiwgInJlYWRf\nb25seSI6IGZhbHNlLCAidXVpZCI6ICJlNjhlMTIyZC1jMjg5LTQ3MDYtOWRmZS1mNTA4MGJjOWVk\nZDEiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogImRhdGV0aW1lcGlja2VyIiwgInRv\nb2x0aXAiOiAiIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBs\nYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJhY3Rpb25pbnZvY2F0aW9uL25ldHdpdG5lc3Nfc3Rh\ncnRfdGltZSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwg\nIm5hbWUiOiAibmV0d2l0bmVzc19zdGFydF90aW1lIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRl\nZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlv\nbnMiOiBbXSwgInR5cGVfaWQiOiA2LCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogIk5l\ndFdpdG5lc3MgUXVlcnkiLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiAicHJvcGVy\ndGllcyIsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMzkxLCAicmVhZF9vbmx5IjogZmFsc2Us\nICJ1dWlkIjogImI2YmE5YWFiLWE3YmUtNDFiNS05ZjA5LTU1MzU4MzcyZWU5OCIsICJjaG9zZW4i\nOiBmYWxzZSwgImlucHV0X3R5cGUiOiAidGV4dCIsICJ0b29sdGlwIjogIiIsICJpbnRlcm5hbCI6\nIGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXki\nOiAiYWN0aW9uaW52b2NhdGlvbi9uZXR3aXRuZXNzX3F1ZXJ5IiwgImhpZGVfbm90aWZpY2F0aW9u\nIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICJzZWxlY3Qgc2Vzc2lvbmlkIHdoZXJlIHRpbWU9JzIw\nMTgtRGVjLTA2IDA4OjAwOjAwJy0nMjAxOC1EZWMtMDYgMDk6MDA6MDAnJiZpcC5zcmM9MTAuMTAu\nMTAuMTIzJiZhbGlhcy5ob3N0PWV4YW1wbGUudGVzdC5jb20iLCAibmFtZSI6ICJuZXR3aXRuZXNz\nX3F1ZXJ5IiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6\nIGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMSwg\nIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJud19zdGFydF90aW1lIiwgImJsYW5rX29w\ndGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiA0\nNTEsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiY2JkY2RlMGEtYzA5OS00ZTM3LTgzYWMt\nYzRiZGJiZjNjZGRlIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJkYXRldGltZXBp\nY2tlciIsICJ0b29sdGlwIjogIlN0YXJ0IHRpbWUgZm9yIFBDQVAgb3IgbG9nIGZpbGUuIiwgImlu\ndGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsY
XRlcyI6IFtdLCAiZXhw\nb3J0X2tleSI6ICJfX2Z1bmN0aW9uL253X3N0YXJ0X3RpbWUiLCAiaGlkZV9ub3RpZmljYXRpb24i\nOiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIiIsICJuYW1lIjogIm53X3N0YXJ0X3RpbWUiLCAiZGVw\ncmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1\nZXMiOiBbXX0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDExLCAib3BlcmF0aW9uX3Bl\ncm1zIjoge30sICJ0ZXh0IjogIm53X2RhdGFfZm9ybWF0IiwgImJsYW5rX29wdGlvbiI6IGZhbHNl\nLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzOTIsICJyZWFkX29u\nbHkiOiBmYWxzZSwgInV1aWQiOiAiZTExMjM3MTEtOTEwMy00OGQxLTg0ZDctMzY3MzNmZjc5ZGVk\nIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJzZWxlY3QiLCAidG9vbHRpcCI6ICJG\nb3JtYXQgYW5kIGRhdGEgdG8gcmV0dXJuLiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0\nIjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9ud19k\nYXRhX2Zvcm1hdCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAi\nIiwgIm5hbWUiOiAibndfZGF0YV9mb3JtYXQiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVs\ndF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbeyJ1dWlkIjogIjY1OTVkNTFj\nLTEyOGUtNDU5Ny04M2M4LWJjZGU3ZTE3ZWQ4ZiIsICJkZWZhdWx0IjogZmFsc2UsICJlbmFibGVk\nIjogdHJ1ZSwgInZhbHVlIjogNjUwLCAibGFiZWwiOiAibG9nc190ZXh0IiwgImhpZGRlbiI6IGZh\nbHNlLCAicHJvcGVydGllcyI6IG51bGx9LCB7InV1aWQiOiAiNGE1MmIwNzMtYTgxYy00MjlmLTg1\nODYtYTU4ZDIyZTkwZWNjIiwgImRlZmF1bHQiOiBmYWxzZSwgImVuYWJsZWQiOiB0cnVlLCAidmFs\ndWUiOiA1NTIsICJsYWJlbCI6ICJsb2dzX2NzdiIsICJoaWRkZW4iOiBmYWxzZSwgInByb3BlcnRp\nZXMiOiBudWxsfSwgeyJ1dWlkIjogImE1ZjA0ZmUwLTJiZjctNGYyYy04Yzg4LTBlMDE3NTFhNmE1\nMSIsICJkZWZhdWx0IjogZmFsc2UsICJlbmFibGVkIjogdHJ1ZSwgInZhbHVlIjogNjUxLCAibGFi\nZWwiOiAibG9nc194bWwiLCAiaGlkZGVuIjogZmFsc2UsICJwcm9wZXJ0aWVzIjogbnVsbH0sIHsi\ndXVpZCI6ICI2YjA1NzAzZi05NTA4LTQyM2YtOGM5OC00OGE3MDc3NTY2YTQiLCAiZGVmYXVsdCI6\nIGZhbHNlLCAiZW5hYmxlZCI6IHRydWUsICJ2YWx1ZSI6IDU1MSwgImxhYmVsIjogImxvZ3NfanNv\nbiIsICJoaWRkZW4iOiBmYWxzZSwgInByb3BlcnRpZXMiOiBudWxsfV19LCB7Im9wZXJhdGlvbnMi\nOiBbXSwgInR5cGVfaWQiOiAxMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJud19l\nbmRfdGltZSIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2Vh\nYmxlIjogdHJ1ZSwgImlkIjogNDUyLCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogImFiM2Zl\nNzM3LTQ5ZGEtNGUyOC1hZTUyLWI0ZWVmMmFhZTA5NSIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0\nX3R5cGUiOiAiZGF0ZXRpbWVwaWNrZXIiLCAidG9vbHRpcCI6ICJFbmQgdGltZSBmb3IgUENBUCBm\nb3IgbG9nIGZpbGUuIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRl\nbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL253X2VuZF90aW1lIiwgImhp\nZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJud19l\nbmRfdGltZSIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIi\nOiBmYWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEs\nICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAibndfcmVzdWx0c19zaXplIiwgImJsYW5r\nX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQi\nOiAzOTMsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiYmQ0ZGM4OTgtMmI3ZS00OWIyLTg4\nNTEtZmQxMDM1OTk1MWVhIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIi\nLCAidG9vbHRpcCI6ICJOdW1iZXIgb2YgcmVzdWx0cyB0byByZXR1cm4gYmFjayBmcm9tIHRoZSBx\ndWVyeSwgbm8gbGltaXQgaXMgaW1wbGllZCBpZiBub3RoaW5nIGlzIHNldC4iLCAiaW50ZXJuYWwi\nOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5\nIjogIl9fZnVuY3Rpb24vbndfcmVzdWx0c19zaXplIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFs\nc2UsICJwbGFjZWhvbGRlciI6ICIxMDAiLCAibmFtZSI6ICJud19yZXN1bHRzX3NpemUiLCAiZGVw\ncmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1\nZXMiOiBbXX0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDExLCAib3BlcmF0aW9uX3Bl\ncm1zIjoge30sICJ0ZXh0Ijog
Im53X2V2ZW50X3Nlc3Npb25faWRzIiwgImJsYW5rX29wdGlvbiI6\nIGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzODYsICJy\nZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiZTVkZDBhYjEtOGU2Mi00YTAzLTk2ZDItYmIyMDcw\nMzM4MDBiIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAi\nOiAiQ29tbWEgc2VwYXJhdGVkIHN0cmluZyBvZiBzZXNzaW9uIElEcy4iLCAiaW50ZXJuYWwiOiBm\nYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5Ijog\nIl9fZnVuY3Rpb24vbndfZXZlbnRfc2Vzc2lvbl9pZHMiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBm\nYWxzZSwgInBsYWNlaG9sZGVyIjogIjk5MTg4NDY5OTIsOTkxNzI0MDIxMiIsICJuYW1lIjogIm53\nX2V2ZW50X3Nlc3Npb25faWRzIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2Vu\nX2J5X3NlcnZlciI6IGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5\ncGVfaWQiOiAxMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJpbmNpZGVudF9pZCIs\nICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1\nZSwgImlkIjogOTQsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiODExZTk5ZDctZDE5NC00\nY2U4LTg2Y2MtYWZmNWUwMWFiODVjIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJu\ndW1iZXIiLCAidG9vbHRpcCI6ICIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZh\nbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vaW5jaWRlbnRf\naWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIiIsICJuYW1l\nIjogImluY2lkZW50X2lkIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5\nX3NlcnZlciI6IGZhbHNlLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgInZhbHVlcyI6IFtdfSwgeyJv\ncGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRl\neHQiOiAibndfbWV0YV9pZDIiLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiBudWxs\nLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDQxMywgInJlYWRfb25seSI6IGZhbHNlLCAidXVp\nZCI6ICIxYzI1MDVmOS00Nzk0LTQzNWEtYTFmMy01ZTYwOGI3NjljYjkiLCAiY2hvc2VuIjogZmFs\nc2UsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJ0b29sdGlwIjogIkxhc3QgbWV0YSBJRCB2YWx1\nZSBpbiB0aGUgcmFuZ2UuIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwg\nInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL253X21ldGFfaWQyIiwg\nImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIyMzI2IiwgIm5hbWUi\nOiAibndfbWV0YV9pZDIiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlf\nc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXX0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9p\nZCI6IDExLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogIm53X3Nlc3Npb25faWQxIiwg\nImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVl\nLCAiaWQiOiA0MTAsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiMjMwYTcwYTMtNDg4Ni00\nYmE5LWEwNGItZDRjNTBjODFkODM0IiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJu\ndW1iZXIiLCAidG9vbHRpcCI6ICJGaXJzdCBzZXNzaW9uIElEIGluIHRoZSByYW5nZS4iLCAiaW50\nZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBv\ncnRfa2V5IjogIl9fZnVuY3Rpb24vbndfc2Vzc2lvbl9pZDEiLCAiaGlkZV9ub3RpZmljYXRpb24i\nOiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIjEwMCIsICJuYW1lIjogIm53X3Nlc3Npb25faWQxIiwg\nImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAi\ndmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMSwgIm9wZXJhdGlv\nbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJud19xdWVyeSIsICJibGFua19vcHRpb24iOiBmYWxzZSwg\nInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMzkwLCAicmVhZF9vbmx5\nIjogZmFsc2UsICJ1dWlkIjogIjBmMzM1MWM4LWFjZDEtNGFlNC05ODQwLWI3ZTkzMTkyZDU2NiIs\nICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAidGV4dGFyZWEiLCAidG9vbHRpcCI6ICIi\nLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW3si\ndXVpZCI6ICJlNTYxMTAwOS0wYjIyLTQ0YzQtYTU5Yy05ODU5ZjExZmRjZjgiLCAiaWQiOiAxNywg\nInRlbXBsYXRlIjogeyJjb250ZW50IjogInNlbGVjdCBzZXNzaW9uaWQgd2hlcmUgYWxpY
XMuaXA9\nMjAzLjIwNS4xNzkuMTgxIiwgImZvcm1hdCI6ICJ0ZXh0In0sICJuYW1lIjogImFsYWlzLmlwID0g\neC54LngueCJ9XSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9ud19xdWVyeSIsICJoaWRlX25v\ndGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAibndfcXVlcnki\nLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2Us\nICJyZXF1aXJlZCI6ICJhbHdheXMiLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwg\nInR5cGVfaWQiOiAxMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJud19zZXNzaW9u\nX2lkMiIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxl\nIjogdHJ1ZSwgImlkIjogNDExLCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogIjJjZGMxOTFl\nLTZhYjMtNDM0YS05Yjc5LTM1Yzk1ZjQyZGMzYiIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5\ncGUiOiAibnVtYmVyIiwgInRvb2x0aXAiOiAiTGFzdCBzZXNzaW9uIElEIGluIHRoZSByYW5nZS4i\nLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10s\nICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vbndfc2Vzc2lvbl9pZDIiLCAiaGlkZV9ub3RpZmlj\nYXRpb24iOiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIjEwMiIsICJuYW1lIjogIm53X3Nlc3Npb25f\naWQyIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZh\nbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMSwgIm9w\nZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJud19tZXRhX2lkMSIsICJibGFua19vcHRpb24i\nOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogNDEyLCAi\ncmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogIjRmODhjNmZhLThhN2UtNGFkYy1hYzYyLTc5ZTQ4\nYzA0OGZjNSIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgInRvb2x0\naXAiOiAiRmlyc3QgbWV0YSBJRCB2YWx1ZSBpbiB0aGUgcmFuZ2UuIiwgImludGVybmFsIjogZmFs\nc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJf\nX2Z1bmN0aW9uL253X21ldGFfaWQxIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFj\nZWhvbGRlciI6ICIyMjU4IiwgIm5hbWUiOiAibndfbWV0YV9pZDEiLCAiZGVwcmVjYXRlZCI6IGZh\nbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXX1dLCAi\nb3ZlcnJpZGVzIjogW10sICJleHBvcnRfZGF0ZSI6IDE1NTQxNDc2NDg5Njl9\n\"\"\"\n    )", "title": ""}, {"docid": "71aa05a4b4968b4a1f5c8a83e79014b8", "score": "0.45267016", "text": "def main():\n    form = cgi.FieldStorage()\n    cid = form.getvalue(\"cid\", 'KCCI-016')\n    start_ts = form.getvalue('start_ts', None)\n    end_ts = form.getvalue('end_ts', None)\n    date = form.getvalue('date', None)\n    if date is not None:\n        start_ts = datetime.datetime.strptime(date, '%Y%m%d')\n        start_ts = start_ts.replace(tzinfo=pytz.timezone(\"America/Chicago\"))\n        end_ts = start_ts + datetime.timedelta(days=1)\n    else:\n        start_ts = datetime.datetime.strptime(start_ts, '%Y%m%d%H%M')\n        start_ts = start_ts.replace(tzinfo=pytz.utc)\n        end_ts = datetime.datetime.strptime(end_ts, '%Y%m%d%H%M')\n        end_ts = end_ts.replace(tzinfo=pytz.utc)\n\n    ssw(\"Content-type: application/json\\n\\n\")\n    ssw(json.dumps(dance(cid, start_ts, end_ts)))", "title": ""}, {"docid": "45e6303acf92106ed29fe9fd9983dedd", "score": "0.45244452", "text": "def __init__(self, jsondict=None, strict=True):\n        \n        self.defaultValue = None\n        \"\"\" Default, hard-coded, or user-defined value for this variable.\n        Type `str`. \"\"\"\n        \n        self.description = None\n        \"\"\" Natural language description of the variable.\n        Type `str`. 
\"\"\"\n        \n        self.expression = None\n        \"\"\" The FHIRPath expression against the fixture body.\n        Type `str`. \"\"\"\n        \n        self.headerField = None\n        \"\"\" HTTP header field name for source.\n        Type `str`. \"\"\"\n        \n        self.hint = None\n        \"\"\" Hint help text for default value to enter.\n        Type `str`. \"\"\"\n        \n        self.name = None\n        \"\"\" Descriptive name for this variable.\n        Type `str`. \"\"\"\n        \n        self.path = None\n        \"\"\" XPath or JSONPath against the fixture body.\n        Type `str`. \"\"\"\n        \n        self.sourceId = None\n        \"\"\" Fixture Id of source expression or headerField within this variable.\n        Type `str`. \"\"\"\n        \n        super(TestScriptVariable, self).__init__(jsondict=jsondict, strict=strict)", "title": ""}, {"docid": "f672c0167637899385be275c7d676abd", "score": "0.45236546", "text": "def __call__(content):", "title": ""}, {"docid": "a353a29689672e70b0acb60944fcd4a4", "score": "0.45135614", "text": "def Modif(f):\n    js = win.JSON.stringify(f)\n    dict_type = json.loads(js)\n    return dict_type", "title": ""}, {"docid": "4c506d64ad6e1679a02995848af0b0da", "score": "0.4512839", "text": "def get_gpt_script(offer_markup):\n    html_parser = BeautifulSoup(offer_markup, \"html.parser\")\n    scripts = html_parser.find_all('script')\n    data = ''\n    for script in scripts:\n        if script.string and \"GPT.targeting\" in script.string:\n            data = script.string\n            break\n    try:\n        data_dict = json.loads((re.split('GPT.targeting = |;', data))[2].replace(\";\", \"\"))\n    except json.JSONDecodeError as e:\n        logging.info(\"JSON failed to parse GPT offer attributes. 
Error: {0}\".format(e))\n data_dict = {}\n return data_dict", "title": "" }, { "docid": "3524ff5a5e2f8b8e5638b3ee2eb4d33f", "score": "0.4505119", "text": "def generate_har(entries: List[dict]) -> str:\n har = {\n \"log\": {\n \"version\": \"1.2\",\n \"creator\": {\n \"name\": \"Selenium Wire HAR dump\",\n \"version\": seleniumwire.__version__,\n \"comment\": f\"Selenium Wire version {seleniumwire.__version__}\",\n },\n \"entries\": entries,\n }\n }\n\n return json.dumps(har, indent=2)", "title": "" }, { "docid": "4480883aee03e7f5428cbf113e82cfcf", "score": "0.45038185", "text": "def customization_data(client=None):\n\n # This import data contains:\n # Function inputs:\n # artifact_value\n # phishai_scan_id\n # Message Destinations:\n # phish_ai_message_destination\n # Functions:\n # phish_ai_get_report\n # phish_ai_scan_url\n # Workflows:\n # example_phishai_scan_url\n # Rules:\n # Example: Phish.AI URL scan\n\n\n yield ImportDefinition(u\"\"\"\neyJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMSwgIm1pbm9yIjogMCwgImJ1aWxkX251bWJl\nciI6IDQyNTQsICJ2ZXJzaW9uIjogIjMxLjAuNDI1NCJ9LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9u\nIjogMiwgImlkIjogNDAsICJleHBvcnRfZGF0ZSI6IDE1NDUxNTI1NjE1MjMsICJmaWVsZHMiOiBb\neyJpZCI6IDM4LCAibmFtZSI6ICJpbmNfdHJhaW5pbmciLCAidGV4dCI6ICJTaW11bGF0aW9uIiwg\nInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMCwgInRvb2x0aXAiOiAiV2hldGhlciB0aGUgaW5j\naWRlbnQgaXMgYSBzaW11bGF0aW9uIG9yIGEgcmVndWxhciBpbmNpZGVudC4gIFRoaXMgZmllbGQg\naXMgcmVhZC1vbmx5LiIsICJpbnB1dF90eXBlIjogImJvb2xlYW4iLCAiaGlkZV9ub3RpZmljYXRp\nb24iOiBmYWxzZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjog\nZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImludGVybmFsIjogZmFsc2UsICJ1dWlkIjog\nImMzZjBlM2VkLTIxZTEtNGQ1My1hZmZiLWZlNWNhMzMwOGNjYSIsICJvcGVyYXRpb25zIjogW10s\nICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAicmVhZF9vbmx5IjogdHJ1ZSwg\nImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5IjogImlu\nY2lkZW50L2luY190cmFpbmluZyIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxz\nZX0sIHsiaWQiOiA5NywgIm5hbWUiOiAiYXJ0aWZhY3RfdmFsdWUiLCAidGV4dCI6ICJhcnRpZmFj\ndF92YWx1ZSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICIiLCAi\ncGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAidGV4dCIsICJoaWRlX25vdGlmaWNhdGlv\nbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBm\nYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1aWQiOiAi\nYjA5ZTE4OTktNzQ1Mi00ZjRmLWJkZTEtMjNiMmZiY2NkOTA0IiwgIm9wZXJhdGlvbnMiOiBbXSwg\nIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBmYWxzZSwg\nImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5IjogIl9f\nZnVuY3Rpb24vYXJ0aWZhY3RfdmFsdWUiLCAidGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVkIjog\nZmFsc2V9LCB7ImlkIjogMzgyLCAibmFtZSI6ICJwaGlzaGFpX3NjYW5faWQiLCAidGV4dCI6ICJw\naGlzaGFpX3NjYW5faWQiLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMSwgInRvb2x0aXAi\nOiAiIiwgInBsYWNlaG9sZGVyIjogIiIsICJpbnB1dF90eXBlIjogInRleHQiLCAiaGlkZV9ub3Rp\nZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2Vy\ndmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImludGVybmFsIjogZmFsc2UsICJ1\ndWlkIjogImI2YWMyYWRiLTcxZjMtNGQzMy1hYjcxLWU2YTM2MjMxOTI2ZSIsICJvcGVyYXRpb25z\nIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAicmVhZF9vbmx5Ijog\nZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tl\neSI6ICJfX2Z1bmN0aW9uL3BoaXNoYWlfc2Nhbl9pZCIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJl\nY2F0ZWQiOiBmYWxzZX1dLCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJ1cGRhdGVfZGF0ZSI6IDE1NDUx\nNTI2NTE4NzcsICJjcmVhdGVfZGF0ZSI6IDE1NDUxNTI2NTE4NzcsICJ1dWlkIjogImJmZWVjMmQ0\nLTM3NzAtMTFlOC1hZDM5L
TRhMDAwNDA0NGFhMCIsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0\naW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBh\nY2thZ2VzIChpbnRlcm5hbCkiLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRl\ncm5hbCkiLCAiZW5hYmxlZCI6IGZhbHNlLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBu\ndWxsLCAiaGlkZGVuIjogZmFsc2UsICJpZCI6IDB9XSwgInBoYXNlcyI6IFtdLCAiYXV0b21hdGlj\nX3Rhc2tzIjogW10sICJvdmVycmlkZXMiOiBbXSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW3si\nbmFtZSI6ICJQaGlzaCBBSSBNZXNzYWdlIERlc3RpbmF0aW9uIiwgInByb2dyYW1tYXRpY19uYW1l\nIjogInBoaXNoX2FpX21lc3NhZ2VfZGVzdGluYXRpb24iLCAiZGVzdGluYXRpb25fdHlwZSI6IDAs\nICJleHBlY3RfYWNrIjogdHJ1ZSwgInVzZXJzIjogWyJhZG1pbkBjbzNzeXMuY29tIl0sICJ1dWlk\nIjogIjRjN2UxMGJkLTU4NjUtNDVlMS05MGJmLTBiNjBjY2Q3OTU2MyIsICJleHBvcnRfa2V5Ijog\nInBoaXNoX2FpX21lc3NhZ2VfZGVzdGluYXRpb24ifV0sICJhY3Rpb25zIjogW3siaWQiOiAxMjYs\nICJuYW1lIjogIkV4YW1wbGU6IFBoaXNoLkFJIFVSTCBzY2FuIiwgInR5cGUiOiAxLCAib2JqZWN0\nX3R5cGUiOiAiYXJ0aWZhY3QiLCAiY29uZGl0aW9ucyI6IFt7Im1ldGhvZCI6ICJlcXVhbHMiLCAi\nZmllbGRfbmFtZSI6ICJhcnRpZmFjdC50eXBlIiwgInZhbHVlIjogIlVSTCIsICJ0eXBlIjogbnVs\nbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVzc2FnZV9k\nZXN0aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9waGlzaGFpX3NjYW5fdXJs\nIl0sICJ2aWV3X2l0ZW1zIjogW10sICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAi\nZmU4YWUzOWItODhlMy00ZGMyLTliMTUtYzhmY2MyMTk0NjcwIiwgImV4cG9ydF9rZXkiOiAiRXhh\nbXBsZTogUGhpc2guQUkgVVJMIHNjYW4iLCAibG9naWNfdHlwZSI6ICJhbGwifV0sICJsYXlvdXRz\nIjogW10sICJub3RpZmljYXRpb25zIjogbnVsbCwgInRpbWVmcmFtZXMiOiBudWxsLCAibG9jYWxl\nIjogbnVsbCwgImluZHVzdHJpZXMiOiBudWxsLCAicmVndWxhdG9ycyI6IG51bGwsICJnZW9zIjog\nbnVsbCwgInRhc2tfb3JkZXIiOiBbXSwgImFjdGlvbl9vcmRlciI6IFtdLCAidHlwZXMiOiBbXSwg\nInNjcmlwdHMiOiBbXSwgImluY2lkZW50X2FydGlmYWN0X3R5cGVzIjogW10sICJ3b3JrZmxvd3Mi\nOiBbeyJ3b3JrZmxvd19pZCI6IDEwMCwgIm5hbWUiOiAiRXhhbXBsZTogUGhpc2guQUkgU2NhbiBV\nUkwiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9waGlzaGFpX3NjYW5fdXJsIiwgIm9i\namVjdF90eXBlIjogImFydGlmYWN0IiwgImRlc2NyaXB0aW9uIjogIlNjYW5zIFVSTCB1c2luZyBQ\naGlzaC5BSSBhbmQgcmV0dXJucyBhIHJlcG9ydCBiYXNlZCBvbiB0aGUgcmVzdWx0cy4iLCAiY3Jl\nYXRvcl9pZCI6ICJhZG1pbkBjbzNzeXMuY29tIiwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYWRtaW5A\nY28zc3lzLmNvbSIsICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTQ1MTUyNDYzNDk5LCAiZXhwb3J0\nX2tleSI6ICJleGFtcGxlX3BoaXNoYWlfc2Nhbl91cmwiLCAidXVpZCI6ICIwMTMxMWNhOC1iOWNj\nLTRjMTQtYjliYy05ZWUxMTRkNDc0YjQiLCAiY29udGVudCI6IHsid29ya2Zsb3dfaWQiOiAiZXhh\nbXBsZV9waGlzaGFpX3NjYW5fdXJsIiwgInhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5j\nb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcv\nc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcu\nb3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcu\nb3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9y\nZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVu\ndC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNj\naGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3Rh\nbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHBy\nb2Nlc3MgaWQ9XCJleGFtcGxlX3BoaXNoYWlfc2Nhbl91cmxcIiBpc0V4ZWN1dGFibGU9XCJ0cnVl\nXCIgbmFtZT1cIkV4YW1wbGU6IFBoaXNoLkFJIFNjYW4gVVJMXCI+PGRvY3VtZW50YXRpb24+U2Nh\nbnMgVVJMIHVzaW5nIFBoaXNoLkFJIGFuZCByZXR1cm5zIGEgcmVwb3J0IGJhc2VkIG9uIHRoZSBy\nZXN1bHRzLjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4\nbVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMWs1dzB0bjwvb3V0Z29pbmc+PC9zdGFydEV2ZW50\nPjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzAzbGxzYWJcIiBuYW1lPVwiUG
hpc2guQUkg\nU2NhbiBVUkxcIiByZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRz\nPjxyZXNpbGllbnQ6ZnVuY3Rpb24gdXVpZD1cImVkMjNiOTA0LWZkMmUtNGQ3My1iNjQ1LWQ1ZmE0\nZmNiNGJhMFwiPntcImlucHV0c1wiOnt9LFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiXFxc\nIlxcXCJcXFwiXFxuRXhhbXBsZSByZXNwb25zZVxcblxcbnsgIFxcbiAgIFxcXCJjb250ZW50XFxc\nIjp7ICBcXG4gICAgICBcXFwidXJsXFxcIjpcXFwiaHR0cHM6Ly9zdGFydHVwNDE3LmdiLm5ldC9N\nMz9tZXMxPWFzZGZAYXNkZi5jb21cXFwiLFxcbiAgICAgIFxcXCJzY2FuX2lkXFxcIjpcXFwiZ0dC\nU2FWdmxONXFjNVBjd3ZudVRcXFwiXFxuICAgfSxcXG4gICBcXFwiaW5wdXRzXFxcIjp7ICBcXG4g\nICAgICBcXFwiYXJ0aWZhY3RfdmFsdWVcXFwiOlxcXCJodHRwczovL3N0YXJ0dXA0MTcuZ2IubmV0\nL00zP21lczE9YXNkZkBhc2RmLmNvbVxcXCJcXG4gICB9LFxcbiAgIFxcXCJydW5fdGltZVxcXCI6\nXFxcIjAuNDQ2MTgxMDU4ODg0XFxcIlxcbn1cXG5cXFwiXFxcIlxcXCJcIixcInByZV9wcm9jZXNz\naW5nX3NjcmlwdFwiOlwiaW5wdXRzLmFydGlmYWN0X3ZhbHVlID0gYXJ0aWZhY3QudmFsdWVcIixc\nInJlc3VsdF9uYW1lXCI6XCJwaGlzaGFpX3NjYW5fb3V0cHV0XCJ9PC9yZXNpbGllbnQ6ZnVuY3Rp\nb24+PC9leHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VGbG93XzFrNXcwdG48L2lu\nY29taW5nPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMGhjbW1hczwvb3V0Z29pbmc+PC9zZXJ2aWNl\nVGFzaz48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFrNXcwdG5cIiBzb3VyY2VSZWY9\nXCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJTZXJ2aWNlVGFza18wM2xsc2FiXCIv\nPjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzEyMjlwbG9cIiBuYW1lPVwiUGhpc2guQUkg\nR2V0IFJlcG9ydFwiIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVu\ndHM+PHJlc2lsaWVudDpmdW5jdGlvbiB1dWlkPVwiMDFlNGUxMTUtMzMzYS00NWE0LTllMDEtZjQ3\nMGFhYzU0ZDMzXCI+e1wiaW5wdXRzXCI6e30sXCJwb3N0X3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJp\nZiByZXN1bHRzLmNvbnRlbnQ6XFxuICBub3RlID0gXFxcIlBoaXNoLkFJIHVybDogXFxcIiArIHJl\nc3VsdHMuY29udGVudC51cmxcXG4gIG5vdGUgPSBub3RlICsgXFxcIiZsdDtici8mZ3Q7UGhpc2gu\nQUkgdmVyZGljdDogXFxcIiArIHJlc3VsdHMuY29udGVudC52ZXJkaWN0XFxuICBub3RlID0gbm90\nZSArIFxcXCImbHQ7YnIvJmd0OyZsdDthIGhyZWY9XFxcXFxcXCJodHRwczovL2FwcC5waGlzaC5h\naS9pbmNpZGVudC97fVxcXFxcXFwiJmd0O1BoaXNoLkFJIHJlcG9ydCBsaW5rJmx0Oy9hJmd0O1xc\nXCIuZm9ybWF0KHJlc3VsdHMuaW5wdXRzLnBoaXNoYWlfc2Nhbl9pZClcXG4gIGluY2lkZW50LmFk\nZE5vdGUoaGVscGVyLmNyZWF0ZVJpY2hUZXh0KG5vdGUpKVxcblxcblxcblxcXCJcXFwiXFxcIlxc\nbkV4YW1wbGUgUmVzcG9uc2VcXG5cXG57ICBcXG4gICBcXFwiY29udGVudFxcXCI6eyAgXFxuICAg\nICAgXFxcInN0YXR1c1xcXCI6XFxcImNvbXBsZXRlZFxcXCIsXFxuICAgICAgXFxcImRvbWFpblxc\nXCI6XFxcInN0YXJ0dXA0MTcuZ2IubmV0XFxcIixcXG4gICAgICBcXFwidXNlcl9hZ2VudFxcXCI6\nXFxcIk1vemlsbGEvNS4wIChYMTE7IExpbnV4IHg4Nl82NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChL\nSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzY0LjAuMzI4Mi4xNDAgU2FmYXJpLzUzNy4zNlxcXCIs\nXFxuICAgICAgXFxcInRhcmdldFxcXCI6XFxcIk1pY3Jvc29mdFxcXCIsXFxuICAgICAgXFxcInRp\ndGxlXFxcIjpcXFwic2lnbl9pbl90b195b3VyX21pY3Jvc29mdF9hY2NvdW50XFxcIixcXG4gICAg\nICBcXFwidXJsXFxcIjpcXFwiaHR0cHM6Ly9zdGFydHVwNDE3LmdiLm5ldC9NMz9tZXMxPWFzZGZA\nYXNkZi5jb21cXFwiLFxcbiAgICAgIFxcXCJ0aW1lXFxcIjpcXFwiMjAxOC0xMi0wNlQyMjozOToz\nNC4yMTBaXFxcIixcXG4gICAgICBcXFwidmVyZGljdFxcXCI6XFxcIm1hbGljaW91c1xcXCIsXFxu\nICAgICAgXFxcInBsYW5cXFwiOlxcXCJmcmVlXFxcIixcXG4gICAgICBcXFwidGxkXFxcIjpcXFwi\nbmV0XFxcIixcXG4gICAgICBcXFwiaXNvX2NvZGVcXFwiOlxcXCJVU1xcXCIsXFxuICAgICAgXFxc\nImZpcnN0X3NlZW5cXFwiOlxcXCIyMDE4LTEyLTA2VDE5OjE2OjIwLjgyNVpcXFwiLFxcbiAgICAg\nIFxcXCJpcF9hZGRyZXNzXFxcIjpcXFwiMTA0LjI0LjEwNC4xMTZcXFwiLFxcbiAgICAgIFxcXCJh\nc25cXFwiOjEzMzM1LFxcbiAgICAgIFxcXCJ1c2VyX2VtYWlsXFxcIjpcXFwiYXBpXFxcIixcXG4g\nICAgICBcXFwidXNlclxcXCI6XFxcImZyZWUtYXBpXFxcIlxcbiAgIH0sXFxuICAgXFxcImlucHV0\nc1xcXCI6eyAgXFxuICAgICAgXFxcInBoaXNoYWlfc2Nhbl9pZFxcXCI6XFxcImdHQlNhVnZsTjVx\nYzVQY3d2bnVUXFxcIlxcbiAgIH0sXFxuICAgXFxcInJ1bl90aW1lXFxcIjpcXFwiMC40MTkzNzI3\nOTcwMTJcXFwiXFxufVxcblxcXCJcXFwiX
FxcIlwiLFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6\nXCJpbnB1dHMucGhpc2hhaV9zY2FuX2lkID0gd29ya2Zsb3cucHJvcGVydGllcy5waGlzaGFpX3Nj\nYW5fb3V0cHV0W1xcXCJjb250ZW50XFxcIl1bXFxcInNjYW5faWRcXFwiXVwifTwvcmVzaWxpZW50\nOmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18waGNt\nbWFzPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzB4NHlhOXI8L291dGdvaW5nPjwv\nc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18waGNtbWFzXCIgc291\ncmNlUmVmPVwiU2VydmljZVRhc2tfMDNsbHNhYlwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNrXzEy\nMjlwbG9cIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMGhyemNsMFwiPjxpbmNvbWluZz5TZXF1\nZW5jZUZsb3dfMHg0eWE5cjwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwi\nU2VxdWVuY2VGbG93XzB4NHlhOXJcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18xMjI5cGxvXCIg\ndGFyZ2V0UmVmPVwiRW5kRXZlbnRfMGhyemNsMFwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0\nQW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0\nPjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwi\nIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0\naW9uXzFreHhpeXRcIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMWltY2h1\nN1wiPjx0ZXh0PklucHV0IFVSTDwvdGV4dD48L3RleHRBbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBp\nZD1cIkFzc29jaWF0aW9uXzFiYmYxdTdcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18wM2xsc2Fi\nXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWltY2h1N1wiLz48dGV4dEFubm90YXRpb24g\naWQ9XCJUZXh0QW5ub3RhdGlvbl8wZGV2MGd1XCI+PHRleHQ+T3V0cHV0cyByZXBvcnQgZnJvbSBQ\naGlzaC5BSTwvdGV4dD48L3RleHRBbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0\naW9uXzE0eGV4b3FcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18xMjI5cGxvXCIgdGFyZ2V0UmVm\nPVwiVGV4dEFubm90YXRpb25fMGRldjBndVwiLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3Jh\nbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBtbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVu\nZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFcIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVu\ndD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9t\nZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwi\nLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkw\nXCIgeD1cIjE1N1wiIHk9XCIyMjNcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5T\naGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhp\neXRcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdo\ndD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwiOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5T\naGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwi\nIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIg\neHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIx\nNTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRn\nZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzAzbGxzYWJcIiBp\nZD1cIlNlcnZpY2VUYXNrXzAzbGxzYWJfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIg\nd2lkdGg9XCIxMDBcIiB4PVwiMjQ2XCIgeT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJw\nbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xazV3MHRuXCIgaWQ9XCJT\nZXF1ZW5jZUZsb3dfMWs1dzB0bl9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTk4XCIgeHNpOnR5\ncGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIyNDZcIiB4\nc2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21n\nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjIyMlwiIHk9XCIxODQuNVwi\nLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUg\nYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18xMjI5cGxvXCIgaWQ9XCJTZXJ2aWNlVGFza18xMjI5\ncGxvX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjQz\n
MS4zMjUxNTg5NDY0MTIzM1wiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6\nQlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMGhjbW1hc1wiIGlkPVwiU2VxdWVu\nY2VGbG93XzBoY21tYXNfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM0NlwiIHhzaTp0eXBlPVwi\nb21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDMxXCIgeHNpOnR5\ncGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJv\ndW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIzODguNVwiIHk9XCIxODRcIi8+PC9i\ncG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5F\nbGVtZW50PVwiRW5kRXZlbnRfMGhyemNsMFwiIGlkPVwiRW5kRXZlbnRfMGhyemNsMF9kaVwiPjxv\nbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjYwNS4zMjUxNTg5NDY0\nMTIzXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1c\nIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjYyMy4zMjUxNTg5NDY0MTIzXCIgeT1cIjIyN1wiLz48L2Jw\nbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVs\nZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMHg0eWE5clwiIGlkPVwiU2VxdWVuY2VGbG93XzB4NHlhOXJf\nZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjUzMVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5\nPVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNjA1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2lu\ndFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIx\nM1wiIHdpZHRoPVwiMFwiIHg9XCI1NjhcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48\nL2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlRleHRBbm5v\ndGF0aW9uXzFpbWNodTdcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFpbWNodTdfZGlcIj48b21nZGM6\nQm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMTMwXCIgeT1cIjg5Ljc3ODM5\nMzM1MTgwMDU2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVt\nZW50PVwiQXNzb2NpYXRpb25fMWJiZjF1N1wiIGlkPVwiQXNzb2NpYXRpb25fMWJiZjF1N19kaVwi\nPjxvbWdkaTp3YXlwb2ludCB4PVwiMjUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIx\nNjlcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxOTdcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIg\neT1cIjEyMFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVu\ndD1cIlRleHRBbm5vdGF0aW9uXzBkZXYwZ3VcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzBkZXYwZ3Vf\nZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwiNTczXCIg\neT1cIjkwXCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50\nPVwiQXNzb2NpYXRpb25fMTR4ZXhvcVwiIGlkPVwiQXNzb2NpYXRpb25fMTR4ZXhvcV9kaVwiPjxv\nbWdkaTp3YXlwb2ludCB4PVwiNTI4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIxNzNc\nIi8+PG9tZ2RpOndheXBvaW50IHg9XCI2MDJcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1c\nIjEyMFwiLz48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBN\nTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4iLCAidmVyc2lvbiI6IDIxfSwgImFjdGlvbnMiOiBbXX1d\nLCAicm9sZXMiOiBbXSwgIndvcmtzcGFjZXMiOiBbXSwgImZ1bmN0aW9ucyI6IFt7ImlkIjogNjAs\nICJuYW1lIjogInBoaXNoX2FpX2dldF9yZXBvcnQiLCAiZGlzcGxheV9uYW1lIjogIlBoaXNoLkFJ\nIEdldCBSZXBvcnQiLCAiZGVzY3JpcHRpb24iOiB7ImZvcm1hdCI6ICJ0ZXh0IiwgImNvbnRlbnQi\nOiAiUmV0dXJucyByZXBvcnQgb2YgYSBVUkwgc2NhbiBmcm9tIFBoaXNoLkFJLiJ9LCAiZGVzdGlu\nYXRpb25faGFuZGxlIjogInBoaXNoX2FpX21lc3NhZ2VfZGVzdGluYXRpb24iLCAiZXhwb3J0X2tl\neSI6ICJwaGlzaF9haV9nZXRfcmVwb3J0IiwgInV1aWQiOiAiMDFlNGUxMTUtMzMzYS00NWE0LTll\nMDEtZjQ3MGFhYzU0ZDMzIiwgInZlcnNpb24iOiAxLCAiY3JlYXRvciI6IHsiaWQiOiAxLCAidHlw\nZSI6ICJ1c2VyIiwgIm5hbWUiOiAiYWRtaW5AY28zc3lzLmNvbSIsICJkaXNwbGF5X25hbWUiOiAi\nUmVzaWxpZW50IFN5c2FkbWluIn0sICJsYXN0X21vZGlmaWVkX2J5IjogeyJpZCI6IDEsICJ0eXBl\nIjogInVzZXIiLCAibmFtZSI6ICJhZG1pbkBjbzNzeXMuY29tIiwgImRpc3BsYXlfbmFtZSI6ICJS\nZXNpbGllbnQgU3lzYWRtaW4ifSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NDIwNTI1OTM5NTUs\nICJ2aWV3X2l0ZW1zIjogW3sic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVsbCwgImVs\nZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlI
jogIl9fZnVuY3Rpb24iLCAiY29udGVu\ndCI6ICJiNmFjMmFkYi03MWYzLTRkMzMtYWI3MS1lNmEzNjIzMTkyNmUiLCAic2hvd19saW5rX2hl\nYWRlciI6IGZhbHNlfV0sICJ3b3JrZmxvd3MiOiBbeyJ3b3JrZmxvd19pZCI6IDEwMCwgIm5hbWUi\nOiAiRXhhbXBsZTogUGhpc2guQUkgU2NhbiBVUkwiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhh\nbXBsZV9waGlzaGFpX3NjYW5fdXJsIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImRlc2Ny\naXB0aW9uIjogbnVsbCwgInV1aWQiOiBudWxsLCAiYWN0aW9ucyI6IFtdfV19LCB7ImlkIjogNTks\nICJuYW1lIjogInBoaXNoX2FpX3NjYW5fdXJsIiwgImRpc3BsYXlfbmFtZSI6ICJQaGlzaC5BSSBT\nY2FuIFVSTCIsICJkZXNjcmlwdGlvbiI6IHsiZm9ybWF0IjogInRleHQiLCAiY29udGVudCI6ICJT\nY2FucyBVUkwgYWdhaW5zdCBQaGlzaC5BSS4ifSwgImRlc3RpbmF0aW9uX2hhbmRsZSI6ICJwaGlz\naF9haV9tZXNzYWdlX2Rlc3RpbmF0aW9uIiwgImV4cG9ydF9rZXkiOiAicGhpc2hfYWlfc2Nhbl91\ncmwiLCAidXVpZCI6ICJlZDIzYjkwNC1mZDJlLTRkNzMtYjY0NS1kNWZhNGZjYjRiYTAiLCAidmVy\nc2lvbiI6IDIsICJjcmVhdG9yIjogeyJpZCI6IDEsICJ0eXBlIjogInVzZXIiLCAibmFtZSI6ICJh\nZG1pbkBjbzNzeXMuY29tIiwgImRpc3BsYXlfbmFtZSI6ICJSZXNpbGllbnQgU3lzYWRtaW4ifSwg\nImxhc3RfbW9kaWZpZWRfYnkiOiB7ImlkIjogMSwgInR5cGUiOiAidXNlciIsICJuYW1lIjogImFk\nbWluQGNvM3N5cy5jb20iLCAiZGlzcGxheV9uYW1lIjogIlJlc2lsaWVudCBTeXNhZG1pbiJ9LCAi\nbGFzdF9tb2RpZmllZF90aW1lIjogMTU0MjA0NTk0MTIxMywgInZpZXdfaXRlbXMiOiBbeyJzdGVw\nX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwg\nImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJjb250ZW50IjogImIwOWUxODk5LTc0NTItNGY0\nZi1iZGUxLTIzYjJmYmNjZDkwNCIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9XSwgIndvcmtm\nbG93cyI6IFt7IndvcmtmbG93X2lkIjogMTAwLCAibmFtZSI6ICJFeGFtcGxlOiBQaGlzaC5BSSBT\nY2FuIFVSTCIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX3BoaXNoYWlfc2Nhbl91cmwi\nLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiZGVzY3JpcHRpb24iOiBudWxsLCAidXVpZCI6\nIG51bGwsICJhY3Rpb25zIjogW119XX1dfQ==\n\"\"\"\n )", "title": "" }, { "docid": "754acef91657ddeca657c6b822644aa3", "score": "0.45027596", "text": "def code_to_json(fn):\n data = code_to_dict(fn)\n data = code_encode_json_hook(data)\n return json.dumps(data)", "title": "" }, { "docid": "c83f736f985d3b08144fe8a4ff1a5796", "score": "0.45005855", "text": "def build_init_content(self) -> bytes:\n\n content = {}\n content[\"type\"] = \"FAT-0\"\n content[\"supply\"] = self.supply\n\n if self.symbol:\n content[\"symbol\"] = self.symbol\n if self.metadata:\n content[\"metadata\"] = self.metadata\n\n return json.dumps(content, separators=(\",\", \":\")).encode()", "title": "" }, { "docid": "0b49e1fd1c1d38d15e130c014a9b66fb", "score": "0.4497368", "text": "def get_rhs(json):\n pass # PLACEHOLDER. 
FIRST PUT TEST CASES IN a2test.py, THEN WRITE THE BODY", "title": "" }, { "docid": "637d79593454581c7b24e0c23fbb8914", "score": "0.44919026", "text": "def url_encode_raw(client, args):\n print(_code_helper(args, url_encode_helper, copy=False))", "title": "" }, { "docid": "3fa90c111283889d7cd74a1a1df8d008", "score": "0.44911838", "text": "def PublishJavascript(x):\n PublishHtml(\"<script type='text/javascript'>%s</script>\" % x)", "title": "" }, { "docid": "6ffe643a6c1b0a45bb29ec1416da9769", "score": "0.44882098", "text": "def output(self,obj: Environ) -> str:\n #really should be type Json but that has issues\n return self.text", "title": "" }, { "docid": "2409b2ff660e563f1db7519dc5cfe530", "score": "0.44847733", "text": "def generate_from_json(self, dict_) -> str:\n code = super().generate_from_json(dict_)\n return f\"{self.py_name}={code}\"", "title": "" }, { "docid": "c38e0b7a7ab096c1c738843ae6fe8c65", "score": "0.44820315", "text": "def generateSendOneVariableViaJson(self): \n return r\"\"\"\nvoid sendOneTunableVariablesAsJson(char * variableName, float value, int digits)\n{\n Serial1.println(\"\"); \n Serial1.print(\"{\\\"\"); \n Serial1.print(variableName);\n Serial1.print(\"\\\":\"); \n Serial1.print(value,digits); \n Serial1.println(\"}\"); \n}\n \"\"\"", "title": "" }, { "docid": "001b6f2ff7939d3a1f923628d403c029", "score": "0.44797572", "text": "def ISO8601():", "title": "" }, { "docid": "83526b1deca7b03f1cfb0191f7196716", "score": "0.44790483", "text": "def serialize(self) -> str:", "title": "" }, { "docid": "ab619d0a035667a46fc2c44689243cd3", "score": "0.44753155", "text": "def get_code_v2(color, bold=..., reverse=..., underline=..., blink=...):\n ...", "title": "" }, { "docid": "5d9c2e7d32a9f320f7cc11653737aea4", "score": "0.44675395", "text": "def helper():\n answer = countries[question-1][1].replace(\" \", \"\")\n return jsonify({\"answer\":answer})", "title": "" }, { "docid": "165ab25d07e79673b002c069c529805e", "score": "0.44558787", "text": "def client_code(service: \"UsualService\") -> None:\n service.save_output()\n with open(service.output_path, \"r\") as handle:\n service_output = json.load(handle)\n print(\"service_output: \", service_output)", "title": "" }, { "docid": "a314d9ffad730851c3235a85d7b668ec", "score": "0.44548413", "text": "def to_python(value, state=None):", "title": "" }, { "docid": "77256107a4faab1d1fff751fe31671d6", "score": "0.44535014", "text": "def encode(self, data, **kwargs)-> Any:", "title": "" }, { "docid": "9ad65d2c49323be26075dfb2b61d2106", "score": "0.44509628", "text": "def json(self):\r\n return self.content", "title": "" }, { "docid": "70ca0062137606745c92ce3b32c94dc5", "score": "0.44507483", "text": "def test_list_json(self):\n li = [\"iron\", \"maiden\", [\"thrash\", \"metal\"]]\n b = Base.to_json_string(li)\n self.assertEqual(b, \"[\\\"iron\\\", \\\"maiden\\\", [\\\"thrash\\\", \\\"metal\\\"]]\")", "title": "" }, { "docid": "e1d1e4eb4fda44aa21d20585accd9f59", "score": "0.4444246", "text": "def cbor2json(cbor_obj):\r\n\r\n return cbor.loads(cbor_obj)", "title": "" }, { "docid": "b2698b59f6597f71c50272c7984ab5d8", "score": "0.44441962", "text": "def JsonConfigEncoding(config):\n conf = OrderedDict()\n conf['id'] = config['name']\n conf['board']= config['board']\n conf['pin'] = config['pin']\n conf['freq'] = str(config['frequency'])\n conf['endpointIP'] = config['endpoint']['ip']\n conf['endpointPort'] = str(config['endpoint']['port'])\n return conf", "title": "" }, { "docid": "d90fa34beec65f5dbd538cbcc10618c7", "score": "0.4443845", 
"text": "def therm():\n\n composition = [\"All\",\"Aluminum\",\"Carbon\",\"Copper\",\"Graphite\",\"Iron\",\"Nickel\",\"Silicon\",\"Silver\",\"Tantalum\",\"Titanium\"]\n\n # Return a list of mechanical properties\n return jsonify(composition)", "title": "" }, { "docid": "0eb9abb75839047fc16128a50b9669ef", "score": "0.44367296", "text": "def customization_data(client=None):\n\n # This import data contains:\n # Incident fields:\n # avalon_auto_refresh\n # avalon_auto_refresh_time\n # avalon_last_pull_time\n # avalon_workspace_id\n # Message Destinations:\n # avalon_actions\n # Functions:\n # avalon_void\n # Rules:\n # Avalon: Create Workspace\n # Avalon: Pull Nodes\n # Avalon: Push Artifact\n # Avalon: Push Artifacts\n # Avalon: Start Auto-refresh\n # Avalon: Stop Auto-refresh\n\n\n yield ImportDefinition(u\"\"\"\neyJpZCI6IDgsICJmaWVsZHMiOiBbeyJjaG9zZW4iOiBmYWxzZSwgImludGVybmFsIjogZmFsc2Us\nICJ1dWlkIjogIjMyZTU4MWZhLTJlNmQtNDk1NC04YjBlLWY1NjYzYzFmNTlmNCIsICJvcGVyYXRp\nb25zIjogW10sICJ2YWx1ZXMiOiBbXSwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiA4MCwgIm5h\nbWUiOiAiYXZhbG9uX3dvcmtzcGFjZV9pZCIsICJ0ZXh0IjogIkF2YWxvbjogV29ya3NwYWNlIElE\nIiwgInByZWZpeCI6ICJwcm9wZXJ0aWVzIiwgInR5cGVfaWQiOiAwLCAidG9vbHRpcCI6ICJFbnRl\nciBBdmFsb24gV29ya3NwYWNlIElEIHRvIGxpbmsgaXQgdG8gdGhpcyBJbmNpZGVudCAgIiwgInBs\nYWNlaG9sZGVyIjogIiIsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJoaWRlX25vdGlmaWNhdGlv\nbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRp\nb24iOiBmYWxzZSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAicmVhZF9vbmx5IjogZmFsc2UsICJy\naWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvYXZhbG9uX3dvcmtzcGFj\nZV9pZCIsICJ0ZW1wbGF0ZXMiOiBbXX0sIHsiY2hvc2VuIjogZmFsc2UsICJpbnRlcm5hbCI6IGZh\nbHNlLCAidXVpZCI6ICI1N2NkMjcyYi1jMDExLTQ4N2EtOGIwMS04YTczYmYyMzBhMzAiLCAib3Bl\ncmF0aW9ucyI6IFtdLCAidmFsdWVzIjogW10sICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogNzgs\nICJuYW1lIjogImF2YWxvbl9sYXN0X3B1bGxfdGltZSIsICJ0ZXh0IjogIkF2YWxvbjogTGFzdCBQ\ndWxsIFRpbWUiLCAicHJlZml4IjogInByb3BlcnRpZXMiLCAidHlwZV9pZCI6IDAsICJ0b29sdGlw\nIjogIlRpbWUgb2YgdGhlIGxhc3QgcHVsbCBmcm9tIEF2YWxvbiIsICJwbGFjZWhvbGRlciI6ICIi\nLCAiaW5wdXRfdHlwZSI6ICJkYXRldGltZXBpY2tlciIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZh\nbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiBm\nYWxzZSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAicmVhZF9vbmx5IjogZmFsc2UsICJyaWNoX3Rl\neHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvYXZhbG9uX2xhc3RfcHVsbF90aW1l\nIiwgInRlbXBsYXRlcyI6IFtdfSwgeyJjaG9zZW4iOiBmYWxzZSwgImludGVybmFsIjogZmFsc2Us\nICJ1dWlkIjogIjA2YTJhYjhmLTdkNjktNDk2Yi1iMWZhLTBmNjc5YTU2MzNhNiIsICJvcGVyYXRp\nb25zIjogW10sICJ2YWx1ZXMiOiBbXSwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiA4MSwgIm5h\nbWUiOiAiYXZhbG9uX2F1dG9fcmVmcmVzaCIsICJ0ZXh0IjogIkF2YWxvbjogQXV0byBSZWZyZXNo\nIiwgInByZWZpeCI6ICJwcm9wZXJ0aWVzIiwgInR5cGVfaWQiOiAwLCAidG9vbHRpcCI6ICJTZXQg\ndG8gWWVzIHRvIGF1dG9tYXRpY2FsbHkgcHVsbCBub2RlcyBmcm9tIEF2YWxvbiIsICJwbGFjZWhv\nbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgImhpZGVfbm90aWZpY2F0aW9uIjog\nZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6\nIGZhbHNlLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJyZWFkX29ubHkiOiBmYWxzZSwgInJpY2hf\ndGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJpbmNpZGVudC9hdmFsb25fYXV0b19yZWZyZXNo\nIiwgInRlbXBsYXRlcyI6IFtdfSwgeyJjaG9zZW4iOiBmYWxzZSwgImludGVybmFsIjogZmFsc2Us\nICJ1dWlkIjogIjgzYWY1NzYyLTA4MmUtNDlhMS1iNDhlLTUyMThiNmIxNjAwNiIsICJvcGVyYXRp\nb25zIjogW10sICJ2YWx1ZXMiOiBbXSwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiA3OSwgIm5h\nbWUiOiAiYXZhbG9uX2F1dG9fcmVmcmVzaF90aW1lIiwgInRleHQiOiAiQXZhbG9uOiBBdXRvIFJl\nZnJlc2ggVGltZSIsICJwcmVmaXgiOiAicHJvcGVydGllcyIsICJ0eXBlX2lkIjogMCwgInRvb2x0\naX
AiOiAiQXZhbG9uIGF1dG8tcmVmcmVzaCBpbnRlcnZhbCIsICJwbGFjZWhvbGRlciI6ICJNaW51\ndGVzIiwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2Us\nICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNl\nLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJyZWFkX29ubHkiOiBmYWxzZSwgInJpY2hfdGV4dCI6\nIGZhbHNlLCAiZXhwb3J0X2tleSI6ICJpbmNpZGVudC9hdmFsb25fYXV0b19yZWZyZXNoX3RpbWUi\nLCAidGVtcGxhdGVzIjogW119XSwgInBoYXNlcyI6IFtdLCAib3ZlcnJpZGVzIjogW10sICJhY3Rp\nb25zIjogW3siaWQiOiAxNCwgIm5hbWUiOiAiQXZhbG9uOiBDcmVhdGUgV29ya3NwYWNlIiwgInR5\ncGUiOiAxLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAiY29uZGl0aW9ucyI6IFt7Im1ldGhv\nZCI6ICJub3RfaGFzX2FfdmFsdWUiLCAiZmllbGRfbmFtZSI6ICJpbmNpZGVudC5wcm9wZXJ0aWVz\nLmF2YWxvbl93b3Jrc3BhY2VfaWQiLCAidmFsdWUiOiBudWxsLCAidHlwZSI6IG51bGwsICJldmFs\ndWF0aW9uX2lkIjogbnVsbH1dLCAiYXV0b21hdGlvbnMiOiBbXSwgIm1lc3NhZ2VfZGVzdGluYXRp\nb25zIjogWyJhdmFsb25fYWN0aW9ucyJdLCAid29ya2Zsb3dzIjogW10sICJ2aWV3X2l0ZW1zIjog\nW10sICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiMjYxZjZhZDItYmE2OC00Y2Qx\nLWFkMjMtNmI1ZmI5Y2NjNjliIiwgImV4cG9ydF9rZXkiOiAiQXZhbG9uOiBDcmVhdGUgV29ya3Nw\nYWNlIiwgImxvZ2ljX3R5cGUiOiAiYWxsIn0sIHsiaWQiOiAxNSwgIm5hbWUiOiAiQXZhbG9uOiBQ\ndWxsIE5vZGVzIiwgInR5cGUiOiAxLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAiY29uZGl0\naW9ucyI6IFt7Im1ldGhvZCI6ICJoYXNfYV92YWx1ZSIsICJmaWVsZF9uYW1lIjogImluY2lkZW50\nLnByb3BlcnRpZXMuYXZhbG9uX3dvcmtzcGFjZV9pZCIsICJ2YWx1ZSI6IG51bGwsICJ0eXBlIjog\nbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVzc2Fn\nZV9kZXN0aW5hdGlvbnMiOiBbImF2YWxvbl9hY3Rpb25zIl0sICJ3b3JrZmxvd3MiOiBbXSwgInZp\nZXdfaXRlbXMiOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6ICJhZmNmOTE4\nMy04NGE5LTRjOGEtOGFhYy04NTEwYWQwMjk3MDAiLCAiZXhwb3J0X2tleSI6ICJBdmFsb246IFB1\nbGwgTm9kZXMiLCAibG9naWNfdHlwZSI6ICJhbGwifSwgeyJpZCI6IDE2LCAibmFtZSI6ICJBdmFs\nb246IFB1c2ggQXJ0aWZhY3QiLCAidHlwZSI6IDEsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIs\nICJjb25kaXRpb25zIjogW3sibWV0aG9kIjogImluIiwgImZpZWxkX25hbWUiOiAiYXJ0aWZhY3Qu\ndHlwZSIsICJ2YWx1ZSI6IFsiSVAgQWRkcmVzcyIsICJETlMgTmFtZSIsICJVUkwiLCAiRW1haWwg\nU2VuZGVyIiwgIk1hbHdhcmUgTUQ1IEhhc2giLCAiTWFsd2FyZSBTSEEtMSBIYXNoIiwgIk1hbHdh\ncmUgU0hBLTI1NiBIYXNoIl0sICJ0eXBlIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfSwg\neyJtZXRob2QiOiAiaGFzX2FfdmFsdWUiLCAiZmllbGRfbmFtZSI6ICJpbmNpZGVudC5wcm9wZXJ0\naWVzLmF2YWxvbl93b3Jrc3BhY2VfaWQiLCAidmFsdWUiOiBudWxsLCAidHlwZSI6IG51bGwsICJl\ndmFsdWF0aW9uX2lkIjogbnVsbH0sIHsibWV0aG9kIjogIm5vdF9jb250YWlucyIsICJmaWVsZF9u\nYW1lIjogImFydGlmYWN0LmRlc2NyaXB0aW9uIiwgInZhbHVlIjogIkF2YWxvbiIsICJ0eXBlIjog\nbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVzc2Fn\nZV9kZXN0aW5hdGlvbnMiOiBbImF2YWxvbl9hY3Rpb25zIl0sICJ3b3JrZmxvd3MiOiBbXSwgInZp\nZXdfaXRlbXMiOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6ICJlOTA4Nzhk\nOC1iYzVkLTQ4YjYtODFjZC00MTQ4NDE1ZThiNTMiLCAiZXhwb3J0X2tleSI6ICJBdmFsb246IFB1\nc2ggQXJ0aWZhY3QiLCAibG9naWNfdHlwZSI6ICJhbGwifSwgeyJpZCI6IDE3LCAibmFtZSI6ICJB\ndmFsb246IFB1c2ggQXJ0aWZhY3RzIiwgInR5cGUiOiAxLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRl\nbnQiLCAiY29uZGl0aW9ucyI6IFt7Im1ldGhvZCI6ICJoYXNfYV92YWx1ZSIsICJmaWVsZF9uYW1l\nIjogImluY2lkZW50LnByb3BlcnRpZXMuYXZhbG9uX3dvcmtzcGFjZV9pZCIsICJ2YWx1ZSI6IG51\nbGwsICJ0eXBlIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6\nIFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbImF2YWxvbl9hY3Rpb25zIl0sICJ3b3JrZmxv\nd3MiOiBbXSwgInZpZXdfaXRlbXMiOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVp\nZCI6ICI0YjUxYjBjOC0zZDI5LTRjMWMtOTQxMy03MTA5YzhkZDVjNTIiLCAiZXhwb3J0X2tleSI6\nICJBdmFsb246IFB1c2ggQXJ0aWZhY3RzIiwgImxvZ2ljX3R5cGUiOiAiYWxsIn0sIHsiaWQiOiAx\nOCwgIm5hbWUiOiAiQXZhbG9uOiBTdGFydCBBdXRvLXJlZnJ
lc2giLCAidHlwZSI6IDEsICJvYmpl\nY3RfdHlwZSI6ICJpbmNpZGVudCIsICJjb25kaXRpb25zIjogW3sibWV0aG9kIjogIm5vdF9lcXVh\nbHMiLCAiZmllbGRfbmFtZSI6ICJpbmNpZGVudC5wcm9wZXJ0aWVzLmF2YWxvbl9hdXRvX3JlZnJl\nc2giLCAidmFsdWUiOiB0cnVlLCAidHlwZSI6IG51bGwsICJldmFsdWF0aW9uX2lkIjogbnVsbH0s\nIHsibWV0aG9kIjogImhhc19hX3ZhbHVlIiwgImZpZWxkX25hbWUiOiAiaW5jaWRlbnQucHJvcGVy\ndGllcy5hdmFsb25fd29ya3NwYWNlX2lkIiwgInZhbHVlIjogbnVsbCwgInR5cGUiOiBudWxsLCAi\nZXZhbHVhdGlvbl9pZCI6IG51bGx9XSwgImF1dG9tYXRpb25zIjogW3sidHlwZSI6ICJtb2RpZnlf\nZmllbGQiLCAidHlwZV9pZCI6ICJpbmNpZGVudCIsICJmaWVsZCI6ICJhdmFsb25fYXV0b19yZWZy\nZXNoIiwgIm9wZXJhdGlvbiI6ICJzZXRfZmllbGQiLCAidmFsdWUiOiB0cnVlfV0sICJtZXNzYWdl\nX2Rlc3RpbmF0aW9ucyI6IFsiYXZhbG9uX2FjdGlvbnMiXSwgIndvcmtmbG93cyI6IFtdLCAidmll\nd19pdGVtcyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ1dWlkIjogIjUyNDIwOWI5\nLWE2OTYtNDgxZS1iOWNjLWRkZDAxOGJjNDE5OSIsICJleHBvcnRfa2V5IjogIkF2YWxvbjogU3Rh\ncnQgQXV0by1yZWZyZXNoIiwgImxvZ2ljX3R5cGUiOiAiYWxsIn0sIHsiaWQiOiAxOSwgIm5hbWUi\nOiAiQXZhbG9uOiBTdG9wIEF1dG8tcmVmcmVzaCIsICJ0eXBlIjogMSwgIm9iamVjdF90eXBlIjog\nImluY2lkZW50IiwgImNvbmRpdGlvbnMiOiBbeyJtZXRob2QiOiAiZXF1YWxzIiwgImZpZWxkX25h\nbWUiOiAiaW5jaWRlbnQucHJvcGVydGllcy5hdmFsb25fYXV0b19yZWZyZXNoIiwgInZhbHVlIjog\ndHJ1ZSwgInR5cGUiOiBudWxsLCAiZXZhbHVhdGlvbl9pZCI6IG51bGx9XSwgImF1dG9tYXRpb25z\nIjogW10sICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFsiYXZhbG9uX2FjdGlvbnMiXSwgIndvcmtm\nbG93cyI6IFtdLCAidmlld19pdGVtcyI6IFtdLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ1\ndWlkIjogImNjNmZjZjk3LTJjYWItNGZjOC05YTIyLWYyMDQwOTU0NGMzYSIsICJleHBvcnRfa2V5\nIjogIkF2YWxvbjogU3RvcCBBdXRvLXJlZnJlc2giLCAibG9naWNfdHlwZSI6ICJhbGwifV0sICJs\nYXlvdXRzIjogW10sICJub3RpZmljYXRpb25zIjogbnVsbCwgInRpbWVmcmFtZXMiOiBudWxsLCAi\naW5kdXN0cmllcyI6IG51bGwsICJyZWd1bGF0b3JzIjogbnVsbCwgImdlb3MiOiBudWxsLCAiZnVu\nY3Rpb25zIjogW3siaWQiOiA0LCAibmFtZSI6ICJhdmFsb25fdm9pZCIsICJkZXNjcmlwdGlvbiI6\nIHsiZm9ybWF0IjogInRleHQiLCAiY29udGVudCI6ICJWb2lkIGZ1bmN0aW9uIHRvIGFsbG93IGN1\nc3RvbWl6YXRpb25zIGV4cG9ydCBhbmQgaW1wb3J0In0sICJ1dWlkIjogIjc2MDkxMWY3LTdlYzkt\nNGFlYy04YWQwLWE1NGRkZTU1YmVlMiIsICJ2ZXJzaW9uIjogMywgImNyZWF0b3IiOiB7ImlkIjog\nMSwgInR5cGUiOiAidXNlciIsICJuYW1lIjogInZhbEBraW5nYW5kdW5pb24uY29tIiwgImRpc3Bs\nYXlfbmFtZSI6ICJWYWwgS2FudGNoZXYifSwgIndvcmtmbG93cyI6IFtdLCAiZGlzcGxheV9uYW1l\nIjogImF2YWxvbl92b2lkIiwgImRlc3RpbmF0aW9uX2hhbmRsZSI6ICJhdmFsb25fYWN0aW9ucyIs\nICJleHBvcnRfa2V5IjogImF2YWxvbl92b2lkIiwgImxhc3RfbW9kaWZpZWRfYnkiOiB7ImlkIjog\nMSwgInR5cGUiOiAidXNlciIsICJuYW1lIjogInZhbEBraW5nYW5kdW5pb24uY29tIiwgImRpc3Bs\nYXlfbmFtZSI6ICJWYWwgS2FudGNoZXYifSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NTU1NDk5\nODcxNjIsICJ2aWV3X2l0ZW1zIjogW119XSwgInNlcnZlcl92ZXJzaW9uIjogeyJtYWpvciI6IDMw\nLCAibWlub3IiOiAwLCAiYnVpbGRfbnVtYmVyIjogMzQ3NiwgInZlcnNpb24iOiAiMzAuMC4zNDc2\nIn0sICJleHBvcnRfZm9ybWF0X3ZlcnNpb24iOiAyLCAiZXhwb3J0X2RhdGUiOiAxNTU1NTUwMTEy\nNDE3LCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJ1cGRhdGVfZGF0ZSI6IDE1NTU1NjE4MTE4MjAsICJj\ncmVhdGVfZGF0ZSI6IDE1NTU1NjE4MTE4MjAsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1h\nZDM5LTRhMDAwNDA0NGFhMCIsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2Vz\nIChpbnRlcm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRl\ncm5hbCkiLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZW5h\nYmxlZCI6IGZhbHNlLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVu\nIjogZmFsc2UsICJpZCI6IDB9XSwgImF1dG9tYXRpY190YXNrcyI6IFtdLCAibWVzc2FnZV9kZXN0\naW5hdGlvbnMiOiBbeyJuYW1lIjogImF2YWxvbl9hY3Rpb25zIiwgInByb2dyYW1tYXRpY19uYW1l\nIjogImF2YWxvbl9hY3Rpb25zIiwgImRlc3RpbmF0aW9uX3R5cGUiOiAwLCAiZXhwZWN0X2FjayI6\nIHRydWUsICJ1c2VycyI6IFsidmFsQGtpbmdhbmR1bmlvbi5jb20iXSwgInV1aWQiOiAiYzMzMjUw\nZjQtNzgxMS00NW
I1LTg1OGItZDU5N2QyMjY5YzgxIiwgImV4cG9ydF9rZXkiOiAiYXZhbG9uX2Fj\ndGlvbnMifV0sICJ0YXNrX29yZGVyIjogW10sICJhY3Rpb25fb3JkZXIiOiBbXSwgInR5cGVzIjog\nW10sICJzY3JpcHRzIjogW10sICJpbmNpZGVudF9hcnRpZmFjdF90eXBlcyI6IFtdLCAid29ya2Zs\nb3dzIjogW10sICJyb2xlcyI6IFtdLCAid29ya3NwYWNlcyI6IFtdfQ==\n\"\"\"\n )", "title": "" }, { "docid": "3c3f412684082c65a586c9521f1cba24", "score": "0.44359836", "text": "def piece_to_string(piece) -> str:\n def p(x): return '\"' + piece[x].strip(',\"') + '\"' # Sorry for this\n return f\"{p('title')},{p('name')},{p('creator')},{p('art_series')},{piece['price']},{p('symbol')},{piece['type'].name},{piece['reactions']['likes']},{piece['nsfw']},{piece['tokens']},{piece['year']},{piece['rights']},{piece['royalty']},{p('cid')},{p('path')}\\r\\n\"", "title": "" }, { "docid": "ae22d7e65531f5b7bcf48d4ba608dce3", "score": "0.4433037", "text": "def stylish(diff_dict):\n return '{\\n' + dict_node_stylish(diff_dict).replace(',\\n', '\\n') + '}'", "title": "" }, { "docid": "4b7ade3dad61e93f9af8b6a992681704", "score": "0.44316816", "text": "def three_laws():\n return jsonify('A robot may not injure a human being or, through inaction, allow a human being to come to harm. ' +\n 'A robot must obey the orders given it by human beings except where such orders would conflict with the First Law. ' +\n 'A robot must protect its own existence as long as such protection does not conflict with the First or Second Laws.')", "title": "" }, { "docid": "953fa535de977c9539c8c890adbee6d0", "score": "0.44267505", "text": "def serialize(ag):\n #turn adventureGame object into string\n return json.dumps(ag.data)", "title": "" }, { "docid": "0de3d965dcb1ce3310a8aba4c96c7ad3", "score": "0.44253922", "text": "def jasonizar(self, lista):\n \n p='' \n pre=\"{\\\"totalpages\\\": \\\"\"+str(self.totalPages) + \"\\\",\\\"currpage\\\" : \\\"\" + str(self.currPage) + \"\\\",\\\"totalrecords\\\" : \\\"\" \n pre= pre + str(self.totalRecords) + \" \\\",\\\"invdata\\\" : [\" \n \n \n for lb in listaLB:\n p=p+\"{\\\"idLB\\\":\\\"\"+str(lb.id)+\"\\\",\\\"descripcion\\\": \\\"\"+lb.descripcion +\"\\\",\\\"estado\\\": \\\"\"+lb.estado+\"\\\"},\"\n # {\"nombre\":\"nombre\",\"idRol\":\"rol\",\"descripcion\":\"descripciones\"},\n p=p[0:len(p)-1] \n p=p+\"]}\" \n p=pre+p\n \n return p", "title": "" }, { "docid": "4a42f7f957bfeb41ebf0bce1dba49c01", "score": "0.4424738", "text": "def html_encode_raw(client, args):\n print(_code_helper(args, html_encode_helper, copy=False))", "title": "" }, { "docid": "014e95560606611b4be493b98e8bab54", "score": "0.4421971", "text": "def part_hair_curly():\r\n s = r\"012345678901234567\"\r\n s = r\" {}{{}}{{}}{{}}{} \" + \"\\n\"\r\n s+= r\" {{{{}}{}{}{{}}}} \"\r\n return s", "title": "" }, { "docid": "d45d4189823171ecc4207d676a17ce72", "score": "0.4419664", "text": "def html_encode(client, args):\n print_maybe_bin(_code_helper(args, html_encode_helper))", "title": "" }, { "docid": "8fa645bb6aa8147082f514cee76ba73b", "score": "0.44117945", "text": "def gen_passcode():\n now = datetime.now()\n expr = now + timedelta(weeks=1)\n payload = {\n 'version': 1,\n 'exp': expr.isoformat()\n }\n message = json.dumps(payload).encode('utf-8')\n encrypted = box.encrypt(message)\n return base64.b64encode(encrypted).decode('utf-8')", "title": "" }, { "docid": "ef6e1815332844874656de13f956dcbf", "score": "0.44098404", "text": "def format(data):\n return json.dumps(data, separators=(',', ':'))", "title": "" } ]
0a9782ee81bb26fb84e039a1ecbdf41a
Get the identifying parameters.
[ { "docid": "e543393755b1051c6b633dcab2fa8d74", "score": "0.6742015", "text": "def _identifying_params(self) -> Mapping[str, Any]:\n return {\n \"model_id\": self.model_id,\n \"model_kwargs\": self.model_kwargs,\n }", "title": "" } ]
[ { "docid": "d0eb583f6810b34496922a0da6388052", "score": "0.723974", "text": "def getParams(self):\n pass", "title": "" }, { "docid": "73464deb87e88bdec05ef72afc5f7f3b", "score": "0.7190109", "text": "def get_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "73464deb87e88bdec05ef72afc5f7f3b", "score": "0.7190109", "text": "def get_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "f8123b7107820cb6857eb080e3695d63", "score": "0.71658415", "text": "def get_parameters(self):\n\n detection_topic = rospy.get_param(\"~detection_topic\")\n tracker_topic = rospy.get_param('~tracker_topic')\n cost_threhold = rospy.get_param('~cost_threhold')\n min_hits = rospy.get_param('~min_hits')\n max_age = rospy.get_param('~max_age')\n\n return (detection_topic, tracker_topic, cost_threhold, \\\n max_age, min_hits)", "title": "" }, { "docid": "5b232c80d6bb6736190034ff3d5cf0d9", "score": "0.71364325", "text": "def get_params(self):\n\t\treturn []", "title": "" }, { "docid": "28a33265674b121dc04862b1a0c27a20", "score": "0.7122999", "text": "def get_params(self):\n\t\treturn self.params", "title": "" }, { "docid": "f7d7c1cd433733d76114bba710cd26da", "score": "0.7099845", "text": "def get_params(self):\n return self[\"params\"]", "title": "" }, { "docid": "04621be558a878573a8c84e583b5f0d5", "score": "0.70447904", "text": "def get_params(self):\n\n return None", "title": "" }, { "docid": "cd2efdf19a738071d9433848bfda5a8e", "score": "0.700849", "text": "def params(self):\n return self._get_params()", "title": "" }, { "docid": "daed6293ea3954102a65746df141dace", "score": "0.7004102", "text": "def get_params(self):\n return self.params", "title": "" }, { "docid": "3ba4b24b2192c8cfb0f815ed8a26d32a", "score": "0.69657934", "text": "def get_params(self):\n return []", "title": "" }, { "docid": "0abb0db746e98089ead1dd75ea3842b8", "score": "0.6949943", "text": "def _getparams(self):\n\t\traise NotImplementedError('has to be subclassed')", "title": "" }, { "docid": "68f0eddd7b8a81606e739f28013176af", "score": "0.69233745", "text": "def get_params(self):\n return {\n \"field count\": self.field_count,\n \"feature count\": self.feature_count,\n \"file path\": self.filepath,\n }", "title": "" }, { "docid": "81d5b59d17895c9c161488a1983711e3", "score": "0.6890609", "text": "def getParameters(self) :\n\t\treturn self.getParameterDict().values()", "title": "" }, { "docid": "202619d3eaa5ad032f636bb0183f9f50", "score": "0.6890397", "text": "def get_parameters(self):\n return self.parameters", "title": "" }, { "docid": "61d79cc366060aec5df7bc5ef58a3484", "score": "0.6882314", "text": "def get_params(self):\n return {\"d\": \"57\"}", "title": "" }, { "docid": "0b90ecfbe0405443fdcdb632360a8a3f", "score": "0.6836656", "text": "def get_params(self):\n return self.alg.get_params()", "title": "" }, { "docid": "62ade4456905c8ce55fb8040a5f09a13", "score": "0.681319", "text": "def get_params(self):\n return {\"orderid\": self.order_id, \"analyse\": self.analyse}", "title": "" }, { "docid": "84f2544a8ec73d702c93a91983f6558a", "score": "0.6787992", "text": "def get_params(self):\n return self.dbm.get_params()", "title": "" }, { "docid": "bb3f1def8c2d8ea94c982b4ddf54989c", "score": "0.67814875", "text": "def fetch_parameters(self, params):\n return {n: params[v.uuid] for n, v in self.parameters.items()}", "title": "" }, { "docid": "f841760d8988ae5010a2e040c9aeb22b", "score": "0.67655313", "text": "def GetParameters(cls):\n return []", "title": "" }, { "docid": "4929373553d0c062e03ed7d5327b09bb", "score": 
"0.6729927", "text": "def parameters(self):\n pass", "title": "" }, { "docid": "4929373553d0c062e03ed7d5327b09bb", "score": "0.6729927", "text": "def parameters(self):\n pass", "title": "" }, { "docid": "4929373553d0c062e03ed7d5327b09bb", "score": "0.6729927", "text": "def parameters(self):\n pass", "title": "" }, { "docid": "4929373553d0c062e03ed7d5327b09bb", "score": "0.6729927", "text": "def parameters(self):\n pass", "title": "" }, { "docid": "4929373553d0c062e03ed7d5327b09bb", "score": "0.6729927", "text": "def parameters(self):\n pass", "title": "" }, { "docid": "46f2960cb924da45b25fc48800c0935a", "score": "0.6716622", "text": "def get_params(self):\n\n return self._parameters", "title": "" }, { "docid": "bb0e79c244555a7ff3536a88703e46bf", "score": "0.66791433", "text": "def parameters(self):\n return self._parameters", "title": "" }, { "docid": "bb0e79c244555a7ff3536a88703e46bf", "score": "0.66791433", "text": "def parameters(self):\n return self._parameters", "title": "" }, { "docid": "e349218200b5e6316a16f563640e8580", "score": "0.6668688", "text": "def getparams(self):\r\n return{self.__params[0]: str(self.getid()), self.__params[1]: str(self.getweight()), \r\n self.__params[2]: str(self.getalpha()), self.__params[3]: str(self.getbqp()),\r\n self.__params[4]: str(self.getbqp_b()), self.__params[5]: str(self.getcellrem()),\r\n self.__params[6]: str(self.getsppfile())}", "title": "" }, { "docid": "3b80ffee7b5da6527860c0e2b9026be9", "score": "0.66604304", "text": "def parameters(self):\n raise NotImplementedError", "title": "" }, { "docid": "3b80ffee7b5da6527860c0e2b9026be9", "score": "0.66604304", "text": "def parameters(self):\n raise NotImplementedError", "title": "" }, { "docid": "3b80ffee7b5da6527860c0e2b9026be9", "score": "0.66604304", "text": "def parameters(self):\n raise NotImplementedError", "title": "" }, { "docid": "880f4a32576596c2c20ab21f68eb1d2b", "score": "0.6651582", "text": "def get_parameters(self):\n return self.__parameters", "title": "" }, { "docid": "618106ad9c5c7a07219ecab5002a1d92", "score": "0.66515815", "text": "def get_params(self):\n return {}", "title": "" }, { "docid": "8bec32d4734c283b29ea484b9d38a1ab", "score": "0.66432923", "text": "def parameters(self):", "title": "" }, { "docid": "fe6eb7b8ff3eade4114272e5622922e5", "score": "0.6642197", "text": "def params(self):\n return self._params", "title": "" }, { "docid": "fe6eb7b8ff3eade4114272e5622922e5", "score": "0.6642197", "text": "def params(self):\n return self._params", "title": "" }, { "docid": "fe6eb7b8ff3eade4114272e5622922e5", "score": "0.6642197", "text": "def params(self):\n return self._params", "title": "" }, { "docid": "fe6eb7b8ff3eade4114272e5622922e5", "score": "0.6642197", "text": "def params(self):\n return self._params", "title": "" }, { "docid": "d8b0730ba2a5a71e6fa7d77bb94e2f5f", "score": "0.6639904", "text": "def parameter_info(self):\n pass", "title": "" }, { "docid": "ace46a59fb428b6465758a7760d2074b", "score": "0.66366994", "text": "def get_parameters(self):\r\n return (self.model.parameters())\r\n # return []\r", "title": "" }, { "docid": "42967349264ece98d6d8b2f1ffdbdbd0", "score": "0.6624513", "text": "def getParameters(self):\n\t\treturn self.__par", "title": "" }, { "docid": "69dc66ef30e88a82422105f57930c4bc", "score": "0.6616934", "text": "def get_parameters(self):\n return self.parameters()\n #return list(self.model.parameters())", "title": "" }, { "docid": "18893b4cc090f701f9ca8f8eaad45c25", "score": "0.65940315", "text": "def getParameterInfo(self):\n\n # 
self.param[0].filter.list = ['Option1', 'Option2', 'Option3']\n # self.param[1].filter.list = ['xml'] # only xml files for DEFile\n\n return self.parameters", "title": "" }, { "docid": "fbd43a6f39947e34ddc078e66219c144", "score": "0.65913665", "text": "def get_parameters(self):\n return self._vertex.get_parameter_values(\n self._vertex.get_parameters(), self.id)", "title": "" }, { "docid": "19ac95c49e7aea3daeb218f393f9750e", "score": "0.65844953", "text": "def get_params_info(cls):\n return {}", "title": "" }, { "docid": "2b1741882153c3a388926077c65229a4", "score": "0.6580103", "text": "def get_parameters(self):\n\n params = {}\n for par in self.PARAMETERS:\n params[par] = getattr(self, par)\n return params", "title": "" }, { "docid": "aff99146d9b65c10bca817adb14eec65", "score": "0.6569603", "text": "def get_params(self):\n return ()", "title": "" }, { "docid": "aff99146d9b65c10bca817adb14eec65", "score": "0.6569603", "text": "def get_params(self):\n return ()", "title": "" }, { "docid": "9f9705309e0e045c816935e2ba229178", "score": "0.6568622", "text": "def get_params(self) -> dict:\n\t\treturn dict()", "title": "" }, { "docid": "31c8eccebe5eb3ade8fc517697ff3df0", "score": "0.6561542", "text": "def parameters(self):\n return self._parameters", "title": "" }, { "docid": "31c8eccebe5eb3ade8fc517697ff3df0", "score": "0.6561542", "text": "def parameters(self):\n return self._parameters", "title": "" }, { "docid": "980f12fa5faa1886efddf83a4c46accb", "score": "0.6538671", "text": "def parameters(self) -> Dict[Any, List[Any]]:\n return self._parameters", "title": "" }, { "docid": "533136426d9c4855c314bd74ed25750f", "score": "0.6510776", "text": "def parameters(self):\n return tuple(param.data for param in self.params())", "title": "" }, { "docid": "34b0ba001faa89ff0b31e5b57b0b6e79", "score": "0.6510613", "text": "def params():\n raise NotImplementedError", "title": "" }, { "docid": "34b0ba001faa89ff0b31e5b57b0b6e79", "score": "0.6510613", "text": "def params():\n raise NotImplementedError", "title": "" }, { "docid": "915b7acf232ca98ec2d6ad8cc86079aa", "score": "0.6506615", "text": "def params(self):", "title": "" }, { "docid": "bd9f67504952c103cfe1a09a376d168d", "score": "0.6505649", "text": "def parameters(self):\n return self.__parameters__", "title": "" }, { "docid": "a12e41aaf81582ca2ffaf530e95f87a1", "score": "0.64714926", "text": "def get_params(self) -> Dict[str, Any]:\n dictionary = {param: getattr(self, param) for param in self.defence_params}\n return dictionary", "title": "" }, { "docid": "65b82aa90b6537d0effc03c6efdc8ba0", "score": "0.6457402", "text": "def params(self):\n raise NotImplementedError()", "title": "" }, { "docid": "9a7558346739a851542312de9f187ba9", "score": "0.64486134", "text": "def get_parameters(self):\n parameter_names = self.PARAMETERS.keys()\n # TODO: Unresolved reference for processor\n parameter_values = [getattr(processor, n) for n in parameter_names]\n return dict(zip(parameter_names, parameter_values))", "title": "" }, { "docid": "76a85c3c8b4a344fcb4d6a452dc8054f", "score": "0.64344424", "text": "def get_params_info(cls):\n return dict(\n lch='channel length, in meters.',\n w_dict='width dictionary.',\n intent_dict='intent dictionary.',\n fg_dict='number of fingers dictionary.',\n ndum='number of dummies on each side.',\n ptap_w='NMOS substrate width, in meters/number of fins.',\n ntap_w='PMOS substrate width, in meters/number of fins.',\n show_pins='True to draw pin geometries.',\n )", "title": "" }, { "docid": "76a85c3c8b4a344fcb4d6a452dc8054f", "score": 
"0.64344424", "text": "def get_params_info(cls):\n return dict(\n lch='channel length, in meters.',\n w_dict='width dictionary.',\n intent_dict='intent dictionary.',\n fg_dict='number of fingers dictionary.',\n ndum='number of dummies on each side.',\n ptap_w='NMOS substrate width, in meters/number of fins.',\n ntap_w='PMOS substrate width, in meters/number of fins.',\n show_pins='True to draw pin geometries.',\n )", "title": "" }, { "docid": "76a85c3c8b4a344fcb4d6a452dc8054f", "score": "0.64344424", "text": "def get_params_info(cls):\n return dict(\n lch='channel length, in meters.',\n w_dict='width dictionary.',\n intent_dict='intent dictionary.',\n fg_dict='number of fingers dictionary.',\n ndum='number of dummies on each side.',\n ptap_w='NMOS substrate width, in meters/number of fins.',\n ntap_w='PMOS substrate width, in meters/number of fins.',\n show_pins='True to draw pin geometries.',\n )", "title": "" }, { "docid": "e99162ac38941e6556d0705993e57414", "score": "0.64237624", "text": "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "title": "" }, { "docid": "e99162ac38941e6556d0705993e57414", "score": "0.64237624", "text": "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "title": "" }, { "docid": "c794a57cfc9fdd0b224b8bdacd683bbb", "score": "0.64169204", "text": "def parameters(self) -> Optional[str]:\n return pulumi.get(self, \"parameters\")", "title": "" }, { "docid": "13c2c14b032ebecb16942792d5379631", "score": "0.6412162", "text": "def get_parameters():\n params_exits = False\n if rospy.has_param('~row_skip'):\n global row_skip\n row_skip = rospy.get_param('~row_skip')\n params_exits = True\n if rospy.has_param('~outer_speed_max'):\n global outer_speed_max\n outer_speed_max = rospy.get_param('~outer_speed_max')\n params_exits = True\n if PARAM_DEBUG:\n global pub_params\n pub_params.publish(rospy.get_name()+ \" \" +str(params_exits))", "title": "" }, { "docid": "542458f22973a480103decdeda32442e", "score": "0.64030164", "text": "def get_parameters(self):\n strategy_parameters = {}\n for name in self.parameters:\n strategy_parameters[name] = getattr(self, name)\n return strategy_parameters", "title": "" }, { "docid": "307ff0c87d1ef0ab5b88ea15576b209d", "score": "0.6401688", "text": "def get_params():\n return Evaluator.get_params().merge({\n 'root': './datasets/flickr32',\n 'file': './flickr32_predictions.csv',\n 'dataset': 'test',\n 'dataloader': {\n 'num_workers': 8,\n 'shuffle': False,\n 'batch_size': 8\n },\n 'iou_threshold': 0.5\n })", "title": "" }, { "docid": "da06a3228259e9c2623f74e4015c3aaf", "score": "0.6368078", "text": "def parameters(self) -> Mapping[str, Any]:", "title": "" }, { "docid": "0689a330290966797bafc1eb457de12b", "score": "0.6363163", "text": "def get_resource_params(self):\n return Parameter.list()", "title": "" }, { "docid": "0689a330290966797bafc1eb457de12b", "score": "0.6363163", "text": "def get_resource_params(self):\n return Parameter.list()", "title": "" }, { "docid": "0689a330290966797bafc1eb457de12b", "score": "0.6363163", "text": "def get_resource_params(self):\n return Parameter.list()", "title": "" }, { "docid": "0689a330290966797bafc1eb457de12b", "score": "0.6363163", "text": "def get_resource_params(self):\n return Parameter.list()", "title": "" }, { "docid": "0689a330290966797bafc1eb457de12b", "score": "0.6363163", "text": "def get_resource_params(self):\n return Parameter.list()", "title": "" }, { "docid": "c6523b0f2ed4ad1003d8346605336339", 
"score": "0.63529044", "text": "def _generate_params(self):\n return {\n 'lis_outcome_service_url': self.url,\n 'lis_result_sourcedid': self.sourced_id,\n 'resource_link_id': self.url,\n 'user_id': self.user_id,\n 'oauth_consumer_key': self.key\n }", "title": "" }, { "docid": "ebf9277fb75828e063235f9b2301f93e", "score": "0.6334125", "text": "def parameters_names(self):\n return self.params_dict.keys()", "title": "" }, { "docid": "43e194a4de565ed6d9080fd366e97602", "score": "0.63313204", "text": "def params(self):\n return tuple()", "title": "" }, { "docid": "9b7f63ab711a22e394117d5083165b8a", "score": "0.63278645", "text": "def params(self):\n return {'identity_map': self.identity_map,\n 'num_caps': self.num_caps,\n 'act_fn': self.act_fn,\n 'vec_dim': self.vec_dim,\n 'batch_size': self.batch_size}", "title": "" }, { "docid": "b568ed135319af732559bbd3def3d05f", "score": "0.6324194", "text": "def get_params(self) -> Tuple[np.ndarray, np.ndarray]:\n return self.model.get_params()", "title": "" }, { "docid": "a2e970d8b6d8bee040ef7a02e002ad61", "score": "0.63225746", "text": "def get_params(self):\n self.max_range = rospy.get_param('sonar_maxrange')\n self.min_range = rospy.get_param('sonar_minrange') \n self.angle_range = rospy.get_param('sweep_angle')\n self.step = rospy.get_param('sonar_step')\n self.ang_noise = rospy.get_param('angle_noise')\n self.loc_noise = rospy.get_param('location_noise')\n self.rng_noise = rospy.get_param('range_noise')", "title": "" }, { "docid": "16fd28a4a51642fdf9e28eabe9003787", "score": "0.6318624", "text": "def parameters(self) -> dict:\n return self.__parameters", "title": "" }, { "docid": "16fd28a4a51642fdf9e28eabe9003787", "score": "0.6318624", "text": "def parameters(self) -> dict:\n return self.__parameters", "title": "" }, { "docid": "eb211462669a69f218a38a45519d9e21", "score": "0.6307788", "text": "def getParameterNames(self) :\n\t\treturn self.getParameterDict().keys()", "title": "" }, { "docid": "b84a5c1365893b64046ccfe708cd533d", "score": "0.63027376", "text": "def getParams(self):\n return getattr(self.getDocumentedObject(), \"_params\")", "title": "" }, { "docid": "8464dc7d6398b606fe1fb32993dbdcc8", "score": "0.62975425", "text": "def get_params(self, *args, **kwargs):\n return {}", "title": "" }, { "docid": "b2713cd08a5e7a9672eb3c3c9bf204f1", "score": "0.62889785", "text": "def params(self) -> List[Tuple[str, str]]:\n raise NotImplementedError()", "title": "" }, { "docid": "553d1878cd6bb5b4945c3b1325f40880", "score": "0.6288296", "text": "def parameters(self):\n return _libBornAgainCore.IterationInfo_parameters(self)", "title": "" }, { "docid": "e6fbf793bd79463e91c5a334844f3b49", "score": "0.6284942", "text": "def get_params(self):\n return {\n 'model': self.model,\n 'model_parameters': self.model_parameters,\n 'decomposition': self.decomposition,\n 'n_components': self.n_components,\n }", "title": "" }, { "docid": "cd5757ed303053fba911e50586809cff", "score": "0.62814105", "text": "def _param_names(self):\n return self.pnames", "title": "" }, { "docid": "f14b828db956923a82beb9c1521dee62", "score": "0.62808603", "text": "def get_parameters(self):\n params = super().get_parameters()\n params.update({'p': self.p})\n return params", "title": "" }, { "docid": "fc8039afdf4ef2de1e21397b8a4b887f", "score": "0.6275107", "text": "def get_params(self) -> np.ndarray:\n pass", "title": "" }, { "docid": "09f5c8dcfcaa2dbc997b83f4911c38d4", "score": "0.6272307", "text": "def get_parameters():\n\n params = {\n 'patient_data_bucket': 
ssm_client.get_parameter(Name='/macie_demo/patient_data_bucket')['Parameter']['Value']\n }\n\n return params", "title": "" }, { "docid": "b046560528b3bbb7ab6ec78af58a86f6", "score": "0.62721497", "text": "def GetParameters(self):\n return _SimpleITK.Transform_GetParameters(self)", "title": "" }, { "docid": "8a00eecaa9b578a517c8ba37d0eeada4", "score": "0.6267052", "text": "def showParams(self):\n return [p.name() for p in self.params()]", "title": "" }, { "docid": "9f6301482d019641040ce3b811102690", "score": "0.6266573", "text": "def getParams(self, uid):\n deviceType = self.getDeviceType(uid)\n if deviceType not in self.deviceTypes:\n print(\"Bad deviceType of uid\")\n return None\n params = self.deviceTypes[deviceType].params\n return params", "title": "" }, { "docid": "d8c6a8e3fdf020fab5d75e074afc73b9", "score": "0.62626696", "text": "def parameters(self):\n return self.model.parameters()", "title": "" }, { "docid": "91f85587a05c2e9821388f1c9d464f9a", "score": "0.6260303", "text": "def currentParams(self):\n\t\trecord = self.registrar.currentRecord()\n\t\treturn {\"length\": record.length, \"nstep\": record.nstep}", "title": "" } ]
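The negative passages above are all variations on one accessor idiom: expose a component's configuration as a plain dict via get_params, with abstract bases raising NotImplementedError. A minimal sketch of that pattern follows; the class and attribute names are illustrative only, not taken from any of the snippets above.

class Component:
    # Abstract base: concrete components must expose their configuration.
    def get_params(self):
        raise NotImplementedError

class HttpEndpoint(Component):
    # Hypothetical concrete component used only for illustration.
    def __init__(self, url, timeout):
        self.url = url
        self.timeout = timeout

    def get_params(self):
        # Return the configuration as a plain dict, the shape most of
        # the snippets above converge on.
        return {"url": self.url, "timeout": self.timeout}

print(HttpEndpoint("https://example.com", 20).get_params())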
a85ff5c8323343bb03dfbcf523a3880f
Actual Sending of the sms
[ { "docid": "68efa6b52c41765c3886efd016195294", "score": "0.7036426", "text": "def send_message(self, sms_gateway_id, from_number, to_number, sms_content, my_model_name='', my_record_id=0, media=None, queued_sms_message=None, media_filename=False):\n sms_account = self.env['sms.account'].browse(sms_gateway_id)\n\n #format the from number before sending\n format_from = from_number\n if \" \" in format_from: format_from.replace(\" \", \"\")\n\n #format the to number before sending\n format_to = to_number\n if \" \" in format_to: format_to.replace(\" \", \"\")\n\n #send the sms message\n params = {\n 'userkey': sms_account.zenviva_userkey,\n 'passkey': sms_account.zenviva_passkey,\n 'to': format_to,\n 'message': sms_content,\n }\n\n response = requests.post(\n sms_account.zenviva_api_url,\n data=params\n )\n\n #Analyse the response string and determine if it sent successfully other wise return a human readable error message\n human_read_error = \"\"\n sms_gateway_message_id = \"\"\n delivary_state = \"failed\"\n if response:\n json_response = json.loads(response.text)\n if json_response[\"status\"] == \"1\":\n delivary_state = \"successful\"\n elif json_response[\"status\"] == \"0\":\n delivary_state = \"failed\"\n human_read_error = json_response[\"text\"]\n else:\n delivary_state = \"failed\"\n human_read_error = \"Kode kesalahan tidak diketahui\"\n\n #send a response back saying how the sending went\n my_sms_response = sms_response()\n my_sms_response.delivary_state = delivary_state\n my_sms_response.response_string = response.text\n my_sms_response.human_read_error = human_read_error\n my_sms_response.message_id = sms_gateway_message_id\n return my_sms_response", "title": "" } ]
[ { "docid": "99a9ead6ee65b8d6a10b557b8b247a88", "score": "0.7578458", "text": "def _send(self, message):\r\n charset='UTF-8'\r\n params = {\r\n 'action' : 'sendsms',\r\n 'user' : self.get_username(),\r\n 'password' : self.get_password(),\r\n 'from' : message.from_phone,\r\n 'to' : \",\".join(message.to),\r\n 'text' : message.body,\r\n 'clientcharset' : charset,\r\n 'detectcharset' : 1,\r\n 'maxsplit': int(math.ceil(len(message.body) / 160))\r\n }\r\n \r\n req = urllib2.Request(SMSGLOBAL_API_URL_SENDSMS, urllib.urlencode(params))\r\n result_page = urllib2.urlopen(req).read()\r\n results = self._parse_response(result_page)\r\n \r\n if results is None:\r\n if not self.fail_silently:\r\n raise Exception(\"Error determining response: [\" + result_page + \"]\")\r\n return False\r\n \r\n code, sendqmsgid, msgid = results\r\n \r\n if code != '0':\r\n if not self.fail_silently:\r\n raise Exception(\"Error sending sms: [%s], extracted results(code, sendqmsgid, msgid): [%s]\" % (result_page, results))\r\n return False\r\n else:\r\n logger.info('SENT to: %s; sender: %s; code: %s; sendqmsgid: %s; msgid: %s; message: %s' % (\r\n message.to,\r\n message.from_phone,\r\n code, \r\n sendqmsgid, \r\n msgid,\r\n message.body\r\n ))\r\n return True", "title": "" }, { "docid": "0e2f82952ee813822d7490c73e4644e4", "score": "0.71073484", "text": "def send_sms(self, sms_text):\n client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)\n message = client.messages.create(body=sms_text, from_=TWILIO_PH_NO, to=MY_PH_NO)\n print(f'SMS Status: {message.status}')", "title": "" }, { "docid": "e8e19f80bb8b1baa2c7c5e6d48d4903a", "score": "0.7080253", "text": "def send_sms_transactional(request):\n try:\n input_json = request\n headers_var = {\n 'content-type': \"application/json\",\n 'authkey': \"334097AuBU02YGOYB5efae2b6P1\"\n }\n payload = dict(zip(['sender', 'route', 'country', 'sms'],\n [\"genericbackend\", \"4\", input_json['country_code'],\n [dict(zip([\"message\", \"to\"], [input_json['text'], [input_json['phone_number']]]))]]))\n result = requests.post(\"https://api.msg91.com/api/v2/sendsms\", headers=headers_var, json=payload)\n # print(result.content)\n output_json = dict(zip(['Status', 'Message', 'Payload'],\n [\"Success\", \"OTP SMS has been sent to given phone number\", result.content]))\n return output_json\n\n except Exception as ex:\n output_json = dict(zip([\"Status\", \"Message\", \"Payload\"],\n [\"Failure\", f\"Unable to send SMS via MSG91. 
Exception encountered: {ex}\", None]))\n return output_json", "title": "" }, { "docid": "531d812c989856c9e679ad87d12f4c06", "score": "0.7064733", "text": "def send_sms(self, toNumber, message):\r\n\t\tself._gv.send_sms(toNumber, message)", "title": "" }, { "docid": "c5ca4825c66440add9ac10d930f7646e", "score": "0.69973546", "text": "def send():", "title": "" }, { "docid": "74990ef0f79f7a8dd414330fd50805e5", "score": "0.69689703", "text": "def send(self, fail_silently=False):\r\n if not self.to:\r\n # Don't bother creating the connection if there's nobody to send to\r\n return 0\r\n res = self.get_connection(fail_silently).send_messages([self])\r\n sms_post_send.send(sender=self, to=self.to, from_phone=self.from_phone, body=self.body)\r\n return res", "title": "" }, { "docid": "f7a6b71a76e919d15fec2373ed83a69b", "score": "0.69555724", "text": "def test_send_sms_message(self):\n deviceid = 1234\n body = 'Hello world!'\n resp = self.client.sms.send_message(deviceid, body)\n self.assertTrue(resp.get('success'))", "title": "" }, { "docid": "98c3b634fca89fd2cdc7b60596b9b8e9", "score": "0.6949564", "text": "def send_sms(request):\n try:\n input_json = request\n # initializing invalid input types to promotional sms\n if 'sms_type' not in input_json or input_json['sms_type'] not in [\"transactional\", \"promotional\"]:\n input_json['sms_type'] = \"promotional\"\n if input_json['sms_type'] == \"promotional\":\n output_json = send_sms_promotional(input_json)\n return output_json\n output_json = send_sms_transactional(input_json)\n return output_json\n except Exception as ex:\n output_json = dict(zip([\"Status\", \"Message\", \"Payload\"],\n [\"Failure\", f\"Unable to send SMS. Exception encountered: {ex}\", None]))\n return output_json", "title": "" }, { "docid": "05dd3744e475f54ce857c9b9e021b1ba", "score": "0.6907953", "text": "def send_sms(self, msg ):\n load_dotenv(find_dotenv())\n account_sid = os.environ.get(\"TWILIO_ACCOUNT_SID\")\n my_phone = os.environ.get(\"MY_PHONE\")\n auth_token = os.environ.get(\"AUTH_TOKEN\")\n\n client = Client(account_sid,auth_token)\n client.messages.create(\n to=my_phone,\n from_=\"+18082154128\",\n body=msg\n )\n call = client.calls.create(\n to=my_phone,\n from_=\"+18082154128\",\n twiml=\"<Response><Say>PS5 is in stock!</Say></Response>\",\n )", "title": "" }, { "docid": "956b96f08d15dba7701f08d2b866bc0d", "score": "0.68351704", "text": "def send(self, msg):\n pass", "title": "" }, { "docid": "be5c404be8a220bc4b4cb4279a3fb036", "score": "0.6794282", "text": "def SendSMS(product,\n sms_from,\n mobiles,\n content):\n socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)\n return ServerProxy(\"http://nanny.netease.com:8080\").accept_msg(\n product,\n sms_from,\n mobiles,\n content)", "title": "" }, { "docid": "c6da9d4f74293b34c2d2337c572684c5", "score": "0.6761882", "text": "def send(msg, delay=True, *args, **kwargs):\n context = {\n 'phone_number': urllib.quote(msg.phone_number),\n 'sender_id': urllib.quote(kwargs.get(\"sender_id\", DEFAULT_SENDER_ID)),\n }\n encoding_param = \"\"\n try:\n text = msg.text.encode(\"iso-8859-1\")\n context[\"message\"] = clean_outgoing_sms_text(text)\n except UnicodeEncodeError:\n context[\"message\"] = msg.text.encode(\"utf-16-be\").encode(\"hex\")\n encoding_param = \"&encoding=ucs\"\n url = \"%s?%s%s\" % (settings.SMS_GATEWAY_URL, settings.SMS_GATEWAY_PARAMS % context, encoding_param)\n # just opening the url is enough to send the message\n # TODO, check response\n resp = urllib2.urlopen(url).read()\n msg.save()\n\n 
create_billable_for_sms(msg, API_ID, delay=delay, response=resp)\n\n return resp", "title": "" }, { "docid": "0f796c012c3c26ecf182579972999b61", "score": "0.67497635", "text": "def SendTextToPhone(self, number, message):\n try:\n if(self.gm862.serialConnect.isOpen()):\n print(\"Serial port is open GSM\")\n phoneNumberCmd = \"AT+CMGS=\"+number+\"\\r\"\n self.gm862.serialConnect.write(phoneNumberCmd.encode())\n self.gm862.serialConnect.flush()\n response = self.gm862.serialConnect.read(1000)\n self.gm862.serialConnect.flush()\n print(response.strip()) \n \"\"\"Send SMS. Set phone number, wait for > then send message in IRA format \n ending with ctrl-z (0x1a) or 0x1b to cancel message send\n wait for > error 331 is network service, cmgs<message ref number> is valid sent result\n dont always get the >... just send anyway\"\"\" \n self.gm862.serialConnect.write(self.gsm_encode(message) + b\"\\x1A\")\n self.gm862.serialConnect.flush()\n print(self.gm862.serialConnect.read(250).strip())\n self.gm862.serialConnect.flush()\n time.sleep(1)\n else:\n print(\"Serial port is not open\")\n except:\n print(\"Error with serial port: \", sys.exc_info()[0])", "title": "" }, { "docid": "4bf008b416fa8a8e7ca8f4f04e32fbe6", "score": "0.6718336", "text": "def test_sms(callto=\"5102297683\",callfrom=\"5102503825\"):\n text = \"How doth the little crocodile improve his shining tail\"\n sms(callto,callfrom,text)", "title": "" }, { "docid": "638a1aa35ef16bd528a7ddb8b042ab53", "score": "0.669619", "text": "def sendMessage(self, message):\n self.server.sendSMS(self.conversation[\"address\"], message)", "title": "" }, { "docid": "1943457a000d832d4c67eea2c0e3d29f", "score": "0.6658529", "text": "def _send(self, message):\r\n\r\n params = {\r\n 'V': SMSPUBLI_API_VERSION, \r\n 'UN': SMSPUBLI_USERNAME, \r\n 'PWD': SMSPUBLI_PASSWORD,\r\n 'R': SMSPUBLI_ROUTE, \r\n 'SA': message.from_phone,\r\n 'DA': ','.join(message.to),\r\n 'M': message.body.encode('latin-1'),\r\n 'DC': SMSPUBLI_DC,\r\n 'DR': SMSPUBLI_DR, \r\n 'UR': message.from_phone\r\n }\r\n if SMSPUBLI_ALLOW_LONG_SMS:\r\n params['LM'] = '1'\r\n\r\n response = requests.post(SMSPUBLI_API_URL, params)\r\n if response.status_code != 200:\r\n if not self.fail_silently:\r\n raise\r\n else:\r\n return False\r\n\r\n response_msg, response_code = response.content.split(':')\r\n if response_msg == 'OK':\r\n try:\r\n if \",\" in response_code:\r\n codes = map(int, response_code.split(\",\"))\r\n else:\r\n codes = [int(response_code)]\r\n\r\n for code in codes:\r\n if code == -5:\r\n #: TODO send error signal (no $$)\r\n pass\r\n elif code == -3:\r\n #: TODO send error signal (incorrect num)\r\n pass\r\n\r\n return True\r\n\r\n except (ValueError, TypeError):\r\n if not self.fail_silently:\r\n raise\r\n return False\r\n \r\n return False", "title": "" }, { "docid": "4608389b4e57c8e26cab8fd6f5f5c296", "score": "0.6646789", "text": "def send_mt_nosign(self, id, msgid, mobile, content):\r\n url = 'http://120.197.89.173:8081/openapi/router'\r\n secret = '494e58f3a9808daea3bef94078563109'\r\n #NOTE: system_para\r\n appKey = 'j1baerwhjp'\r\n sessionId = ''\r\n method = 'sms.service.send'\r\n v = '1.0'\r\n format = 'json'\r\n locale = ''\r\n sign = '' #upper()\r\n system_para_dict = dict(appKey=appKey,\r\n method=method,\r\n v=v,\r\n format=format,\r\n locale=locale,\r\n sessionId=sessionId,\r\n sign=sign)\r\n\r\n system_para_list = []\r\n business_para_list = []\r\n system_para_list = [\"appKey\" + appKey, \"method\" + method, \"v\" + v, \"format\" + format]\r\n if locale:\r\n 
system_para_list.append(\"locale\"+locale)\r\n if sessionId:\r\n system_para_list.append(\"sessionId\"+sessionId)\r\n\r\n #NOTE: business_para, 4 items.\r\n phoneNumbers = mobile\r\n Content = content \r\n EntCode = '106571205329'\r\n ReportId = msgid\r\n\r\n isImmediately = True #lower\r\n\r\n business_para_list = [\"phoneNumbers\"+phoneNumbers, \"Content\"+Content, \"EntCode\"+EntCode, \"ReportId\"+str(ReportId), \"isImmediately\"+str(isImmediately)]\r\n business_para_dict = dict(phoneNumbers=phoneNumbers,\r\n Content=Content,\r\n EntCode=EntCode,\r\n ReportId=ReportId,\r\n isImmediately=isImmediately)\r\n\r\n parameters_list = self.get_parameters_list(system_para_list, business_para_list)\r\n sign = self.get_sign(secret, parameters_list)\r\n system_para_dict['sign'] = sign\r\n request_url = self.get_request_url(url, system_para_dict, business_para_dict)\r\n\r\n h = httplib2.Http(timeout=20) # second\r\n response, content = h.request(request_url)\r\n return response, content", "title": "" }, { "docid": "e6ce8e21e873063c29feb418f3df0400", "score": "0.6644227", "text": "def dispatch_message(self):\n print(self.to)\n code = self.create_code()\n\n self.send_sms(self.to, code)\n\n print('~CODE', code)\n return {'message': _('SMS message with MFA code has been sent.')}", "title": "" }, { "docid": "b13665b8574ac0d33a9714d98fbcc352", "score": "0.661255", "text": "def incoming_twil_sms():\n sms_from = request.form['From']\n print(f\"from: {sms_from} type: {type(sms_from)}\")\n sms_to = request.form['To']\n sms_txt = request.form['Body']\n address = sms.email_lookup(sms_to) # Twilio numbers send a +\n\n if address:\n return str(sms.send_email(to_email=address, txt_from=sms_from, txt_body=sms_txt, txt_to=sms_to))\n else:\n return \"invalid number\"", "title": "" }, { "docid": "5e63e8e22e24a35b0919fb7e1289a46e", "score": "0.65959954", "text": "def sendall(self, msg):\n self.write(msg)", "title": "" }, { "docid": "8ecd191f2249562baac6d1a7eee5382a", "score": "0.6595028", "text": "def do_send_sms_spryng(num, text):\n cleaned_num = cleaned_number(num)\n if not is_valid_phone_number(cleaned_num):\n return False\n\n if not text:\n return False\n\n if len(text) > app.config[\"MAX_SMS_LENGTH\"]:\n return False\n\n url = app.config[\"SPRYNG_API_URL\"]\n token = app.config[\"SPRYNG_API_BEARER_TOKEN\"]\n spryng_headers = {\n \"Accept\": \"application/json\",\n \"Authorization\": f\"Bearer {token}\",\n }\n\n spryng_request = {\n \"body\": text,\n \"encoding\": \"auto\",\n \"originator\": app.config[\"SPRYNG_MSG_ORIGINATOR\"],\n \"recipients\": [num],\n \"route\": app.config[\"SPRYNG_API_ROUTE\"],\n # \"scheduled_at\": \"now\" #optional, default: \"now\"\n }\n print(\"spryng request\", url, spryng_request, spryng_headers)\n response = requests.post(url, headers=spryng_headers, json=spryng_request)\n rj = response.json()\n print(\"spryng response\", response, \"json-content:\", rj)\n\n if not response.ok:\n return False\n\n #success = rj[\"status\"] in [\"scheduled\", \"pending\", \"delivered\"]\n # could also be \"failed\"\n success = True\n #assume success, FIXME!\n\n return success", "title": "" }, { "docid": "8f183dac2a4c5ecdf2147882052a8bc5", "score": "0.65809655", "text": "def test_send(twilio_client_class):\n sms = twilio.TwilioSMSBackend()\n twilio_client_class.assert_called_with('account_sid', 'token')\n\n sms.send('1', '2', 'body')\n sms.client.sms.messages.create.assert_called_with(\n to='1', from_='2', body='body')", "title": "" }, { "docid": "f22e139237cefe79d9413798195a7a64", "score": "0.65795803", 
"text": "def SendSMS(phone, pattern, message):\n # *************************************************\n \"\"\"!!!Формирование текста СМС и номеров получателей!!!\"\"\"\n ph = re.sub(r'([-\\s\\+\\(\\)]*)', \"\", phone)\n assert (ph)[:2] == '79', 'phone should start with 79'\n try:\n ph = int(ph)\n except ValueError as e:\n raise ValueError(\"invalid phone number {0}\".format(phone))\n assert isinstance(pattern, int) and pattern >= 0 and pattern < len(settings.SMS_PATTERNS), \"invalid pattern\"\n\n # кому отправлять СМС\n # phone = 7...\n\n # текст СМС\n # newPass = \"Qwer1234\" # для записи переменных данных\n # msg = \"ваш новый пароль к системе мониторинга \" + newPass\n # msg = \"тестовое СМС для системы мониторинга\"\n # *************************************************\n\n if re.search(r'\\{\\d\\}', settings.SMS_PATTERNS[pattern]):\n msg = settings.SMS_PATTERNS[pattern].format(message)\n else:\n msg = settings.SMS_PATTERNS[pattern]\n\n # тело запроса\n sms = {\n \"from\": \"PZMONITOR\",\n \"to\": ph,\n \"message\": msg\n }\n\n # заголовок\n head = {\n \"Authorization\": \"Basic %s\" % settings.SMS_PASS_PHRASE\n }\n\n # отправка запроса\n r = requests.post(settings.SMS_API_URL, headers=head, json=sms)\n\n # статус и отчет о выполнении, не обязательно\n print(r.status_code)\n print(r.text)\n\n return r.status_code", "title": "" }, { "docid": "6ba7dde28e535a58f591d3703cbfd157", "score": "0.65672004", "text": "def send_sms(message):\n if not SEND_SMS:\n return False\n\n client = Client(twilio_settings.account_sid, twilio_settings.auth_token)\n\n message = client.api.account.messages.create(\n to=twilio_settings.phone_to,\n from_=twilio_settings.phone_from,\n body=message)\n\n return True", "title": "" }, { "docid": "22a372ee308621dffdb06fc00cbfd3b4", "score": "0.65611416", "text": "def send_message():\n\n # Get content from form\n student_id = request.form.get('phone_dropdown_id')\n text_message_content = request.form.get('message_content')\n student = crud.get_student_phone(student_id)\n student_num = student.student_phone\n\n # If testing, don't send the text\n if os.environ.get('TESTING'):\n return jsonify({'message_content': text_message_content})\n\n account_sid = os.environ.get('ACCOUNT_SID')\n auth_token = os.environ.get('AUTH_TOKEN')\n client = Client(account_sid, auth_token)\n\n client.messages.create(\n body=text_message_content,\n to=str(\"1\" + student_num),\n from_=os.environ[\"TWILIO_PHONE\"]\n )\n\n return jsonify({'message_content': text_message_content})", "title": "" }, { "docid": "abb5441a413e07b473f717792f356588", "score": "0.6556712", "text": "def send(self):\n pass", "title": "" }, { "docid": "a0b053f1b6a17c5806181af8b7dfc3ec", "score": "0.6539937", "text": "def thread_send(self, body, recipients):\r\n try:\r\n for recipient in recipients: \r\n sms = {}\r\n sms['from'] = self.conf['SENDER_NAME']\r\n sms['to'] = recipient\r\n sms['text'] = body\r\n logger.debug(str(sms))\r\n http_response = self.provider.send_message(sms)\r\n response = http_response['messages'][0]\r\n if response['status'] == '0':\r\n logger.info('SMS sent to %s' % (recipient))\r\n else:\r\n logger.error('SMS Fail to %s Error: %s' % (recipient, response['error-text']))\r\n \r\n # log nexmo balance\r\n balance = '%.1f' % (float(response['remaining-balance']))\r\n logger.info('Remaining balance is %s' % (balance))\r\n except Exception as e:\r\n logger.exception('Failed sending SMS')\r\n \r\n # Email the balance value to admin\r\n try: EmailNexmoBalance(response['remaining-balance'])\r\n except: pass", 
"title": "" }, { "docid": "5018cd7a7f835a6f2189b10e79a8de19", "score": "0.6533249", "text": "def send_message(phone, num, serial_set):\n if num == 1:\n sms_message = '{} card Activated'.format(num) + ' ' + 'serial number: {}'.format(serial_set[0])\n else:\n sms_message = '{} cards Activated'.format(num) + ' ' + 'range: from {} to {}'.format(serial_set[0], serial_set[-1])\n\n phone_number = [phone]\n\n try:\n response = sms.send(sms_message, phone_number)\n print(response)\n except ConnectionError:\n return 'Network Error'", "title": "" }, { "docid": "3c9fb3451d7cca2a46f9cb8323fc2b48", "score": "0.6512432", "text": "def sms(self, sms):\n\n self._sms = sms", "title": "" }, { "docid": "909ca706b601058b03ac71158466a77b", "score": "0.6511297", "text": "def test_sms_sending(self):\n reader = ListReader(SAMPLE_FRIENDS)\n sender = PrintSMSSender()\n\n with unittest.mock.patch.object(PrintSMSSender, \"send_sms\") as send_sms:\n utils.send_messages(datetime.date(2020, 11, 7), reader, sender)\n send_sms.assert_not_called()\n\n utils.send_messages(datetime.date(2000, 9, 11), reader, sender)\n f, msg = SAMPLE_FRIENDS[1], SAMPLE_MESSAGES[1]\n send_sms.assert_called_with(f, f.extra_data[\"phone\"], msg)\n\n with unittest.mock.patch.object(PrintSMSSender, \"send_sms\") as send_sms:\n with self.assertRaises(Exception):\n utils.send_messages(datetime.date(2000, 10, 8), reader, sender)\n send_sms.assert_not_called()\n\n with unittest.mock.patch.object(PrintSender, \"send_email\") as send_email:\n utils.send_messages(datetime.date(1900, 9, 11), reader, sender)\n send_email.assert_not_called()", "title": "" }, { "docid": "ccc77f0e1ee5bd0a86fd111e8c33b8e3", "score": "0.64992535", "text": "def send_message(self, message):\n pass", "title": "" }, { "docid": "ed5db0f53adce5073aa44af9e7daa8a7", "score": "0.6490204", "text": "def send_sms():\n\n # Extract the form values:\n to_number = request.form['to_number']\n message = request.form['message']\n\n # Send the SMS message:\n result = nexmo_client.send_message({\n 'from': NEXMO_NUMBER,\n 'to': to_number,\n 'text': message,\n })\n\n # Set a message for the user to see on the next view:\n err = extract_error(result)\n if err is not None:\n flash(\"There was a problem sending your message: \" + err, 'error')\n else:\n flash(\"You just sent a message to \" + to_number)\n\n # Redirect the user back to the form:\n return redirect(url_for('index'))", "title": "" }, { "docid": "6f6cf280f0b1a0717ff642ec89332dd7", "score": "0.6488142", "text": "def send_mobileconfirmation_sms(code):\n log_info('###send_mobileconfirmation_sms###')\n user = User.query.filter_by(id=current_user.id).first()\n user.mobileConfirmationCode = code\n user.mobileConfirmationCodeDT = datetime.now()\n user.mobileConfirmed = False\n user.mobileConfirmedDT = None\n db.session.commit()\n sms_message = render_template('administration/sms_templates/sms_mobile_confirmation.html', verification_code=code)\n smsfrom = 'Ganimides'\n log_variable('message', sms_message)\n #result = send_sms(user.mobile,smsfrom,sms_message)\n subject = \"please confirm your mobile\"\n result = send_email(user.email, subject, sms_message)\n log_variable('result',result)\n return(result)", "title": "" }, { "docid": "7c2e8f0217c1b48e53424c6ae899d42c", "score": "0.6480138", "text": "def incoming_anv_sms():\n sms_from = request.args.get('from', '')\n sms_to = request.args.get('to', '')\n sms_txt = request.args.get('message', '')\n address = sms.email_lookup(sms_to) # Anveo numbers do not send the +\n\n if address:\n return 
str(sms.send_email(to_email=address, txt_from=sms_from, txt_body=sms_txt, txt_to=sms_to))\n else:\n return \"invalid number\"", "title": "" }, { "docid": "d32e41512c022a7bc770b542c0bc2ab4", "score": "0.64628315", "text": "def send_msg(self, msg):\n\n # implemented in sub classes", "title": "" }, { "docid": "d96367d47a0620be03c0d22de7814d35", "score": "0.6438378", "text": "def sendMsg(self ):\n encrypted_msg = 'dgjzZcuwYVvgiMtBlzoa8RS7edxfMniMPR2naJakzDo6jfQKGGbzEee6ENKT4qW8o95BhdaLX1yonQuqKImGAJv9fdeyZEvjlfzrT5S4g3I='\n if self._m_user_id >= MIN_FROM_ID and self._m_user_id < MAX_FROM_ID:\n to_user_id = self._m_user_id + 5000 - 1\n log.debug(\"In sendMsg, from {} -> {}\".format(self._m_user_id, to_user_id))\n pdu_msg = ClientConnReq._MsgData(self._m_user_id, to_user_id, encrypted_msg )\n self._socket.send(pdu_msg)\n elif self._m_user_id >= MIN_TO_ID and self._m_user_id < MAX_TO_ID:\n pass", "title": "" }, { "docid": "5afc795b9fcdeaf3c508babb2bfb796a", "score": "0.64379925", "text": "def send_sms(body, from_phone, to, flash=False, fail_silently=False,\r\n auth_user=None, auth_password=None, connection=None):\r\n from sendsms.message import SmsMessage\r\n connection = connection or get_connection(\r\n username = auth_user, \r\n password = auth_password,\r\n fail_silently = fail_silently\r\n )\r\n return SmsMessage(body=body, from_phone=from_phone, to=to, \\\r\n flash=flash, connection=connection).send()", "title": "" }, { "docid": "0923c0f91f34b6d32bb1f87c63f11240", "score": "0.6432759", "text": "def send_messages(self, messages):\r\n count = 0\r\n for message in messages:\r\n message_body = unicodedata.normalize('NFKD', unicode(message.body)).encode('ascii', 'ignore')\r\n for tel_number in message.to:\r\n try:\r\n self.client.send(tel_number, message_body, getattr(settings, 'SMS_SLUZBA_API_USE_POST', True))\r\n except Exception:\r\n if self.fail_silently:\r\n log.exception('Error while sending sms via sms.sluzba.cz backend API.')\r\n else:\r\n raise\r\n else:\r\n count += 1\r\n\r\n return count", "title": "" }, { "docid": "ee2d1d7f41e6f8c5f643f6ded16147ac", "score": "0.6400599", "text": "def post_send_message(self, msg):\r\n pass", "title": "" }, { "docid": "bacc054dce12f26c682bae445c748f16", "score": "0.6359422", "text": "def test_send_message(self):\n pass", "title": "" }, { "docid": "9bd7e0b394350ce160e63c734b2d682f", "score": "0.63553065", "text": "def send_sms_promotional(request):\n client = boto3.client(\"sns\", aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n region_name=AWS_REGION)\n # client.setSMSAttributes(\n # {\n # attributes : {\n # DefaultSMSType : \"Transactional\"\n # }\n # },\n # function(error){\n # if(error){\n # console.log(error);\n # }\n # }\n # );\n # input_jsons = dict(mobile_numbers=[\n # '+917406135629',\n # '+919741601203',\n # '+917795123525',\n # '+918939099619',\n # '+919606230339',\n # '+918909308092',\n # '+919741292046',\n # '+919686064664'\n # ], message=\"Hello folks, welcome to www.genericbackend.com\")\n phone_number = request['phone_number']\n input_json = dict(mobile_numbers=[phone_number], message=f\"Hello folks, welcome to www.genericbackend.com, \"\n f\"The One Time Password (OTP) for registering to \"\n f\"genericbackend.com is {request['text']}\")\n some_list_of_contacts = input_json['mobile_numbers']\n\n # Add SMS Subscribers\n for number in some_list_of_contacts:\n client.subscribe(\n TopicArn=AWS_ARN,\n Protocol='sms',\n Endpoint=number # <-- number who'll receive an SMS message.\n )\n response = 
client.publish(\n TopicArn=AWS_ARN,\n Message=input_json[\"message\"],\n MessageAttributes={'string': {'DataType': 'String', 'StringValue': 'String', },\n 'AWS.SNS.SMS.SenderID': {'DataType': 'String', 'StringValue': \"abk\"}}\n )\n if response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200:\n output_json = {\"Status\": \"Success\", \"Message\": \"SMS has been sent\"}\n\n else:\n output_json = {\"Status\": \"Failure\", \"Message\": \"Something Went Wrong\"}\n return output_json", "title": "" }, { "docid": "0df87e7afe58015e8346192777bb1e51", "score": "0.6274136", "text": "def send_msg(self, msg):\n return self.sendall(Message(msg))", "title": "" }, { "docid": "2bb57875ddf03c493cd820e3affc3c50", "score": "0.6248984", "text": "def send_test_message(numObj):\n to_number = numObj.phone_number\n \n from_number= settings.TWILIO_TEST_NUMBER\n account_sid= settings.TWILIO_TEST_SID\n auth_token = settings.TWILIO_TEST_TOKEN\n \n out = send_text(\"Test.\", to_number, from_number, account_sid, auth_token)\n return(out)", "title": "" }, { "docid": "6725f9be61d53b4d3c7af748bff2f51d", "score": "0.62479943", "text": "def at_msg_send(self, text=None, to_obj=None, **kwargs):\r\n pass", "title": "" }, { "docid": "b8543f44dc92e0ade20b282382f36873", "score": "0.62401277", "text": "def message_sent(self, message):\n pass", "title": "" }, { "docid": "1079ea0aa65f149904bd5e24f9c9306b", "score": "0.6227844", "text": "def send_messages(self):\n\n messages = SendReminder.objects.filter(status='queued')[:50]\n self.info('found {0} reminder(s) to send'.format(messages.count()))\n for message in messages:\n connection = message.recipient.default_connection\n template = u'{reminder} {content}'.format(\n reminder=self.reminder,\n content=message.message or \"\"\n )\n if len(template) > 160:\n # Truncate at 160 characters but keeping whole words\n template = template[:160]\n words = template.split(' ')[:-1]\n template = u' '.join(words) + u'...'\n msg = OutgoingMessage(connection=connection, template=template)\n success = True\n try:\n self.router.outgoing(msg)\n except Exception, e:\n self.exception(e)\n success = False\n if success and msg.sent:\n self.debug('message sent successfully')\n message.status = 'sent'\n message.date_sent = datetime.datetime.now()\n else:\n self.debug('message failed to send')\n message.status = 'error'\n message.save()", "title": "" }, { "docid": "c4580b24534477452f02e60bbfa75285", "score": "0.62187266", "text": "def to_send(self):\n pass", "title": "" }, { "docid": "2580fe3f20a692c1d91505033f6cef2b", "score": "0.62180763", "text": "def do_POST(self):\r\n\r\n # Get a timestamp for the request\r\n timestamp = time.strftime('%d/%m/%y %H:%M:%S')\r\n\r\n # Redirect any POST request that does not match the current URL\r\n if not self.path == '/send_message':\r\n self.send_response(303)\r\n self.send_header('Location', '/sms_sender.html')\r\n self.end_headers()\r\n return\r\n\r\n # Get the length of the POST content\r\n content_length = int(self.headers.getheader('content-length'))\r\n\r\n # If there is POST content then process it\r\n if content_length:\r\n\r\n # Get the form data\r\n post_data = cgi.parse_qs(self.rfile.read(content_length))\r\n try:\r\n recipients = post_data['recipients'][0]\r\n message = post_data['message'][0]\r\n except KeyError, IndexError:\r\n self.serve_message(400, 'Error', 'The message request was missing either recipient or message data.')\r\n return\r\n\r\n # Check if message class is defined, if not default to '1'\r\n # Class 0 = Message is displayed but not stored 
on phone\r\n # Class 1 = Store the message on the phone\r\n # Class 2 = Store the message on the SIM card\r\n try:\r\n msg_class = post_data['class'][0]\r\n except KeyError, IndexError:\r\n msg_class = '1'\r\n\r\n # Validate the recipient data\r\n for char in recipients:\r\n if char not in ';+0123456789 \\t':\r\n self.serve_message(400, 'Error', 'The recipient data contains an invalid character (\"%s\").' % char)\r\n return\r\n\r\n # Validate the SMS class data\r\n try:\r\n msg_class = int(msg_class)\r\n except ValueError:\r\n self.serve_message(400, 'Error', 'The given SMS class is invalid.')\r\n\r\n if not 0 <= msg_class <= 2:\r\n self.serve_message(400, 'Error', 'The SMS class can only be either 0, 1 or 2.')\r\n return\r\n\r\n # Get a list of recipients from the recipient string\r\n recipient_list = [x.strip() for x in recipients.split(';') if x]\r\n\r\n for recipient in recipient_list:\r\n self.server.message_received({'timestamp': timestamp,\r\n 'recipient': recipient,\r\n 'class': msg_class,\r\n 'message': message,\r\n 'sender_ip': self.client_address[0]})\r\n\r\n self.serve_message(200, 'Message(s) Queued', 'Your message(s) have been added to the queue to be sent.')", "title": "" }, { "docid": "c457691f1bbcc47dc359933a458f8136", "score": "0.6215273", "text": "def create_sms(self, request, queryset):\n for verification_obj in queryset:\n self.send_code(request, verification_obj, PhoneVerification.VERIFICATION_METHOD_CHOICES.sms)", "title": "" }, { "docid": "e94fb647386dfc5205659c5f487c35b1", "score": "0.62148833", "text": "def send_sms_message(recipient, message, mtype='text'):\n params = {\n 'api_key': settings.NEXMO_USERNAME,\n 'api_secret': settings.NEXMO_PASSWORD,\n 'type': mtype,\n 'from': getSourceNumber(),\n 'to': recipient,\n 'text': message.encode('utf-8'),\n }\n sms = NexmoMessage(params)\n response = sms.send_request()\n return response", "title": "" }, { "docid": "701b175f4e726ee4b5f14e9c7bd91e34", "score": "0.62095726", "text": "def send_messages(self, messages):\n pass", "title": "" }, { "docid": "f3984407ee01ea9ffd54b608a0d7f981", "score": "0.62094337", "text": "def send_verification(self, number, otp):\n message = self._generate_message(otp)\n\n self.backend.send_sms(number, message)", "title": "" }, { "docid": "cb6d455a04dd347dbb10181e9c665702", "score": "0.6183902", "text": "def incoming_sms():\n global data\n # Get the message the user sent our Twilio number\n body = request.values.get('Body', None).lower()\n # Get the number the request was sent from\n from_number = request.form['From'].lower()\n\n response = game_logic.determine_response(data, from_number, body)\n # Text back the response\n if response is not None:\n resp = MessagingResponse()\n resp.message(response)\n return str(resp)\n return \"Tbd\"", "title": "" }, { "docid": "1cc5eb12a8d6afe8f07ad19debecc2e0", "score": "0.61696696", "text": "def send_test(to):", "title": "" }, { "docid": "45993d4dc767abcc3ad56a5edd4bbe84", "score": "0.61664146", "text": "def send_messages(self, sms_messages):\r\n if not sms_messages:\r\n return\r\n \r\n num_sent = 0\r\n for message in sms_messages:\r\n if self._send(message):\r\n num_sent += 1\r\n return num_sent", "title": "" }, { "docid": "da339a3f032f8da1e1427eb78c9ecca3", "score": "0.6164981", "text": "def sms_reply():\r\n # Fetch the message\r\n msg = request.form.get('Body')\r\n phone_no = request.form.get('From')\r\n\r\n print(phone_no , \" => \" , msg.lower() )\r\n client = Client(account_sid, auth_token)\r\n \r\n \r\n if msg.lower() == \"hi cubestop\" :\r\n \r\n 
media_url = ('https://he-s3.s3.amazonaws.com/media/uploads/0ca15d3.png')\r\n \r\n msgBody = \" Hi Sagar , Welcome to cubestop 1-stop Destination for all your need \\n \\n Visit : https://www.cubestop.com \"\r\n resp = MessagingResponse()\r\n mymsg = resp.message(msgBody)\r\n mymsg.media(media_url)\r\n\r\n return str(resp)\r\n elif msg.lower() == \"find nearest restaurant\":\r\n \r\n print( \"String Case matched !!\") \r\n \r\n msgBody = \" Plz Click on the link to share your Location 📍 with Cube Stop . \\n \\n Visit : http://751648d76e73.ngrok.io/share \\n \\n OR \\n \\n Share your thorugh whatsapp itself !! 😊 \"\r\n resp = MessagingResponse()\r\n mymsg = resp.message(msgBody)\r\n\r\n return str(resp)\r\n\r\n elif msg.lower() == \"done\":\r\n \r\n print( \"String Case matched = Done!!\") \r\n \r\n media_url = ('https://i.pinimg.com/736x/6a/fc/63/6afc63bf06c99262ad7efa4683f956b1.jpg')\r\n \r\n msgBody = \" Hi Sagar , \\n \\n We found the nearest cubestop to you . \\n \\n Click below link to Navigate : https://goo.gl/maps/vVwTX6p95fkpJFNp6 \"\r\n resp = MessagingResponse()\r\n mymsg = resp.message(msgBody)\r\n mymsg.media(media_url)\r\n\r\n return str(resp)\r\n\r\n elif msg.lower() == \"great service\":\r\n \r\n print( \"String Case matched = great service !!\") \r\n \r\n happy_url = ('https://www.futurenetzero.com/wp-content/uploads/2014/06/Smiley-face-emoticon-575.jpg')\r\n \r\n msgBody = \" Thanks for the FeedBack . \\n \\n We are happy to Help you . \\n \\n If you would like to recommed someone to visit cubestop just tell us their number and you will get a Free Service next Time !! \"\r\n resp = MessagingResponse()\r\n mymsg = resp.message(msgBody)\r\n mymsg.media(happy_url)\r\n\r\n return str(resp)\r\n\r\n\r\n else:\r\n reply = fetch_reply(msg, phone_no)\r\n resp = MessagingResponse()\r\n # Create reply\r\n resp.message(reply)\r\n return str(resp)", "title": "" }, { "docid": "9c90bfd352d3aa81063676cf1fb7acc5", "score": "0.61614615", "text": "def incoming_sms():\n number = request.values.get('From', None)\n body = request.values.get('Body', None)\n print(body)\n # Start our TwiML response\n resp = MessagingResponse()\n if body is None:\n resp.message(\"Invalid: Enter your name, class, and session# separated by spaces as shown (one student at a time). Examples:\\nAvi Patel grade1 session1\\nRavi Rao PreK session1\\nMira Singh KG session2\")\n return str(resp)\n body = body.lower()\n body = body.strip()\n body_arr = body.split()\n class_name = \"\"\n name = \"\"\n if len(body_arr) == 4:\n first_name = body_arr[0]\n last_name = body_arr[1]\n name = first_name + \" \" + last_name\n class_name = body_arr[2] + body_arr[3]\n elif len(body_arr) == 6:\n first_name = body_arr[0]\n last_name = body_arr[1]\n name = first_name + \" \" + last_name\n class_name = body_arr[2] + body_arr[3] + body_arr[4] + body_arr[5]\n else:\n resp.message(\"Invalid: Enter your name, class, and session# separated by spaces as shown (one student at a time). 
Examples:\\nAvi Patel grade1 session1\\nRavi Rao PreK session1\\nMira Singh KG session2\")\n return str(resp)\n\n # forward_message(class_name, number, name)\n\n return str(resp)", "title": "" }, { "docid": "9c94bb9a4949edeff0ab6549d6c786b9", "score": "0.6160334", "text": "def send(self, msg):\n self.sendLine(msg)", "title": "" }, { "docid": "9a8163d55682c78ce4d0da075a3491ef", "score": "0.615045", "text": "def __send(self, tosend, source):\n for line in tosend.splitlines(True):\n if self.delay:\n debug('ReplyHandler.send: delay answer for %d ms', self.delay)\n sleep(self.delay / 1000.0)\n debug('ReplyHandler.send(%s): %r', source, line)\n self.request.sendall(line.encode('utf-8'))", "title": "" }, { "docid": "2119bd67cadd1aabe93e25bd038dd1d8", "score": "0.61460364", "text": "def sms_reply():\n body = request.values.get('Body', None)\n app.logger.debug(\"sms received: {}\".format(body))\n Qqueue.enqueue(body)\n Qqueue.save()\n \n resp = MessagingResponse()\n resp.message(\"Your message has been received.\")\n\n return str(resp)", "title": "" }, { "docid": "071ffa79d52ead6598c625a3e24cfc89", "score": "0.613295", "text": "def send(self, txrecords):\n # txrecords is a list of TransmissionLog IDs\n txrecords = TransmissionLog.objects.filter(pk__in=txrecords)\n message = txrecords[0].message\n notification_id = txrecords[0].notification_id\n self.LAST_MESSAGE = message\n self.LAST_NOTIFICATION_ID = notification_id\n \n # allows for testing of failed gateway send\n send_ok = False if message == 'FAIL MESSAGE' else True\n \n for tx in txrecords:\n tx.enqueued = False \n tx.send_ok = send_ok\n tx.gateway_response = self.GATEWAY_MID\n tx.save()\n \n if not send_ok: \n raise squawk.GatewayFailError(\"Send failed\", notification_id = notification_id)", "title": "" }, { "docid": "0881c91542efb47932c4489e26b14a1b", "score": "0.611443", "text": "def send(data):\r\n\t\tpass", "title": "" }, { "docid": "e34792ef807449002a12889c43053d36", "score": "0.6102822", "text": "def _emit(self):\n print \"emitting \", self.conf['id']\n if self.conf['repeat'] in _done_action_types:\n self.eventObj.update({\"action\": \"Done\"})\n else:\n self.eventObj.update({\"action\": _correct_out_time(datetime.now()).strftime(_format)})\n\n event = {\n \"type\": \"send_sms\",\n \"data\": self.eventObj\n }\n\n if self.conf['repeat'] == 'Fortnightly':\n print \"Additional emit condition\"\n self.trigger = DateTrigger(datetime.now() + timedelta(days=14))\n self._schedule(create_cancel=False)\n\n if self.conf['repeat'] not in ['Once', 'Immediately']:\n self._schedule_data_download()\n\n dispatcher.send(signal=SIG, event=event, sender=self)", "title": "" }, { "docid": "ab56f0f2ab6acc9449c70822b72a8522", "score": "0.61000264", "text": "def send_message(self,to,message):\n # we make sure to put a newline on the end so the client receives the\n # message on its own line\n\t\tself._attempt_send(to,message+\"\\n\\r\")", "title": "" }, { "docid": "9b049ac7ee3e13f290e4aa70cea3b582", "score": "0.6096604", "text": "def send_sms(self, recipient, text, max_messages = 255): \n pdus = gsmpdu.get_outbound_pdus(text, recipient)\n if len(pdus) > max_messages:\n raise ValueError(\n 'Max_message is %d and text requires %d messages' %\n (max_messages, len(pdus))\n )\n\n for pdu in pdus:\n self._send_pdu(pdu)\n return True", "title": "" }, { "docid": "40900a6d7053e90c442c367ce1c45b73", "score": "0.6090905", "text": "def send_sms(self, text, **kwargs):\n\n params = {\n 'user': self._user,\n 'pass': self._password,\n 'msg': text\n }\n\n response = 
requests.get(self.BASE_URL, params=params, **kwargs)\n return FreeResponse(response)", "title": "" }, { "docid": "17529dd3ba6d2249176130ef20c8b8d1", "score": "0.60863715", "text": "def send(self, msg):\n from ututi.lib.messaging import EmailMessage, SMSMessage\n if isinstance(msg, EmailMessage):\n email = self.email\n if email.confirmed or msg.force:\n msg.send(email.email)\n else:\n log.info(\"Could not send message to unconfirmed email %(email)s\" % dict(email=email.email))\n elif isinstance(msg, SMSMessage):\n if self.phone_number is not None and (self.phone_confirmed or msg.force):\n msg.recipient=self\n msg.send(self.phone_number)\n else:\n log.info(\"Could not send message to uncofirmed phone number %(num)s\" % dict(num=self.phone_number))", "title": "" }, { "docid": "cabf33e382a6d7362fa1f83076c4a7c6", "score": "0.60832", "text": "def fire_sms_event(sms):\n data = {\n ATTR_HOST: modem_data.host,\n ATTR_SMS_ID: sms.id,\n ATTR_FROM: sms.sender,\n ATTR_MESSAGE: sms.message,\n }\n hass.bus.async_fire(EVENT_SMS, data)", "title": "" }, { "docid": "6d64f45e99c85e05da8c665ce6d0315e", "score": "0.6079566", "text": "def send(self, msg):\n return self.client.send(msg)", "title": "" }, { "docid": "910aaab9f50e67d0bd5a88c0dfbf14e5", "score": "0.6074344", "text": "def smsCanSend(self, uid, callback):\n j = Json().put(u\"uid\", uid)\n self.callMethodRetInteger(u\"sms.canSend\", j.getJavaScriptObject(), callback)", "title": "" }, { "docid": "61b1e03cb23e1a72b56b2ed212176dad", "score": "0.60611385", "text": "def on_send(self, text):\n pass", "title": "" }, { "docid": "06db7117db6e02363f48619c6c4bd98b", "score": "0.60578454", "text": "def _send(self):\n # Set data as appropriate\n if self.conversation:\n linecount = 1\n for line in reversed(self.conversation):\n linecount += 1\n self.data['vText' + str(linecount)] = line\n if linecount == 8:\n break\n\n # Generate the token\n enc_data = urlencode(self.data)\n digest_txt = enc_data[9:35]\n token = hashlib.md5(digest_txt.encode('utf-8')).hexdigest()\n self.data['icognocheck'] = token\n\n # POST the data to Cleverbot and return\n return self.session.post(self.SERVICE_URL,\n data=self.data,\n headers=self.headers)", "title": "" }, { "docid": "6ff6a1414f0e447719071aad331f9280", "score": "0.6055317", "text": "def _send_text(self) -> None:\n text_message = self._dq.pop_left()\n\n # ------------------\n # In development: send a text\n # ------------------\n # # send text and store the returned MessageInstance\n # twilio_message = send_text(text_message)\n #\n # LOGGER.info(\"text sent: {}\".format(text_message))\n # LOGGER.info(\"sid: {}\".format(twilio_message.sid))\n\n LOGGER.info(text_message)\n\n self.broker.put(ReceiptPacket(text_message.text, datetime.datetime.now(), text_message.id))", "title": "" }, { "docid": "3c3f4eb5b0d600973eaec598d8108a22", "score": "0.603847", "text": "def send(self,data):\n self.s.sendall(data.encode())", "title": "" }, { "docid": "eb161df4d4a489774277f948e8ed8527", "score": "0.602129", "text": "def test_send(self):\n msg_flag = self.instance.send(self.msg_short)\n assert(msg_flag)\n msg_flag, msg_recv = self.driver.recv(self.timeout)\n assert(msg_flag)\n nt.assert_equal(msg_recv, self.msg_short)", "title": "" }, { "docid": "0484a99543f615a96817efe51596699e", "score": "0.6010134", "text": "def send_message(phone_no='', message_body=''):\n client = Client(TWILIO_DETAILS['account_sid'], TWILIO_DETAILS['auth_token'])\n\n # Phone number given to you by Twilio\n # all message will come from this number\n message_body = '\\n' + 
message_body\n client.messages.create(to=phone_no, from_=TWILIO_DETAILS['number'], body=message_body)", "title": "" }, { "docid": "29b3664fc3f659491b98ba4946f6adef", "score": "0.6007218", "text": "def _msg_send(self, adapter: Bus, logger: SizedRotatingLogger) -> None:\n while not self.send_queue.empty():\n message: Message = self.send_queue.get()\n adapter.send(message)\n message.is_rx = False # to have it correctly in the logger\n logger(message)", "title": "" }, { "docid": "4e51a2d648c81cc552cb94b7fc239d96", "score": "0.6005658", "text": "def send_text(self, num, message, test=False):\n if test:\n print(\"Sending a text message to number '{0:s}'. The message reads as follows\\n{1:s}\".format(num,message))\n else:\n self.client.messages.create(body=message, from_=self.number, to=num)", "title": "" }, { "docid": "dd5d81bc54a1af0d9121829301fbd6a7", "score": "0.5995788", "text": "def send_sms(contact, content):\n account_sid = os.environ[\"TWILIO_SID\"]\n auth_token = os.environ[\"TWILIO_TOKEN\"]\n twilio_number = os.environ[\"TWILIO_NUMBER\"]\n client = TwilioRestClient(account_sid, auth_token)\n client.messages.create(\n to='+1' + contact.phone,\n from_=twilio_number,\n body=content\n )", "title": "" }, { "docid": "74534312db9c275958120ec4596d1f76", "score": "0.5988096", "text": "def _send(self, *args, **kwargs):", "title": "" }, { "docid": "7f47feb8caa36df12f45843697c7e4b2", "score": "0.59877837", "text": "def test_send_csr_message(self):\n deviceid = 54321\n data = 'Hello, Hologram!'\n resp = self.client.csr.send_message(deviceid, data)\n self.assertTrue(resp.get('success'))", "title": "" }, { "docid": "985e1d5103f35aa5cb3607d7709d6a05", "score": "0.59871304", "text": "def send_messages(self, messages):\r\n raise NotImplementedError", "title": "" }, { "docid": "338bffdfd2ba1ba2a4a3e8a65b109cef", "score": "0.5980607", "text": "def send(self, msg):\n print(\"[OUT]\"+str(msg))\n self.socket.sendall(bytes((str(msg)+'\\0').encode(\"utf-8\")))", "title": "" }, { "docid": "577001f493484872d17271fb732d6b28", "score": "0.5979784", "text": "def sendMessage(self, message, sender):\n info = {\"msg\":message, \"sender\":sender}\n msg = json.dumps(info).encode(\"utf-8\")\n try: \n self.conn.send(msg)\n except:\n print(\"That user is not getting massage\")", "title": "" }, { "docid": "566e94616e3a067af475f18969cc1b0b", "score": "0.59787863", "text": "def sms_reply():\n\n # Use this data in your application logic\n for key in request.form.keys():\n print(key)\n time.sleep(1)\n print(request.form[key])\n print(\"\")\n for key in request.values.keys():\n print(key)\n time.sleep(1)\n print(request.values[key])\n print(\"\")\n \n\n # Start our TwiML response\n resp = MessagingResponse()\n\n # Add a message\n resp.message(\"Did this work?\")\n\n return str(resp)", "title": "" }, { "docid": "92689dec76541305a138fcce46104456", "score": "0.59781194", "text": "def send(self):\n raise NotImplementedError", "title": "" }, { "docid": "64f9542b2c45ba2f40bd8d97d2cea221", "score": "0.59726256", "text": "def send_to_calculate():\n data_to_send = request.get_json()\n app.logger.debug(str(data_to_send))\n mq = rabbitmqclient.RabbitMQ()\n mq.new_task(str(data_to_send))\n\n return str(data_to_send), \"sent\\n\"", "title": "" }, { "docid": "266f95fea86b971417605660ee9cf8b4", "score": "0.5972053", "text": "def send(self, buf):", "title": "" }, { "docid": "d8fbc1a6ddf1938f0785f19dac5346f9", "score": "0.59629893", "text": "def send_sms(self, recipient, msg):\n\n # User may not have configured twilio - don't initialize it until 
it's\n # first used\n if self.twilio_client is None:\n self.logger.info(\"Initializing Twilio\")\n\n if cfg.TWILIO_ACCOUNT == '' or cfg.TWILIO_TOKEN == '':\n self.logger.error(\"Twilio account or token not specified - unable to send SMS!\")\n else:\n self.twilio_client = TwilioRestClient(cfg.TWILIO_ACCOUNT, cfg.TWILIO_TOKEN)\n\n if self.twilio_client != None:\n self.logger.info(\"Sending SMS to %s: %s\", recipient, msg)\n try:\n self.twilio_client.sms.messages.create(\n to=recipient,\n from_=cfg.TWILIO_PHONE_NUMBER,\n body=truncate(msg, 140))\n except TwilioRestException as ex:\n self.logger.error(\"Unable to send SMS: %s\", ex)\n except httplib2.ServerNotFoundError as ex:\n self.logger.error(\"Unable to send SMS - internet connectivity issues: %s\", ex)\n except:\n self.logger.error(\"Exception sending SMS: %s\", sys.exc_info()[0])", "title": "" }, { "docid": "178fbbf817040b651a100a44d77bd410", "score": "0.5962635", "text": "def onSend(self):\r\n #resolve unicode encoding for text parts;\r\n bodytextEncoding, attachesEncodings = self.resolveUnicodeEncodings()\r\n\r\n # get components from GUI\r\n fieldvalues = [entry.get() for entry in self.hdrFields]\r\n From, To, Cc, Subj = fieldvalues[:4]\r\n extraHdrs = [('Cc', Cc), ('X-Mailer', appname + '(Python)')]\r\n extraHdrs += list(zip(self.userHdrs, fieldvalues[4:]))\r\n bodytext = self.editor.getAllText()\r\n\r\n # split multiple reciepent lists on ',', fix empty fields\r\n Tos = self.splitAddresses(To)\r\n for (ix, (name, value)) in enumerate(extraHdrs):\r\n if value:\r\n if value == '?':\r\n extraHdrs[ix] = (name, '')\r\n elif name.lower() in ['cc', 'bcc']:\r\n extraHdrs[ix] = (name, self.splitAddresses(value))\r\n\r\n # withdraw to disallow send duriing send\r\n self.withdraw()\r\n self.getPassword()\r\n popup = popuputil.BusyBoxNowait(appname, 'Sending message')\r\n sendingBusy.incr()\r\n threadtools.startThread(\r\n action = self.sendMessage,\r\n args = (From, Tos, Subj, extraHdrs, bodytext, self.attaches, saveMailSeparator, bodytextEncoding, attachesEncodings),\r\n context = (popup,),\r\n onExit = self.onSendExit,\r\n onFail = self.onSendFail\r\n )", "title": "" }, { "docid": "ca30487e5e0cbf28c1418ceb7aed3027", "score": "0.5957321", "text": "def send_cmd(self):\n pass", "title": "" }, { "docid": "a6e6d02e71d431fc6dda9117fd90138b", "score": "0.59546053", "text": "def send(self, stanza):\n pass", "title": "" }, { "docid": "70c32b793e8e733866597cd44d63c79e", "score": "0.5948438", "text": "def send(self, message: str) -> None:\n\n pass", "title": "" }, { "docid": "157150c0b365d466388936184c9ebe8d", "score": "0.5946702", "text": "async def send_message(self) -> bool:\r\n\r\n TELEGRAM_SEND_MESSAGE_URL = await telegram_sender(self.token_bot,\r\n self.chat_id,\r\n self.outgoing_message_text)\r\n\r\n res = await requests.post(TELEGRAM_SEND_MESSAGE_URL)\r\n return True if res.status_code == 200 else False", "title": "" }, { "docid": "cfe90333a5b8b0f02a71f798fc5e8ed9", "score": "0.5941839", "text": "def sms_reply():\n # Fetch the message\n msg = request.form.get('Body')\n remitente = request.form.get('From')\n\n # todo los siguientes dos no me funcionan\n fecha = request.form.get('date_created')\n msgID = request.form.get('sid')\n\n\n # Create reply\n resp = MessagingResponse()\n respuesta = \"Hola {}, como estas! Tu mensage es: {} y lo mandaste el: {}. 
El id del msg es: {}\".format(remitente, msg, fecha, msgID)\n resp.message(respuesta)\n\n return str(resp)", "title": "" }, { "docid": "3cde0f3ede5ee0679497aa506f5749d1", "score": "0.5931106", "text": "def test_send_dm(self):\n self.mock_sc.chat_postMessage = mock.MagicMock(return_value=OK_RESP)\n\n self.bot.send_dm(\"Hahahaha\", \"UD8UCTN05\")\n self.mock_sc.chat_postMessage.assert_called_with(\n text=\"Hahahaha\",\n channel=\"UD8UCTN05\",\n as_user=True\n )", "title": "" }, { "docid": "1e145a5329098d27307ed985b42436f9", "score": "0.592634", "text": "def send(self, message_obj):\n\n tx_buff = self._get_tx_buffer() # info = addr.\n if tx_buff is None:\n print(\"No transmit buffer available to send\")\n return False\n\n #print(\"\\t\\tTX Buffer:\", tx_buff)\n\n return self._write_message(tx_buff, message_obj)", "title": "" }, { "docid": "500fe964b63642f9ad665993353248b4", "score": "0.5925051", "text": "def _send(self, message):\n response = requests.post(\n self.send_url,\n message,\n headers={'Content-Type': 'text/plain'})\n return response.status_code in (200, 204)", "title": "" }, { "docid": "500fe964b63642f9ad665993353248b4", "score": "0.5925051", "text": "def _send(self, message):\n response = requests.post(\n self.send_url,\n message,\n headers={'Content-Type': 'text/plain'})\n return response.status_code in (200, 204)", "title": "" }, { "docid": "facd911aafd6bd230ea2025e6655cf74", "score": "0.5918287", "text": "def at_message_send(self, message, to_object):\r\n pass", "title": "" } ]
4af6ee9402e059ed8fad043e8af712a8
Safely JSONencode an object. To protect against XSS attacks, HTML special characters (, &) and unicode newlines are replaced by escaped unicode characters. Django does not escape these characters by default. Output of this method is not marked as HTML safe. If you use it inside an HTML
[ { "docid": "9e7a27980d8bc0da5b37ef78e5665b10", "score": "0.7651096", "text": "def safe_json(data):\n unsafe_chars = {\n \"&\": \"\\\\u0026\",\n \"<\": \"\\\\u003c\",\n \">\": \"\\\\u003e\",\n \"\\u2028\": \"\\\\u2028\",\n \"\\u2029\": \"\\\\u2029\",\n }\n json_str = json_.dumps(data, cls=DjangoErrorJSONEncoder)\n\n for (c, d) in unsafe_chars.items():\n json_str = json_str.replace(c, d)\n\n return json_str", "title": "" } ]
[ { "docid": "d1642861764950e40025644edb7a72f9", "score": "0.75244474", "text": "def json_encode(obj):\r\n return to_unicode(_json_encode(obj))", "title": "" }, { "docid": "9e1e4e8590df6c0ca5534c6946317323", "score": "0.71535826", "text": "def _safe_str(obj):\n try:\n return str(obj)\n except UnicodeEncodeError:\n # obj is unicode\n return unicode(obj).encode('unicode_escape')", "title": "" }, { "docid": "6f98d7e94d15549d20e34366d240bfe1", "score": "0.69944006", "text": "def safe_str(obj):\r\n try:\r\n return str(obj)\r\n except UnicodeEncodeError:\r\n # obj is unicode\r\n return unicode(obj).encode('unicode_escape')", "title": "" }, { "docid": "e23dcf94f90b0e796dbcef975b02882e", "score": "0.69850713", "text": "def safe_str(obj):\n try:\n return str(obj)\n except UnicodeEncodeError:\n # obj is unicode\n return unicode(obj).encode('unicode_escape')", "title": "" }, { "docid": "e23dcf94f90b0e796dbcef975b02882e", "score": "0.69850713", "text": "def safe_str(obj):\n try:\n return str(obj)\n except UnicodeEncodeError:\n # obj is unicode\n return unicode(obj).encode('unicode_escape')", "title": "" }, { "docid": "f3f0ecf55da803f762229a78d9f3f9d9", "score": "0.6926217", "text": "def json(value):\n uncleaned = jsonlib.dumps(value)\n clean = bleach.clean(uncleaned)\n # Small replacements\n clean = clean.replace('&amp;', '&')\n clean = clean.replace('&lt;', '<')\n return mark_safe(clean)", "title": "" }, { "docid": "e0209d01a93a032004569dfa716f150d", "score": "0.68114585", "text": "def encode(obj):\n if _CJSON:\n return cjson.encode(obj)\n else:\n return json.dumps(obj)", "title": "" }, { "docid": "22de1c514336766f06840e9ca284efeb", "score": "0.679083", "text": "def json_filter(value):\n return mark_safe(json.dumps(value, cls=SafeJSONEncoder))", "title": "" }, { "docid": "f74d9d4356f3e6b3729cf0ee4a2ddbd8", "score": "0.6723732", "text": "def render(self, obj: object) -> str:\n return json.dumps(\n asdict(obj, dict_factory=self.dict_factory),\n cls=self.encoder,\n indent=self.indent,\n )", "title": "" }, { "docid": "0e0d6e23db4799e69a2f69a4610a9e73", "score": "0.6714277", "text": "def safe_str(obj):\n try:\n return str(obj)\n except UnicodeEncodeError:\n return obj", "title": "" }, { "docid": "381eeb79dd2b3d08c4a039438c2fa10b", "score": "0.6686007", "text": "def safe_str(obj):\n try:\n return str(obj)\n except UnicodeEncodeError:\n return unicode(obj).encode('UTF-8')", "title": "" }, { "docid": "592dea91afdcc6fd9d55157fe0456c0d", "score": "0.6655969", "text": "def json(value):\n uncleaned = jsonlib.dumps(value, iterable_as_array=True)\n clean = html.parser.unescape(bleach.clean(uncleaned))\n return mark_safe(clean)", "title": "" }, { "docid": "0f4b281c7c2e02ab3bc23b6f39feb431", "score": "0.6623755", "text": "def json_encode(value):\r\n # JSON permits but does not require forward slashes to be escaped.\r\n # This is useful when json data is emitted in a <script> tag\r\n # in HTML, as it prevents </script> tags from prematurely terminating\r\n # the javscript. 
Some json libraries do this escaping by default,\r\n # although python's standard library does not, so we do it here.\r\n # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped\r\n return json.dumps(value).replace(\"</\", \"<\\\\/\")", "title": "" }, { "docid": "0f4b281c7c2e02ab3bc23b6f39feb431", "score": "0.6623755", "text": "def json_encode(value):\r\n # JSON permits but does not require forward slashes to be escaped.\r\n # This is useful when json data is emitted in a <script> tag\r\n # in HTML, as it prevents </script> tags from prematurely terminating\r\n # the javscript. Some json libraries do this escaping by default,\r\n # although python's standard library does not, so we do it here.\r\n # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped\r\n return json.dumps(value).replace(\"</\", \"<\\\\/\")", "title": "" }, { "docid": "be350e29925c76c83dd3262d9a09fab3", "score": "0.6599834", "text": "def escapeJSONForScriptTag(self, s):\n return s.replace(\"<\",\"\\\\u003C\").replace(\">\",\"\\\\u003E\").replace(\"&\",\"\\\\u0026\")", "title": "" }, { "docid": "c851914bf0ac6622538d2aaa211d9a1e", "score": "0.6589596", "text": "def json_encode(value):\r\n # JSON permits but does not require forward slashes to be escaped.\r\n # This is useful when json data is emitted in a <script> tag\r\n # in HTML, as it prevents </script> tags from prematurely terminating\r\n # the javscript. Some json libraries do this escaping by default,\r\n # although python's standard library does not, so we do it here.\r\n # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped\r\n return _json_encode(recursive_unicode(value), default=_defaultjson).replace(\"</\", \"<\\\\/\")", "title": "" }, { "docid": "22642d0a2bd0f18b82a58c34e8b98c47", "score": "0.65451306", "text": "def _html_print_obj(self, obj_json, str_list: list, indent):\n if isinstance(obj_json, dict):\n self._html_print_dict(obj_json, str_list, indent)\n elif isinstance(obj_json, list):\n self._html_print_array(obj_json, str_list, indent)\n elif isinstance(obj_json, str):\n str_list.append('&quot;')\n str_list.append(html.escape(obj_json))\n str_list.append('&quot;')\n elif isinstance(obj_json, int):\n str_list.append(str(obj_json))\n elif isinstance(obj_json, bool):\n str_list.append(str(obj_json))\n elif obj_json is None:\n str_list.append('null')", "title": "" }, { "docid": "79c8d9ea1a77a0816813d4bb596e914c", "score": "0.65404683", "text": "def do_forceescape(value):\r\n if hasattr(value, '__html__'):\r\n value = value.__html__()\r\n return escape(unicode(value))", "title": "" }, { "docid": "d7972b4c978a21a270c5ac078e7282c1", "score": "0.6522463", "text": "def as_json(self, obj=None):\n json = simplejson.dumps(obj, indent=True, sort_keys=True)\n json = re.sub(\"<\",\"&lt;\",json)\n return \"<h3>Debug JSON</h3><pre class='usage'>%s</pre>\" % json", "title": "" }, { "docid": "62e46cca090f3a45962f1bbd8696e68a", "score": "0.6481337", "text": "def safe_unicode(obj, *args):\n try:\n return str(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return str(ascii_text)", "title": "" }, { "docid": "99ace45a8c74fdada1bdf2150bb9199a", "score": "0.64710265", "text": "def escapejs_filter(value):\n return escapejson(value)", "title": "" }, { "docid": "9ae8a54bbf37836e6ec63660309c21a3", "score": "0.6467364", "text": "def jsonify(obj):\n return json.dumps(raw_json(obj))", "title": "" }, { "docid": 
"a5b6945312fbaab42c609e532a689a10", "score": "0.643901", "text": "def json_format(obj):\n\n return json.dumps(obj, cls=BytesBase64Encoder)", "title": "" }, { "docid": "f6ec63e85aa9c1aec019802155ddc423", "score": "0.6437541", "text": "def safestr(obj, encoding='utf-8'):\n if isinstance(obj, unicode):\n return obj.encode(encoding)\n elif isinstance(obj, str):\n return obj\n elif hasattr(obj, 'next'): # iterator\n return itertools.imap(safestr, obj)\n else:\n return str(obj)", "title": "" }, { "docid": "69016a68688b0141769175f5d2ed61a8", "score": "0.6325441", "text": "def safe_bytestring(obj):\n try:\n return str(obj)\n except UnicodeEncodeError:\n # obj is unicode\n return str(obj).encode('unicode_escape')", "title": "" }, { "docid": "6dde9f7de7e2d61ac0b111e2d3e6d57e", "score": "0.62997544", "text": "def json_encode(data, *args, **kwargs):\n\n def _any(data):\n ret = None\n # Opps, we used to check if it is of type list, but that fails\n # i.e. in the case of django.newforms.utils.ErrorList, which extends\n # the type \"list\". Oh man, that was a dumb mistake!\n if hasattr(data, 'canonical'):\n ret = _any(data.canonical())\n elif isinstance(data, list):\n ret = _list(data)\n elif isinstance(data, set):\n ret = _list(list(data))\n # Same as for lists above.\n elif isinstance(data, dict):\n ret = _dict(data)\n # elif isinstance(data, CallableBool):\n # ret = bool(data)\n elif isinstance(data, (Decimal, ObjectId)):\n # json.dumps() cant handle Decimal\n ret = str(data)\n elif isinstance(data, models.query.QuerySet):\n # Actually its the same as a list ...\n ret = _list(data)\n elif isinstance(data, MongoQuerySet):\n # Actually its the same as a list ...\n ret = _list(data)\n elif isinstance(data, models.Model):\n ret = _model(data)\n # here we need to encode the string as unicode (otherwise we get utf-16 in the json-response)\n elif isinstance(data, bytes):\n ret = data.decode('utf-8', 'ignore')\n elif isinstance(data, str):\n ret = smart_str(data)\n elif isinstance(data, Exception):\n ret = str(data)\n # see http://code.djangoproject.com/ticket/5868\n elif isinstance(data, Promise):\n ret = force_text(data)\n elif isinstance(data, datetime.datetime) or isinstance(data, datetime.date):\n ret = str(data)\n elif hasattr(data, 'to_json'):\n ret = data.to_json()\n else:\n ret = data\n return ret\n\n def _model(data):\n ret = {}\n # If we only have a model, we only want to encode the fields.\n for f in data._meta.fields:\n ret[f.attname] = _any(getattr(data, f.attname))\n # And additionally encode arbitrary properties that had been added.\n fields = dir(data.__class__) + list(ret.keys())\n add_ons = [k for k in dir(data) if k not in fields]\n for k in add_ons:\n ret[k] = _any(getattr(data, k))\n return ret\n\n def _list(data):\n ret = []\n for v in data:\n ret.append(_any(v))\n return ret\n\n def _dict(data):\n ret = {}\n for k, v in list(data.items()):\n ret[str(k)] = _any(v)\n return ret\n\n if hasattr(data, 'to_json'):\n data = data.to_json()\n ret = _any(data)\n return json.dumps(ret)", "title": "" }, { "docid": "3115a7d7a67da04720f06f042b49116b", "score": "0.6270912", "text": "def encode_json_for_js(data, indent=None):\n return json.dumps(data, indent=indent, cls=JSONEncoderForHTML)", "title": "" }, { "docid": "02b6b7ce038b533f8091edcfd03dd92f", "score": "0.6268698", "text": "def _safe_unicode(obj, * args):\n try:\n return unicode(obj, * args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)", "title": "" }, { 
"docid": "0a00c03389cc793264cb4aba8f152ef6", "score": "0.62523603", "text": "def render(value):\n return json.dumps(value)", "title": "" }, { "docid": "3bcb96ce0d7efabfdf9c8818fb32dc1d", "score": "0.62409", "text": "def escape(inp):\n def conv(obj):\n if isinstance(obj, list):\n rv = as_unicode('[' + ','.join(conv(o) for o in obj) + ']')\n elif isinstance(obj, dict):\n rv = as_unicode('{' + ','.join([\n \"%s:%s\" % (conv(key), conv(value))\n for key, value in obj.iteritems()]) + '}')\n else:\n rv = as_unicode('\"%s\"') % as_unicode(obj).replace('\"', '\\\\\"')\n return rv\n return conv(inp)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": 
"c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": 
"0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import 
simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n 
reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n 
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" }, { "docid": "c3f109cd29474b74d36d85d6d3129711", "score": "0.6209368", "text": "def to_str(self):\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)", "title": "" } ]
504cd070164a98c21131c2e0fc61571e
Exclude certain well types from aggregate or individual metrics.
[ { "docid": "f528baa413011893daa3efb05e640201", "score": "0.0", "text": "def build_exclude_query(query, exclusions, joined_entities):\n if EXCLUDE_LOW_EVENTS in exclusions:\n if WellMetric not in joined_entities:\n query = query.join(WellMetric).filter(WellMetric.accepted_event_count > 1000)\n\n if EXCLUDE_NO_CALL in exclusions:\n # assume a channel has been selected\n if WellChannelMetric not in joined_entities:\n query = query.join(WellChannelMetric)\n query = query.filter(WellChannelMetric.concentration > 0)\n\n return query", "title": "" } ]
[ { "docid": "47d34d37d13fe7831c767a526b208400", "score": "0.59837747", "text": "def remove_measurements(self):\n def ismeasurement(gate):\n return isinstance(gate, qiskit.circuit.measure.Measure)\n self.data = [data for data in self.data if not ismeasurement(data[0])]\n return", "title": "" }, { "docid": "94a9c3cc5d23db80274ad01423aea675", "score": "0.5879248", "text": "def reject_all_outliers(train):\n train = reject_outliers(train,\"Income in EUR\")\n train = reject_outliers(train,\"Age\")\n train = reject_outliers(train,\"Body Height [cm]\")\n train = reject_outliers(train,\"Size of City\")\n train = reject_outliers(train,\"Year of Record\")\n return train", "title": "" }, { "docid": "d134970068c35ac46f4b9b52b4985072", "score": "0.5871861", "text": "def exclude_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetricStreamExcludeFilterArgs']]]]:\n return pulumi.get(self, \"exclude_filters\")", "title": "" }, { "docid": "d134970068c35ac46f4b9b52b4985072", "score": "0.5871861", "text": "def exclude_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetricStreamExcludeFilterArgs']]]]:\n return pulumi.get(self, \"exclude_filters\")", "title": "" }, { "docid": "379572575bca438e3c772e3cc27b95d6", "score": "0.5850303", "text": "def _filter_metrics(self, _type: Type[TMetric]) -> List[Type[TMetric]]:\n return [metric for metric in self.metrics if issubclass(metric, _type)]", "title": "" }, { "docid": "a65fdd64b7d8a63618c8199e662069da", "score": "0.57918996", "text": "def test_exclude_functionality(self):\n exclude_opts = list(OCPExcludeSerializer._opfields)\n exclude_opts.remove(\"infrastructures\") # Tested separately\n exclude_opts.remove(\"category\")\n for exclude_opt in exclude_opts:\n for view in [OCPCostView, OCPCpuView, OCPMemoryView, OCPVolumeView]:\n with self.subTest(exclude_opt):\n overall_url = f\"?group_by[{exclude_opt}]=*\"\n query_params = self.mocked_query_params(overall_url, view)\n handler = OCPReportQueryHandler(query_params)\n overall_output = handler.execute_query()\n overall_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n opt_value = None\n for date_dict in overall_output.get(\"data\", [{}]):\n for element in date_dict.get(f\"{exclude_opt}s\"):\n if f\"No-{exclude_opt}\" != element.get(exclude_opt):\n opt_value = element.get(exclude_opt)\n break\n if opt_value:\n break\n # Grab filtered value\n filtered_url = f\"?group_by[{exclude_opt}]=*&filter[{exclude_opt}]={opt_value}\"\n query_params = self.mocked_query_params(filtered_url, view)\n handler = OCPReportQueryHandler(query_params)\n handler.execute_query()\n filtered_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n expected_total = overall_total - filtered_total\n # Test exclude\n exclude_url = f\"?group_by[{exclude_opt}]=*&exclude[{exclude_opt}]={opt_value}\"\n query_params = self.mocked_query_params(exclude_url, view)\n handler = OCPReportQueryHandler(query_params)\n self.assertIsNotNone(handler.query_exclusions)\n excluded_output = handler.execute_query()\n excluded_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n excluded_data = excluded_output.get(\"data\")\n # Check to make sure the value is not in the return\n for date_dict in excluded_data:\n grouping_list = date_dict.get(f\"{exclude_opt}s\", [])\n self.assertIsNotNone(grouping_list)\n for group_dict in grouping_list:\n if f\"No-{exclude_opt}\" != opt_value:\n self.assertNotEqual(opt_value, group_dict.get(exclude_opt))\n 
self.assertAlmostEqual(expected_total, excluded_total, 6)\n self.assertNotEqual(overall_total, excluded_total)", "title": "" }, { "docid": "b3f39c45164b0d3542a2a6acc73ab0ce", "score": "0.57276094", "text": "def exclude_filters(self) -> pulumi.Output[Optional[Sequence['outputs.MetricStreamExcludeFilter']]]:\n return pulumi.get(self, \"exclude_filters\")", "title": "" }, { "docid": "b1ca3718cfd88bb3b1ec856295eaf4c0", "score": "0.57185674", "text": "def test_multi_exclude_functionality(self):\n exclude_opts = list(OCPExcludeSerializer._opfields)\n exclude_opts.remove(\"infrastructures\")\n exclude_opts.remove(\"category\")\n for ex_opt in exclude_opts:\n base_url = f\"?group_by[{ex_opt}]=*&filter[time_scope_units]=month&filter[resolution]=monthly&filter[time_scope_value]=-1\" # noqa: E501\n for view in [OCPVolumeView, OCPCostView, OCPCpuView, OCPMemoryView]:\n query_params = self.mocked_query_params(base_url, view)\n handler = OCPReportQueryHandler(query_params)\n overall_output = handler.execute_query()\n opt_dict = overall_output.get(\"data\", [{}])[0]\n opt_list = opt_dict.get(f\"{ex_opt}s\")\n exclude_one = None\n exclude_two = None\n for exclude_option in opt_list:\n if \"No-\" not in exclude_option.get(ex_opt):\n if not exclude_one:\n exclude_one = exclude_option.get(ex_opt)\n elif not exclude_two:\n exclude_two = exclude_option.get(ex_opt)\n else:\n continue\n if not exclude_one or not exclude_two:\n continue\n url = base_url + f\"&exclude[or:{ex_opt}]={exclude_one}&exclude[or:{ex_opt}]={exclude_two}\"\n with self.subTest(url=url, view=view, ex_opt=ex_opt):\n query_params = self.mocked_query_params(url, view)\n handler = OCPReportQueryHandler(query_params)\n self.assertIsNotNone(handler.query_exclusions)\n excluded_output = handler.execute_query()\n excluded_data = excluded_output.get(\"data\")\n self.assertIsNotNone(excluded_data)\n for date_dict in excluded_data:\n grouping_list = date_dict.get(f\"{ex_opt}s\", [])\n self.assertIsNotNone(grouping_list)\n for group_dict in grouping_list:\n self.assertNotIn(group_dict.get(ex_opt), [exclude_one, exclude_two])", "title": "" }, { "docid": "44b324c312223092332665263d610d6c", "score": "0.55464876", "text": "def test_multi_exclude_functionality(self):\n exclude_opts = list(OCPAllExcludeSerializer._opfields)\n exclude_opts.remove(\"source_type\")\n exclude_opts.remove(\"account\")\n exclude_opts.remove(\"az\")\n exclude_opts.remove(\"instance_type\")\n exclude_opts.remove(\"storage_type\")\n for ex_opt in exclude_opts:\n base_url = f\"?group_by[{ex_opt}]=*&filter[time_scope_units]=month&filter[resolution]=monthly&filter[time_scope_value]=-1\" # noqa: E501\n for view in [OCPAllCostView, OCPAllStorageView, OCPAllInstanceTypeView]:\n query_params = self.mocked_query_params(base_url, view)\n handler = OCPAllReportQueryHandler(query_params)\n overall_output = handler.execute_query()\n opt_dict = overall_output.get(\"data\", [{}])[0]\n opt_list = opt_dict.get(f\"{ex_opt}s\")\n exclude_one = None\n exclude_two = None\n for exclude_option in opt_list:\n if \"No-\" not in exclude_option.get(ex_opt):\n if not exclude_one:\n exclude_one = exclude_option.get(ex_opt)\n elif not exclude_two:\n exclude_two = exclude_option.get(ex_opt)\n else:\n continue\n if not exclude_one or not exclude_two:\n continue\n url = base_url + f\"&exclude[or:{ex_opt}]={exclude_one}&exclude[or:{ex_opt}]={exclude_two}\"\n with self.subTest(url=url, view=view, ex_opt=ex_opt):\n query_params = self.mocked_query_params(url, view)\n handler = OCPAllReportQueryHandler(query_params)\n 
self.assertIsNotNone(handler.query_exclusions)\n excluded_output = handler.execute_query()\n excluded_data = excluded_output.get(\"data\")\n self.assertIsNotNone(excluded_data)\n for date_dict in excluded_data:\n grouping_list = date_dict.get(f\"{ex_opt}s\", [])\n self.assertIsNotNone(grouping_list)\n for group_dict in grouping_list:\n self.assertNotIn(group_dict.get(ex_opt), [exclude_one, exclude_two])", "title": "" }, { "docid": "cb68c8e45ea7b6597924525d7b4d5df5", "score": "0.55350363", "text": "def exclude(self, *args, **kwargs):\n # return self._filter_or_exclude(True, *args, **kwargs)\n raise NotImplementedError", "title": "" }, { "docid": "7fba54342465d0780172e87554087c73", "score": "0.5490191", "text": "def only_missing_etypes(self):\n have = [enroll[\"etype\"] for enroll in self.data[\"ids\"]]\n return [x for x in self.etypes if x not in have]", "title": "" }, { "docid": "917623b1de7dbbb9784cb71950f4a2bf", "score": "0.54244316", "text": "def _remove_empty_obstype_fields(self):\n remove_obstype = [] # List with observation types, which should be removed from Dataset.\n remove_obstype_sys = {} # Dictionary with obstypes for each GNSS, should be removed from meta['obstypes'].\n for obstype, obs in self.data[\"obs\"].items():\n if not obs or np.all(np.array(obs) == 0.0):\n remove_obstype.append(obstype)\n systems = set(self.data[\"text\"][\"system\"])\n for sys in systems:\n\n # Filter observations depending on GNSS\n idx = np.array(self.data[\"text\"][\"system\"]) == sys\n\n if np.all(np.array(obs)[idx] == 0.0):\n remove_obstype_sys.setdefault(sys, list()).append(obstype)\n\n log.debug(\n f\"The following observation types are removed, because no observations were found: \"\n f\"{' '.join(sorted(remove_obstype))}\"\n )\n\n # Remove empty observation type data fields\n for obstype in remove_obstype:\n for sys in list(self.meta[\"obstypes\"]):\n if obstype in self.meta[\"obstypes\"][sys]:\n self.meta[\"obstypes\"][sys].remove(obstype)\n\n del self.data[\"obs\"][obstype]\n del self.data[\"cycle_slip\"][obstype]\n del self.data[\"signal_strength\"][obstype]\n\n # Remove empty observation types for a given GNSS from meta['obstypes'] and other meta variables\n for sys, obstypes in remove_obstype_sys.items():\n for obstype in obstypes:\n if obstype in self.meta[\"obstypes\"][sys]:\n self.meta[\"obstypes\"][sys].remove(obstype)", "title": "" }, { "docid": "ff73ac8cf4ce41e58a8f43a1215d139c", "score": "0.5364947", "text": "def ignore_member(cls, version, m_name, m_type):\n # This will probably need more granularity as more extensions are added\n if (type_maps.class_is_extension(cls, version) and (\n m_name == \"experimenter\" or\n m_name == \"subtype\")):\n return True\n return loxi_utils.skip_member_name(m_name) or m_type not in scalar_types", "title": "" }, { "docid": "89d3e60ca0ff279c45e87b2e732e87c8", "score": "0.5358879", "text": "def drop_all_metrics(self):\n super(HybridAccessor, self).drop_all_metrics()\n self._metadata_accessor.drop_all_metrics()\n self._data_accessor.drop_all_metrics()", "title": "" }, { "docid": "82eb5cbe19a5ed34ec1d02f15dadaf66", "score": "0.53534263", "text": "def test_exclude_by_users(self):\n df = copy.copy(self.test_df.iloc[: self.limit])\n target = \"K_VRH\"\n exclude = [\"ElementProperty\"]\n\n ep = ElementProperty.from_preset(\"matminer\")\n ep_feats = ep.feature_labels()\n\n # Test to make sure excluded does not show up\n af = AutoFeaturizer(exclude=exclude, preset=\"express\")\n af.fit(df, target)\n df = af.fit_transform(df, target)\n\n 
self.assertTrue(af.auto_featurizer)\n self.assertIn(\"ElementProperty\", af.exclude)\n self.assertFalse(any([f in df.columns for f in ep_feats]))", "title": "" }, { "docid": "8b239a990d7b68fe62e10da5431ba383", "score": "0.53428864", "text": "def exclude(self, *args, **kwargs):\r\n return self._filter_or_exclude(True, *args, **kwargs)", "title": "" }, { "docid": "0fc85be72122226abc44cd2ca6708b91", "score": "0.53275746", "text": "def test_get_fields_nosubtypes(self):\n fields = GrondwaterFilter.get_fields(include_subtypes=False)\n for field in fields:\n assert field not in ('datum', 'tijdstip', 'peil_mtaw',\n 'betrouwbaarheid', 'methode')", "title": "" }, { "docid": "47bc09250187907ae6ee825ca2bc34cc", "score": "0.5313815", "text": "def graph_exclude_data_dff(self):\n # Data dffs and wmask dffs are only for writing so are not useful for evaluating read delay.\n for inst in self.data_dff_insts:\n self.graph_inst_exclude.add(inst)\n if self.write_size:\n for inst in self.wmask_dff_insts:\n self.graph_inst_exclude.add(inst)\n if self.num_spare_cols:\n for inst in self.spare_wen_dff_insts:\n self.graph_inst_exclude.add(inst)", "title": "" }, { "docid": "2f2a8f844ece3b84e830ca72454d39bc", "score": "0.5290781", "text": "def unfiltered():", "title": "" }, { "docid": "f5886e18f689dcc254872f9acd02704d", "score": "0.52487826", "text": "def meaningful(self):\n return self.exclude(value=0, correction=0)", "title": "" }, { "docid": "f670e7fa1d17f5e47c713a3faccf11da", "score": "0.5230749", "text": "def apply_excludes(self):\n pass", "title": "" }, { "docid": "e5403c106f92832ffa0b5a6f6e284171", "score": "0.5222039", "text": "def test_exclude_infastructures(self):\n # It works on cost endpoint, but not the other views:\n for view in [OCPVolumeView, OCPCostView, OCPCpuView, OCPMemoryView]:\n with self.subTest(view=view):\n # Grab overall value\n overall_url = \"?\"\n query_params = self.mocked_query_params(overall_url, view)\n handler = OCPReportQueryHandler(query_params)\n handler.execute_query()\n ocp_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n ocp_raw = handler.query_sum.get(\"cost\").get(\"raw\", {}).get(\"value\")\n # Grab azure filtered value\n azure_url = \"?filter[infrastructures]=azure\"\n query_params = self.mocked_query_params(azure_url, view)\n handler = OCPReportQueryHandler(query_params)\n handler.execute_query()\n azure_filtered_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n # Grab gcp filtered value\n gcp_url = \"?filter[infrastructures]=gcp\"\n query_params = self.mocked_query_params(gcp_url, view)\n handler = OCPReportQueryHandler(query_params)\n handler.execute_query()\n gcp_filtered_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n # Test exclude\n # we subtract the ocp_raw cost here because we only want cost associated to\n # an infrastructure here, or atleast that is my understanding.\n expected_total = (ocp_total + azure_filtered_total + gcp_filtered_total) - ocp_raw\n exclude_url = \"?exclude[infrastructures]=aws\"\n query_params = self.mocked_query_params(exclude_url, view)\n handler = OCPReportQueryHandler(query_params)\n self.assertIsNotNone(handler.query_exclusions)\n handler.execute_query()\n excluded_result = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n self.assertAlmostEqual(expected_total, excluded_result, 6)", "title": "" }, { "docid": "9b161b41c5b4d36df4135b2ea7a52990", "score": "0.5198749", "text": "def test_parse_exclude_params_type_fail(self):\n 
types = [\"bad1\", \"aws_tags\"]\n for tag_type in types:\n exclude_params = {\"type\": None}\n exclude_params[\"type\"] = tag_type\n serializer = OCPExcludeSerializer(data=exclude_params)\n self.assertFalse(serializer.is_valid())", "title": "" }, { "docid": "2fa155b7dfbba013ce1b7cbc466b93a7", "score": "0.51906395", "text": "def take_out_wells_with_no_tops(self):\n #### THIS FUNCTION ASSUMES SOME STRUCTURES THAT MIGHT NOT EXIST IN YOUR PROJECT\n #### YOU MAY HAVE TO DO THIS A DIFFERENT WAY\n print(\n \"THIS FUNCTION ASSUMES SOME STRUCTURES THAT MIGHT NOT EXIST IN YOUR PROJECT. It should work find with Mannville default data\"\n )\n #### produces dataframe with no picks that have a value of zero\n noZeroPicks = self.input.picks_df[self.input.picks_df.Pick != 0]\n #### produces dataframe that doesn't have any picks with a quality of negative one, meaning not to be trusted or present\n noNullPicks = noZeroPicks[noZeroPicks.Quality != -1]\n self.picks_df_noNullPicks = noNullPicks", "title": "" }, { "docid": "d8dffa50adc1bfb5b26eb2aa45e05d73", "score": "0.5142001", "text": "def test_exclude_tags(self):\n query_params = self.mocked_query_params(\"?\", OCPAllTagView)\n handler = OCPAllTagQueryHandler(query_params)\n tags = handler.get_tags()\n group_tag = None\n check_no_option = False\n exclude_vals = []\n for tag_dict in tags:\n if len(tag_dict.get(\"values\")) > len(exclude_vals):\n group_tag = tag_dict.get(\"key\")\n exclude_vals = tag_dict.get(\"values\")\n self.assertNotEqual(len(exclude_vals), 0)\n url = f\"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=daily&group_by[tag:{group_tag}]=*\" # noqa: E501\n query_params = self.mocked_query_params(url, OCPAllCostView)\n handler = OCPAllReportQueryHandler(query_params)\n data = handler.execute_query().get(\"data\")\n if f\"No-{group_tag}\" in str(data):\n check_no_option = True\n previous_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n for exclude_value in exclude_vals:\n url += f\"&exclude[tag:{group_tag}]={exclude_value}\"\n query_params = self.mocked_query_params(url, OCPAllCostView)\n handler = OCPAllReportQueryHandler(query_params)\n data = handler.execute_query()\n if check_no_option:\n self.assertIn(f\"No-{group_tag}\", str(data))\n current_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n self.assertLess(current_total, previous_total)\n previous_total = current_total", "title": "" }, { "docid": "6b4d65bc87dcb7a81eccff95eaa52b62", "score": "0.51360476", "text": "def filter_types(self, types): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "ddf26f3fd63dfb1ddfa74b0474cc3113", "score": "0.5111908", "text": "def excludes(self, package):\n raise NotImplementedError", "title": "" }, { "docid": "975ed9a3430bb4989e102c8a7208e670", "score": "0.50944704", "text": "def derive_exclude(self):\n exclude = []\n if self.exclude:\n exclude += self.exclude\n\n return exclude", "title": "" }, { "docid": "ef1daad688a23b418c50023facc8648f", "score": "0.50816983", "text": "def no_filter(x):\n return True", "title": "" }, { "docid": "3da6e21dcf30e9357d3c41d962f4c54e", "score": "0.50760067", "text": "def not_included(self):", "title": "" }, { "docid": "9f0041cd965b311fc90b311c411b0fbe", "score": "0.50666183", "text": "def _remove_experts(self):\n self.experts = [ex for ex in self.experts if np.mean(\n ex.weight) >= self.theta]", "title": "" }, { "docid": "40e79996b50bc5c000b8626ebafe0eaf", "score": "0.5066329", "text": "def 
exclude_buildingblocks(self, bb_to_ex):\n\n for bb in bb_to_ex:\n if bb in sequence.interprete_dict.keys():\n element = sequence.interprete(bb)\n if element in sequence.AA:\n self.exclude_aminoacids(element)\n elif element in sequence.B:\n self.exclude_branching(element)\n elif element in sequence.CT:\n self.exclude_C_terminal(element)\n elif element in sequence.NT:\n self.exclude_N_capping(element) \n else:\n print(\"can't exclude \", bb)\n else:\n print(\"can't exclude \", bb)", "title": "" }, { "docid": "d153b29db1561e895c025656966326e2", "score": "0.50648767", "text": "def test_recorddao_not_type_list(self):\n get_several = list(self.record_dao.get_all_of_type(types=not_([\"bar\", \"spamrec\",\n \"eggrec\", \"run\"]),\n ids_only=True))\n self.assertEqual(len(get_several), 3)\n six.assertCountEqual(self, get_several,\n [\"spam3\", \"spam3ish\", \"shared_curve_set_and_matching_scalar_data\"])", "title": "" }, { "docid": "d41a04552205a46ffa5f13e50a824dc7", "score": "0.5063712", "text": "def remove_genomewide_negatives(self):\n # TODO(dk) use an attributes tag\n new_files = [\n h5_file for h5_file in self.data_files\n if \"genomewide-negatives\" not in h5_file]\n new_dataloader = H5DataLoader(\n self.data_dir, data_files=new_files, fasta=self.fasta)\n \n return new_dataloader", "title": "" }, { "docid": "bc2dcf2d207331a56edc08ee8a21949a", "score": "0.50565696", "text": "def remove_fit_type(self):\n\n self.spectrum_oned.remove_fit_type()", "title": "" }, { "docid": "48a0c338a39cd5cb33a87a5cbac94568", "score": "0.5042094", "text": "def test_parse_metrics_suppress_error(self):\n message = \"k:1|nope\"\n results = Collector(None)._parse_metrics(message)\n\n assert 0 == len(results)", "title": "" }, { "docid": "2b16d74896d8f5eda7d0c57f0e10179f", "score": "0.50405216", "text": "def test_exclude_tags(self):\n query_params = self.mocked_query_params(\"?\", OCPTagView)\n handler = OCPTagQueryHandler(query_params)\n tags = handler.get_tags()\n group_tag = None\n check_no_option = False\n exclude_vals = []\n for tag_dict in tags:\n if len(tag_dict.get(\"values\")) > len(exclude_vals):\n group_tag = tag_dict.get(\"key\")\n exclude_vals = tag_dict.get(\"values\")\n url = f\"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=daily&group_by[tag:{group_tag}]=*\" # noqa: E501\n query_params = self.mocked_query_params(url, OCPCostView)\n handler = OCPReportQueryHandler(query_params)\n data = handler.execute_query().get(\"data\")\n if f\"No-{group_tag}\" in str(data):\n check_no_option = True\n returned_values = []\n for date in data:\n date_list = date.get(f\"{group_tag}s\")\n for date_dict in date_list:\n tag_value = date_dict.get(group_tag)\n if tag_value not in returned_values:\n returned_values.append(tag_value)\n exclude_vals = [value for value in exclude_vals if value in returned_values]\n previous_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n for exclude_value in exclude_vals:\n url += f\"&exclude[tag:{group_tag}]={exclude_value}\"\n query_params = self.mocked_query_params(url, OCPCostView)\n handler = OCPReportQueryHandler(query_params)\n data = handler.execute_query()\n if check_no_option:\n self.assertIn(f\"No-{group_tag}\", str(data))\n current_total = handler.query_sum.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n self.assertLess(current_total, previous_total)\n previous_total = current_total", "title": "" }, { "docid": "36487b954aa7bc5c160488b24c5644fd", "score": "0.5039861", "text": "def 
filter_annotations(img_all_annotations, used_classes):\n\n img_filtered_annotations = {}\n\n # Filter the type of the objects.\n relevant_annotation_indices = [\n i for i, x in enumerate(img_all_annotations['type']) if x in used_classes\n ]\n\n for key in img_all_annotations.keys():\n img_filtered_annotations[key] = (\n img_all_annotations[key][relevant_annotation_indices])\n\n if 'dontcare' in used_classes:\n dont_care_indices = [i for i,\n x in enumerate(img_filtered_annotations['type'])\n if x == 'dontcare']\n\n # bounding box format [y_min, x_min, y_max, x_max]\n all_boxes = np.stack([img_filtered_annotations['2d_bbox_top'],\n img_filtered_annotations['2d_bbox_left'],\n img_filtered_annotations['2d_bbox_bottom'],\n img_filtered_annotations['2d_bbox_right']],\n axis=1)\n\n ious = iou(boxes1=all_boxes,\n boxes2=all_boxes[dont_care_indices])\n\n # Remove all bounding boxes that overlap with a dontcare region.\n if ious.size > 0:\n boxes_to_remove = np.amax(ious, axis=1) > 0.0\n for key in img_all_annotations.keys():\n img_filtered_annotations[key] = (\n img_filtered_annotations[key][np.logical_not(boxes_to_remove)])\n\n return img_filtered_annotations", "title": "" }, { "docid": "f85bf42e5617a2af95e647dacf9ea2f6", "score": "0.5035401", "text": "def dontfeedpet(self):\n self.mypet.fless()", "title": "" }, { "docid": "76af6ff32de5ddec3d926ad6a6112f20", "score": "0.50229496", "text": "def excludeFromDump(self):\n pass", "title": "" }, { "docid": "1dbf4b95263c7aa8c048eac30394af80", "score": "0.5022236", "text": "def consume_metric_or_not(vim_type, metric_type):\n return metric_type in METRICS_WHITE_LIST.get(vim_type, [])", "title": "" }, { "docid": "22d8516d64f741c55d0d8f468e0f9294", "score": "0.50193727", "text": "def __filter(self, inset, gtype):\n \n if gtype != 'all':\n if verbose:\n print(\"Filter type {0} in collections\".format(gtype))\n remove_list = []\n for item in inset:\n do_transact = True\n while(do_transact):\n game = self.game(name=item)\n \n if game is None:\n continue\n \n do_transact = False\n \n isexpansion = game.expansion\n \n if ((gtype == 'expansion' and not isexpansion) or\n (gtype == 'base' and isexpansion)):\n remove_list.append(item)\n\n [inset.discard(item) for item in remove_list]\n \n return inset", "title": "" }, { "docid": "9bd41eb0f69f659fd6ab820e977862b9", "score": "0.5014455", "text": "def dontcleanpet(self):\n self.mypet.cless()", "title": "" }, { "docid": "2d55fa3ea75356e69856f41d66fff71c", "score": "0.5000527", "text": "def filter_excludes(data, loci):\n ## Get all the samples to exclude. The 'or' conditional is a hack to account\n ## for the fact that paramsdict may either be an empty string or a list of\n ## names to exclude. 
List and \"\" don't append right, so if you do this 'or'\n ## and either of the lists is empty you'll get [\"\", \"1B_0\"]\n excludes = (data.paramsdict[\"excludes\"] or [\"\"]) \\\n + (data.paramsdict[\"outgroups\"] or [\"\"])\n LOGGER.info(\"Excluding these individuals - {}\".format(excludes))\n\n count = 0\n for i, loc in enumerate(loci):\n\n ## Get the count of excludes for logging/stats\n count += len(filter(lambda x: x[0] in excludes, loc))\n \n ## Actually filter the little buggers\n loci[i] = filter(lambda x: x[0] not in excludes, loc)\n\n LOGGER.info(\"Filterered exclude/outgroup sequences - {}\".format(count))\n return loci", "title": "" }, { "docid": "1b183754cb485727b549de8559a7744c", "score": "0.49942797", "text": "def sanitize(self):\n for attribute in ['occupations', 'energies', 'coefficients']:\n array = getattr(self, attribute)\n selection = np.where(np.isnan(array))\n array[selection] = 0.0\n\n selection = np.where(self.types == '-')\n self.types[selection] = 's'", "title": "" }, { "docid": "efe6c49e89365b7112714933f4ed408c", "score": "0.49897572", "text": "def test_parse_exclude_params_type_success(self):\n types = [\"pod\", \"storage\"]\n for tag_type in types:\n exclude_params = {\"type\": None}\n exclude_params[\"type\"] = tag_type\n serializer = OCPExcludeSerializer(data=exclude_params)\n self.assertTrue(serializer.is_valid())", "title": "" }, { "docid": "ae994cd7ca76b56644a4e91070eeb513", "score": "0.49832016", "text": "async def test_nr_of_tests_without_tests(self):\n json = dict(component=dict(measures=[]))\n response = await self.collect(self.metric, get_request_json_return_value=json)\n self.assert_measurement(\n response, value=None, total=None, parse_error=\"KeyError\", landing_url=self.tests_landing_url)", "title": "" }, { "docid": "ecf078381afbd2db4a897b871e919cdc", "score": "0.49816883", "text": "def filter_from_warnings(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "364ddd9a8adc75c3726122bb32b52833", "score": "0.4960375", "text": "def _disable_all_quantizers(self):\n for quantsim_wrapper in self._module_to_quantsim_wrapper_dict.values():\n for quantizer in quantsim_wrapper.input_quantizers + quantsim_wrapper.output_quantizers:\n quantizer.enabled = False\n quantizer.use_symmetric_encodings = False\n for param_quantizer in quantsim_wrapper.param_quantizers.values():\n param_quantizer.enabled = False\n param_quantizer.use_symmetric_encodings = False", "title": "" }, { "docid": "01130c22a7408c646bc71806ee45158e", "score": "0.4932181", "text": "def ignore( self, other ):\r\n if isinstance( other, Suppress ):\r\n if other not in self.ignoreExprs:\r\n self.ignoreExprs.append( other )\r\n else:\r\n self.ignoreExprs.append( Suppress( other ) )\r\n return self", "title": "" }, { "docid": "6218311b95fad0f44c61d073e7ad93ab", "score": "0.49310634", "text": "def test_ignore_excluded_fields(self):\n class ExcludedFieldSerializer(serializers.ModelSerializer):\n class Meta:\n model = UniquenessTogetherModel\n fields = ('id', 'race_name',)\n serializer = ExcludedFieldSerializer()\n expected = dedent(\"\"\"\n ExcludedFieldSerializer():\n id = IntegerField(label='ID', read_only=True)\n race_name = CharField(max_length=100)\n \"\"\")\n assert repr(serializer) == expected", "title": "" }, { "docid": "018cf149195a76bbc1f6b3d389082e97", "score": "0.4927664", "text": "def remove_unwanted_tags(self):\n keep_tags = [\"artist\", \"albumartist\", \"title\", \"tracknumber\", \"album\", \"discnumber\", \"date\", \"genre\"]\n for k in (k for k 
in self.etags.keys() if k not in keep_tags):\n self.remove_tag(k)", "title": "" }, { "docid": "738ba8a9f8306842a3aedf065677a885", "score": "0.49266225", "text": "def getIgnored():", "title": "" }, { "docid": "acb435a6e4b40049371e55db4ffde4b4", "score": "0.4919252", "text": "def _infer_discard_events(pfx_event):\n\n inferences = []\n\n if pfx_event.has_tag(\"recurring-pfx-event\"):\n inferences.append(\n Inference(\n inference_id=\"hide-recurring-pfx-event\",\n explanation=\"recurring event; hide event\",\n suspicion_level=-1,\n confidence=100,\n labels=[LABEL_HIDE]\n )\n )\n\n if pfx_event.has_tag(\"short-prefix\"):\n inferences.append(\n Inference(\n inference_id=\"hide-short-prefix\",\n explanation=\"prefix is too large (less specific than /8); hide event\",\n suspicion_level=-1,\n confidence=100,\n labels=[LABEL_HIDE]\n )\n )\n\n # if pfx_event.has_tag(\"subpfx-moas\") or pfx_event.has_tag(\"superpfx-moas\"):\n # inferences.append(\n # Inference(\n # inference_id=\"hide-moas-caused-submoas\",\n # explanation=\"a moas event that also causes a submoas; hide\",\n # suspicion_level=-1,\n # confidence=100,\n # labels=[LABEL_HIDE]\n # )\n # )\n\n if pfx_event.has_tag(\"submoas-covered-by-moas-subpfx\") or \\\n pfx_event.has_tag(\"submoas-covered-by-moas-superpfx\"):\n inferences.append(\n Inference(\n inference_id=\"hide-submoas-covered-by-moas\",\n explanation=\"This is a MOAS in the first place\",\n suspicion_level=-1,\n confidence=100,\n labels=[LABEL_HIDE]\n )\n )\n\n if pfx_event.has_tag(\"no-newcomer\") and pfx_event.has_tag(\"less-origins\"):\n inferences.append(\n Inference(\n inference_id=\"hide-shrinking-event\",\n explanation=\"a shrinking moas event; hide event\",\n suspicion_level=-1,\n confidence=100,\n labels=[LABEL_HIDE]\n )\n )\n\n return inferences", "title": "" }, { "docid": "380613d84ca95e36c612d8ac06463e10", "score": "0.49097455", "text": "def ignore( self, other ):\r\n if isinstance( other, Suppress ):\r\n if other not in self.ignoreExprs:\r\n self.ignoreExprs.append( other.copy() )\r\n else:\r\n self.ignoreExprs.append( Suppress( other.copy() ) )\r\n return self", "title": "" }, { "docid": "0381601b43f2b3d894c458c1faf43d3b", "score": "0.4898034", "text": "def ExcludeVariants(self, type_name, variant_to_exclude):\n # Find the list of operand with type `type_name`.\n relevant_operands = filter(lambda operand: operand.type_name == type_name,\n self)\n for operand in relevant_operands:\n # Remove the intersection of the existing variants and variants we do not\n # want.\n for variant in set(operand.variants) & set(variant_to_exclude):\n operand.variants.remove(variant)", "title": "" }, { "docid": "9cbf291c2ef5c49ca1aca1603e334b6f", "score": "0.489669", "text": "def __neg__(self):\n other = None\n return ibmdbpy.aggregation.aggregate_idadf(self, \"neg\", other)", "title": "" }, { "docid": "17b841f93afe04699f9d4184ea5c6363", "score": "0.48855612", "text": "def find_cols_to_exclude(self, df):\n lst = []\n for col in df.columns:\n if (\n \"address\" in str(col)\n or \"first_name\" in str(col)\n or \"last_name\" in str(col)\n or \"username\" in str(col)\n or \"_id\" in str(col)\n or \"date\" in str(col)\n or \"time\" in str(col)\n ):\n lst.append({col: \"Considering excluding because potential PII column.\"})\n elif df[col].isnull().sum() / float(df.shape[0]) >= 0.5:\n lst.append(\n {\n col: \"Considering excluding because {}% of column is null.\".format(\n (df[col].isnull().sum() / float(df.shape[0]) * 100.0)\n )\n }\n )\n elif len(df[col].unique()) <= 1:\n lst.append(\n {\n 
col: \"Considering excluding because column includes only one value.\"\n }\n )\n elif df[col].dtype == \"datetime64[ns]\":\n lst.append(\n {col: \"Considering excluding because column is a timestamp.\"}\n )\n elif df[col].dtype not in [\"object\", \"bool\"]:\n if df[col].var() < 0.00001:\n lst.append(\n {\n col: \"Considering excluding because column variance is low ({})\".format(\n df[col].var()\n )\n }\n )\n elif df[col].dtype in [\"object\", \"bool\"]:\n if len(df[col].unique()) > 200:\n lst.append(\n {\n col: \"Considering excluding because object column has large number of unique values ({})\".format(\n len(df[col].unique())\n )\n }\n )\n\n [print(x) for x in lst]\n\n return lst", "title": "" }, { "docid": "3ec6d47b81deb22388d3a57727f214c5", "score": "0.48781773", "text": "def test_extra_info_rows_without_metric_source(self):\n project = domain.Project()\n violation_metric = metric.ViolationSuppressions(subject=self.__subject, project=project)\n result = violation_metric.extra_info_rows()\n\n self.assertEqual([], result)", "title": "" }, { "docid": "f183774aba8ad98ad596c57653ee4c17", "score": "0.4875181", "text": "def _not_included(self):", "title": "" }, { "docid": "814492973e0ececca690417d69dce2e2", "score": "0.48491505", "text": "def ignore( self, other ):\r\n if isinstance(other, basestring):\r\n other = Suppress(other)\r\n\r\n if isinstance( other, Suppress ):\r\n if other not in self.ignoreExprs:\r\n self.ignoreExprs.append(other)\r\n else:\r\n self.ignoreExprs.append( Suppress( other.copy() ) )\r\n return self", "title": "" }, { "docid": "73ee36b5b65cc273d266647b4bb78f20", "score": "0.48484337", "text": "def everything_except(\n excluded_types: type | tuple[type, ...],\n) -> SearchStrategy[Any]:\n if not isinstance(excluded_types, tuple):\n excluded_types = tuple([excluded_types])\n\n checked_types = []\n for excluded_type in excluded_types:\n try:\n isinstance(0, excluded_type)\n checked_types.append(excluded_type)\n except TypeError:\n continue\n\n if len(checked_types) == 0:\n return from_type(type).flatmap(from_type)\n\n return (\n from_type(type)\n .flatmap(from_type)\n .filter(lambda x: not isinstance(x, tuple(checked_types)))\n )", "title": "" }, { "docid": "3272b6b12fce94918a1fa402046c047b", "score": "0.48466238", "text": "def without_overlaps(self, other_tier):\n span_groups = self.group_spans_by_containing_span(other_tier,\n allow_partial_containment=True)\n result = []\n for span, group in span_groups:\n if len(group) == 0:\n result.append(span)\n return AnnoTier(result)", "title": "" }, { "docid": "ec3e7f89eea56d6975340fcf19fedcce", "score": "0.48459032", "text": "def filter_empty_scores(self):\n for tune in self.tunes:\n if len(tune.score) == 0:\n print(f\"No input data for {tune.title}\")\n self.tunes.remove(tune)", "title": "" }, { "docid": "8b5378059d46cf2bc79c80ca9ab68250", "score": "0.4840246", "text": "def no_gates(self):\n combined = self.trivials\n if self.gated: combined += self.gated\n return enn.FieldType(self.gspace, combined.representations)", "title": "" }, { "docid": "430b634ce8ef3a42fe670cfc93dd8cd1", "score": "0.48338276", "text": "def _remove_empty_systems(self):\n for sys in list(self.meta[\"obstypes\"].keys()):\n if sys not in self.data[\"text\"][\"system\"]:\n log.debug(f\"No observation given for GNSS {sys!r}. 
GNSS {sys!r} is removed from Dataset.\")\n del self.meta[\"obstypes\"][sys]", "title": "" }, { "docid": "fab0e0592808bfb721c178224ba9d6dc", "score": "0.4831567", "text": "def test_blacklist(self, insertions, genes):\n\n annotator = WindowAnnotator.from_window_size(\n genes, window_size=20000, blacklist={'ENSMUSG00000026510'})\n annotated = list(annotator.annotate(insertions))\n\n assert 'gene_name' not in annotated[0].metadata", "title": "" }, { "docid": "2c03c999258f84b00b94d990b0dc4ed5", "score": "0.48266947", "text": "def ignoreChecks(self):\r\n \r\n return self._ignoreChecks", "title": "" }, { "docid": "57f2ebfc6a54467e8e860eceba885fa0", "score": "0.48249123", "text": "def uncollect_if_test_sampler(**kwargs):\n clf_dataset_name = kwargs['rf_classifier'].name.split(\"_\")[0] # \"adult_data\" -> \"adult\"\n exp_dataset_name = kwargs['explainer'].name.split(\"_\")[1] # \"at_adult_explainer\" -> \"adult\"\n dataset_name = kwargs['dataset'].name.split(\"_\")[0] # \"adult_data\" -> \"adult\"\n\n conditions = [\n len({clf_dataset_name, exp_dataset_name, dataset_name}) != 1,\n ]\n\n return any(conditions)", "title": "" }, { "docid": "46975340c13acaedcb75a371af5c5802", "score": "0.4824228", "text": "def Exclude_Outliers_Checkbox(self):\n if self.exOutliersCheckbox.isChecked():\n self.exclude1T1Checkbox.setChecked(False)\n\n if self.gui_dict[\"gui_function\"][\"hydrationEdits\"]:\n self.Hydration_Calculator()\n else:\n pass", "title": "" }, { "docid": "83f20213edd5c233432f8ce1c5af8c8d", "score": "0.48221034", "text": "def nan_filter(self):\r\n\r\n for att_name in self.required_catalog_atts:\r\n # treat sky coordinates differently form the other arrays\r\n if att_name == \"coords\":\r\n inds = (\r\n ~np.isnan(self.coords.data.lon)\r\n & ~np.isnan(self.coords.data.lat)\r\n & ~np.isnan(self.coords.data.distance)\r\n )\r\n else:\r\n # all other attributes should be ndarrays or quantity ndarrays\r\n # in either case, they should have a dtype\r\n att = getattr(self, att_name)\r\n if np.issubdtype(att.dtype, np.number):\r\n inds = ~np.isnan(att)\r\n elif np.issubdtype(att.dtype, str):\r\n inds = att != \"\"\r\n elif att.dtype == bool:\r\n pass\r\n else:\r\n warnings.warn(\r\n f\"Cannot filter attribute {att_name} of type {att.dtype}\"\r\n )\r\n\r\n # only need to do something if there are any False in inds:\r\n if not (np.all(inds)):\r\n self.revise_lists(np.where(inds)[0])", "title": "" }, { "docid": "17a208b3eaa7def530d6a77f05b5d213", "score": "0.47985053", "text": "def test_criteria_not_split_if_no_type(self):\n criteria = {'summary': 'test', Key.REMAINING_TIME: '2',\n 'id': 'not in (1, 2, 3)'}\n self.assert_none(self.manager._split_ticket_type(criteria))", "title": "" }, { "docid": "c88b95d2763a792aad481c9c0cd66570", "score": "0.47911814", "text": "def keep_nutri_g_only(self):\n prod_list = self.result_list\n temp = [x for x in prod_list if 'nutrition_grades' in x.keys()]\n self.result_list = temp", "title": "" }, { "docid": "7eca033d9b5036bbfcec64cd93da0b58", "score": "0.47862098", "text": "def removeWeight(self, weight):\n if weight in self.weights:\n self.weights.remove(weight)\n\n for s in self.sampleList:\n if not s.isData and not s.isQCD and not s.isDiscovery:\n if weight in s.weights:\n s.removeWeight(weight)\n\n for syst in self.systDict.values():\n if syst.type == \"weight\":\n if weight in syst.high:\n syst.high.remove(weight)\n if weight in syst.low:\n syst.low.remove(weight)\n return", "title": "" }, { "docid": "f3a7a9a97d5253b5ba570a231375089c", "score": "0.47845492", "text": "def 
prune_samplers(self,leverage_sum):\n keys_to_del = []\n for r_id in self.sampler.keys():\n u = self.sampler[r_id]['uniform_key']\n lev_score = self.sampler[r_id]['leverage']\n lev_ratio = lev_score / (lev_score + leverage_sum)\n \n # If the uniform weight is too large then remove r_id from the sampler.\n # There is probably a better way to do this to avoid the auxiliary list\n if u > lev_ratio:\n keys_to_del.append(r_id)\n for k in keys_to_del:\n del self.sampler[k]", "title": "" }, { "docid": "92ab194e7e84250a7669ba1bfc13a0f3", "score": "0.47751665", "text": "def apply_preference_excludes(self, qs):\n if not self.show_gay_females:\n qs = qs.exclude(gender='F', sexual_preference=2)\n if not self.show_straight_males:\n qs = qs.exclude(gender='M', sexual_preference=1)\n if not self.show_straight_females:\n qs = qs.exclude(gender='F', sexual_preference=1)\n if not self.show_gay_males:\n qs = qs.exclude(gender='M', sexual_preference=2)\n if not self.show_bisexual_females:\n qs = qs.exclude(gender='F', sexual_preference=3)\n if not self.show_bisexual_males:\n qs = qs.exclude(gender='M', sexual_preference=3)\n\n return qs", "title": "" }, { "docid": "16aa201142d205e480aa2c797be485f3", "score": "0.47750944", "text": "def clearExcluded():\n excluded.clear()", "title": "" }, { "docid": "d230a20f677ce92e7a45a50d72123c5c", "score": "0.4773189", "text": "def _exclude_modules_from_quantization(model: torch.nn.Module, sim: QuantizationSimModel,\n modules_to_ignore: List[torch.nn.Module]):\n name_to_quant_wrapper_dict = {}\n for name, module in sim.model.named_modules():\n name_to_quant_wrapper_dict[name] = module\n\n module_to_name_dict = {}\n for name, module in model.named_modules():\n module_to_name_dict[module] = name\n\n quant_wrappers_to_ignore = []\n for module in modules_to_ignore:\n name = module_to_name_dict[module]\n quant_wrapper = name_to_quant_wrapper_dict[name]\n quant_wrappers_to_ignore.append(quant_wrapper)\n\n sim.exclude_layers_from_quantization(quant_wrappers_to_ignore)", "title": "" }, { "docid": "a7928aaac5977bb88aa3186764268a84", "score": "0.47711077", "text": "def exclude(self, codes, axis='x'):\n self._missingfy(codes, axis=axis, keep_base=False, inplace=True)\n return self", "title": "" }, { "docid": "42144ca224e886884da3d371eca41d3f", "score": "0.47650152", "text": "def ignored_item(item):\n return item['name'].upper() == 'SPARE' or 'x' in item['fmt']", "title": "" }, { "docid": "87f6fe07435563e586fb6be6800c5f01", "score": "0.47649622", "text": "def test_no_listing_exclusions(self):\n self.validate_class_hierarchy(TestedExclusionTypes.NoExclusions)\n self.validate_namespace_listings(TestedExclusionTypes.NoExclusions)\n self.validate_file_listings()\n self.checkAllFilesIncluded()", "title": "" }, { "docid": "3f6e5124bcb6d72f2ff66f6638f7e2e3", "score": "0.47648185", "text": "def exclude_tapebumps(tbd, data, extra):\n import numpy\n # I'm sure it doesn't matter, but add an extra 0.5 pixel to the slop because the tape bump\n # values are in terms of which pixels to include as part of the tape bump. 
So the edges\n # are an extra 0.5 pixel outside of that.\n extra += 0.5\n x = data['X_IMAGE']\n y = data['Y_IMAGE']\n masks = [(y>tb[0]-extra) & (x>tb[1]-extra) & (y<tb[2]+extra) & (x<tb[3]+extra) for tb in tbd]\n mask = numpy.any(masks, axis=0)\n if sum(mask) > 0:\n print ' masking %d stars for being in or near a bump'%sum(mask)\n print ' tapebumps = ',tbd\n print ' excluded x,y = ',zip(x[mask],y[mask])\n return data[numpy.logical_not(mask)]", "title": "" }, { "docid": "179c0dacd412aaeaa35cc09419ebed2b", "score": "0.4762217", "text": "def test_counts_exclude_tag_without_decisions_needed(self):\n extra_tag = factories.TagFactory.create()\n extra_tagged_trait = factories.TaggedTraitFactory.create()\n study_response = factories.StudyResponseFactory.create(status=models.StudyResponse.STATUS_DISAGREE)\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('grouped_study_tag_counts', context)\n counts = context['grouped_study_tag_counts']\n self.assertEqual(len(counts), 1) # One study.\n study1 = counts[0]\n self.assertEqual(study1[1][0]['tt_total'], 1)\n self.assertEqual(study1[1][0]['tt_decision_required_count'], 1)\n self.assertEqual(len(study1[1]), 1) # Only one tag.", "title": "" }, { "docid": "2fccea1839cd72c65a9d473f734b2cee", "score": "0.4748887", "text": "def test_trace_blacklist():\n trace_factory.get_trace().url_patterns_to_ignore = set(('test.net', 'test2.net'))\n assert epsagon.http_filters.is_payload_collection_blacklisted('http://www.test.net')\n assert epsagon.http_filters.is_payload_collection_blacklisted('http://www.bla.test.net')\n assert not epsagon.http_filters.is_payload_collection_blacklisted('http://www.test.new.net')\n trace_factory.get_trace().url_patterns_to_ignore = set()\n assert not epsagon.http_filters.is_payload_collection_blacklisted('http://www.test.net')\n assert not epsagon.http_filters.is_payload_collection_blacklisted('http://www.bla.test.net')", "title": "" }, { "docid": "ae55291cb864b31c270404c86ad44d77", "score": "0.47418103", "text": "def test_filtered_warning(warning_fixture):\n warnings.filterwarnings(\"ignore\", category=SuspiciousUsageWarning)\n look_out = network.Bus()\n with warnings.catch_warnings(record=True) as w:\n network.Sink(outputs={look_out: \"A typo!\"})\n assert len(w) == 0", "title": "" }, { "docid": "556bc0a3d9cc4714e3a681c0635459c1", "score": "0.47374457", "text": "def clean_panel(self):\n if 'softmax' in self.args.MAB_algo and not self.explanation_type:\n expl_tuples = set()\n forbidden_groupBy = list()\n forbidden_aggregation = list()\n for expl, attributes_list in self.explanations_to_apply.items():\n if 'groupBy' in expl:\n if 'remove' in expl:\n forbidden_groupBy = attributes_list\n self.panel.groupBy = [att for att in self.panel.groupBy if att not in attributes_list]\n if 'softmax' in self.args.MAB_algo and not self.explanation_type:\n expl_tuples.add(('-', 'groupBy'))\n else:\n self.panel.groupBy += attributes_list\n if 'softmax' in self.args.MAB_algo and not self.explanation_type:\n expl_tuples.add(('+', 'groupBy'))\n elif 'aggregation' in expl:\n if 'remove' in expl:\n forbidden_aggregation = attributes_list\n for att in attributes_list:\n del self.panel.aggregates[att]\n if 'softmax' in self.args.MAB_algo and not self.explanation_type:\n expl_tuples.add(('-', 'aggregation'))\n else:\n for agg_att in self.explanations_to_apply['aggregation_to_add']:\n functions = choose_functions(agg_att)\n # if 'sum' in functions and len(functions) > 1:\n # functions.remove('sum')\n 
self.panel.aggregates[agg_att] = functions\n if 'softmax' in self.args.MAB_algo and not self.explanation_type:\n expl_tuples.add(('+', 'aggregation'))\n else:\n for agg_att, functions in attributes_list.items():\n for func in functions:\n if func in self.panel.aggregates[agg_att]:\n self.panel.aggregates[agg_att].remove(func)\n if 'softmax' in self.args.MAB_algo and not self.explanation_type:\n expl_tuples.add(('-', func))\n else:\n self.panel.aggregates[agg_att].append(func)\n if 'softmax' in self.args.MAB_algo and not self.explanation_type:\n expl_tuples.add(('+', func))\n self.panel.attributes_to_vector()\n self.ranking.calculate_ranking('groupBy', self.panel, self.diversity, forbidden_groupBy)\n self.ranking.calculate_ranking('aggregation', self.panel, self.diversity, forbidden_aggregation)\n if 'softmax' in self.args.MAB_algo:\n if self.explanation_type: # if MAB iteration\n self.applied_explanations = [self.explanation_type]\n self.explanation_type = None\n else: # if user_explanation iteration\n self.applied_explanations = [expl for expl in self.explanations if (expl.remove_or_add, expl.dimension) in expl_tuples]\n \n return forbidden_groupBy, forbidden_aggregation", "title": "" }, { "docid": "e4fcc8f7d847e3f4936693b218c265d1", "score": "0.472435", "text": "def rule_nonsoundness_from_specialization_nonsoundness(\r\n general: InferenceRule, specialization: InferenceRule, model: Model) \\\r\n -> Model:\r\n assert specialization.is_specialization_of(general)\r\n assert not evaluate_inference(specialization, model)\r\n # Task 4.9\r", "title": "" }, { "docid": "acee3152e400f897b585b9e280bc7e8b", "score": "0.47158518", "text": "def _remove_empty_images(self):\n non_empty_images = set()\n for obj in self.obj_annotations.values():\n non_empty_images.add(obj[\"image_id\"])\n self.samples = [sample for sample in self.samples if sample in non_empty_images]", "title": "" }, { "docid": "07fa013c756a9ea29a8cdc21a7a88baf", "score": "0.47128284", "text": "def filter_out_vulnerability(self, vuln):\n return vuln", "title": "" }, { "docid": "335e6289d2ab513e8835877618400e82", "score": "0.47122744", "text": "def __filter_lfhu__(self):\n self.__filter_lfh__()\n self.__filter_unknown__()", "title": "" }, { "docid": "5c10c11c2a4e2c104d5b66c885bd3fc9", "score": "0.47089565", "text": "def ignore(self, other):\n if isinstance(other, basestring):\n other = Suppress(other)\n\n if isinstance(other, Suppress):\n if other not in self.ignoreExprs:\n self.ignoreExprs.append(other)\n else:\n self.ignoreExprs.append(Suppress(other.copy()))\n return self", "title": "" }, { "docid": "a398686b3283ba34dcb26a99ea1fa361", "score": "0.47047096", "text": "def remove_grains_not_in_map(self):\n _,not_in_map,_ = self.compute_grains_map_table_intersection()\n self.remove_grains_from_table(not_in_map)\n return", "title": "" }, { "docid": "7a8242277795fe00f0242837bfd7c956", "score": "0.47033793", "text": "def is_area_ignored(data):\r\n x, y, screen = data[\"x\"], data[\"y\"], data[\"display\"]\r\n if screen in self.rois and not any(in_area((x, y), a) for a in self.rois[screen]) \\\r\n or screen in self.rods and any(in_area((x, y), a) for a in self.rods[screen]):\r\n return True\r\n return False", "title": "" }, { "docid": "eafe966ad1acdca539fff5de5905236b", "score": "0.47027546", "text": "def test_miss_shot_type():\n with session_scope() as session:\n all_shot_types = session.query(distinct(Miss.shot_type)).all()\n all_shot_types = [shot_type for (shot_type,) in all_shot_types]\n for shot_type in all_shot_types:\n # null is an 
acceptable shot type, too\n if shot_type is None:\n continue\n assert shot_type in VALID_SHOT_TYPES", "title": "" }, { "docid": "f0fe44b23ad4a353c3ad0c4206567150", "score": "0.47023416", "text": "def _collect_db_metrics(self, whitelist):\n database_type = self._cfg['databaseConfiguration']['databaseType']\n if database_type == 'influxdb':\n pass\n elif database_type == 'vertica':\n self._add_vertica_metrics(whitelist)\n else:\n log.warn('Failed finding database type in %s', self.CONFIG_FILE)", "title": "" }, { "docid": "b0725a63addc8c08d6ed9712e26ecd3c", "score": "0.469623", "text": "def testNotUgly(self):\n for nonugly in self.nonUglys:\n result = UglyNumbers.is_ugly(nonugly)\n self.assertFalse(result)", "title": "" }, { "docid": "26228e9a289f2f9e0e01f2c2e7e232d7", "score": "0.46953133", "text": "def test_subfilter_invalid_ignore(db_session):\n query_builder = ModelResourceQueryBuilder()\n query = db_session.query(Album)\n subfilters = {\n \"tracks\": SubfilterInfo(\n filters={\"track_id\": {\"$bad\": 5}}\n )\n }\n query = query_builder.apply_subquery_loads(\n query=query,\n resource=AlbumResource(\n session=db_session),\n subfilters=subfilters,\n embeds=[],\n dialect_override=False,\n strict=False\n )\n result = query.all()\n assert len(result) > 0", "title": "" }, { "docid": "f03f7712dfe296d204b1f5e98db9abff", "score": "0.4692336", "text": "def removeWeight(self, weight):\n if weight in self.weights:\n self.weights.remove(weight)\n for syst in self.systDict.values():\n if syst.type == \"weight\":\n if weight in syst.high:\n syst.high.remove(weight)\n if weight in syst.low:\n syst.low.remove(weight)\n return", "title": "" }, { "docid": "5350acd980e546f17a3d85ddea805222", "score": "0.46903497", "text": "def test_no_blacklist(self, data):\n # pylint: disable=no-value-for-parameter\n data.draw(data.draw(dbus_signature_strategy()))", "title": "" }, { "docid": "544966776092db7b3f1fc11143b1efb0", "score": "0.46798372", "text": "def summary_only():", "title": "" }, { "docid": "2dbb7080650fbc4337940cc8bbe36130", "score": "0.46749455", "text": "def noncreatures(self):\n if self.api_type == 'scryfall':\n return self.where(type_line='creature', invert=True)\n else:\n return self.where(type=['creature'], invert=True)", "title": "" } ]
bd8e878260d0ab711a53aff554de6151
Selects a piece when it is clicked.
[ { "docid": "2075c1dd5a97e285e31d5632515370a7", "score": "0.61129594", "text": "def handleMouseRelease(self, event):\n \n # if the piece is already selected when it is clicked, it deselects it\n # and changes its border color back\n if self._selected:\n self.changeColorBack()\n self._board.deselect()\n self._selected = False\n return\n \n # returns if there is already a piece selected\n if self._board.isSelected():\n return\n \n # if the piece is active, it activates it and tells the board that there\n # is a piece selected\n if self._active:\n self._board.select()\n self._selected = True\n self._back.setBorderColor('black')\n self._board.report(self)\n else:\n return", "title": "" } ]
[ { "docid": "219190222d15d19ce3881bf63f04aa43", "score": "0.7438641", "text": "def click(self, event) -> None:\n colsize = rowsize = 64\n\n # The canvas is 512 by 512.\n # But our board is 8 by 8\n # This converts our clicked position to what it is on the board.\n # (int floors the result of the division (quotient, I know))\n current_row = int(event.x / colsize)\n current_col = int(event.y / rowsize)\n\n print(\"You clicked {}, {}\".format(current_col, current_row))\n \n clicked_position = [current_col, current_row]\n piece = self.selected_piece\n piece_position = self.selected_piece.pos\n \n # If we already have a piece selected.\n if piece is not self.board.nothing:\n # move the piece\n self.move(piece_position, clicked_position)\n # deselect\n self.selected_piece = self.board.nothing\n # highlight nothing\n self.highlighted = []\n # refresh the board\n self.refresh()\n # draw the pieces (as they may have moved)\n self.draw_pieces()\n # If we have no selected piece.\n else:\n # select the piece and highlight the appropriate squares\n self.highlight(clicked_position)\n # refresh the board\n self.refresh()\n # no need to redraw pieces as they can't have moved.", "title": "" }, { "docid": "02b6df11cd6cd06eae889fa03523d07b", "score": "0.7349623", "text": "def select_piece(self):\n color = self.board.turn_color\n for x, rows in enumerate(self.buttons):\n for y, button in enumerate(rows):\n piece = self.board.find_piece(Position(x, y))\n if piece.color == color and \\\n piece.moves != [] and \\\n piece.moves != None:\n func = partial(self.show_moves, piece)\n button.configure(\n command=func\n )", "title": "" }, { "docid": "da6bad7f46a2feeb8e675c41104b0df8", "score": "0.71844727", "text": "def Select(self, row, col):\n piece = self.board.GetPiece(row, col)\n \n # If the selected piece is clicked again, it is deselected.\n if self.selectedPiece and self.selectedPiece == piece:\n self.selectedPiece.ToggleSelect()\n self.selectedPiece = None\n # Reset the valid moves because nothing is selected.\n self.validMoves = {}\n else:\n # If nothing is selected, then try to select the clicked piece.\n if not self.selectedPiece:\n # Can not select a piece at an empty location or the opposite player's\n # piece.\n if not piece or piece.color != self.turn:\n return\n\n # Selecting the clicked piece and calculating its valid moves.\n self.selectedPiece = piece\n self.selectedPiece.ToggleSelect()\n self.validMoves = self.board.ValidMoves(self.selectedPiece)\n # A piece is already selected. 
Try to move the selected piece to the location\n # which is currently clicked.\n else:\n # Can only move to an empty location and that too when it is one of the\n # valid moves.\n if not piece:\n if (row, col) in self.validMoves:\n self._move(row, col)\n self.selectedPiece.ToggleSelect()\n self.ChangePlayer() \n\n # If the same player's piece is selected again, then deselect the previous\n # piece and select this one.\n elif piece.color == self.turn:\n self.selectedPiece.ToggleSelect()\n self.selectedPiece = piece\n self.selectedPiece.ToggleSelect()\n self.validMoves = self.board.ValidMoves(self.selectedPiece)", "title": "" }, { "docid": "f0805d8eb3237d8b0c18eab3741ea17b", "score": "0.6904481", "text": "def clicked_selector(self,event):\n for plant in self.selector.plants:\n if plant.rect.collidepoint(event.pos) and plant.ready:\n if self.energy >= plant.cost:\n self.selector.select_plant(plant)\n self.plant_cursor = plant.image.copy()\n return True\n self.selector.selected = None", "title": "" }, { "docid": "ff4d275c1abc6d19c41eefb7d9204911", "score": "0.68031174", "text": "def select(group, x, y):\n for tower in group.get(): # get one tower each loop\n if tower.is_clicked(x, y): # detect if tower.is_clicked\n tower.get_selected(True)\n else:\n tower.get_selected(False)", "title": "" }, { "docid": "823c727c2cece0bb51e9257b9cf60e8c", "score": "0.6801079", "text": "def selected_piece(self, piece):\n\n\t\treturn Checkerpiece(piece.color, piece.x, piece.y)", "title": "" }, { "docid": "fa3d98875ea62c72c8c49f0f36b8fe79", "score": "0.67442137", "text": "def select_element(self):\n logging.debug(\"element selected\")\n if len(self._contents) > 0:\n self.to_background()\n self._contents[self.pointer][1]()\n self.to_foreground()\n if self.path_chosen:\n self.deactivate()\n else:\n self.to_foreground()", "title": "" }, { "docid": "f8abe2256810d4235e1bb73e694ba651", "score": "0.661268", "text": "def _start_selecting(self, event):\n self._selecting = True\n canvas = self._canvas\n x = canvas.canvasx(event.x)\n y = canvas.canvasy(event.y)\n self._sstart = (x, y)\n if not self._sobject:\n self._sobject = canvas.create_rectangle(\n self._sstart[0], self._sstart[1], x, y,\n dash=(3,5), outline='#0000ff'\n )\n canvas.itemconfigure(self._sobject, state=tk.NORMAL)", "title": "" }, { "docid": "dc51ab438da67792625f8871cc4865ec", "score": "0.66075176", "text": "def select_point(self, x):", "title": "" }, { "docid": "4a88502654d2c88f19cefb6f28dc94d5", "score": "0.65724677", "text": "def select_tile(self, event):\n height = len(self.tiles) // 3\n # Obtain coordinates of the selected tile\n tile_x = int(event.x / 64)\n tile_y = int(self.canvas.yview()[0] * height + event.y / 64)\n\n # Ensure selection is within bounds\n if tile_x < 0 or tile_y < 0:\n return\n elif tile_x > 3 or tile_y > height:\n return\n\n # Ensure selection is a valid tile\n if tile_y * 3 + tile_x >= len(self.tiles):\n return\n\n # Obtain the selected tile\n self.selected_index.set(tile_y * 3 + tile_x)\n\n # Redraw the selection box\n self.canvas.delete(\"selection_box\")\n self.canvas.create_rectangle((tile_x * 64, tile_y * 64, tile_x * 64 + 64, tile_y * 64 + 64),\n fill=\"blue\",\n outline=\"blue\",\n width=2,\n stipple=\"gray50\",\n tag=\"selection_box\")", "title": "" }, { "docid": "411ec01b0f78d4d2815f6740ddb0d563", "score": "0.6495575", "text": "def on_mouse_press(self, x, y, button, key_modifiers):\r\n if self.display_state == GAME_STARTED:\r\n\r\n pieces = arcade.get_sprites_at_point((x,y), self.board.pieces)\r\n if len(pieces) > 0 and 
pieces[0].type == self.turn:\r\n #draws lines\r\n self.board.get_valid_moves(pieces[0])\r\n self.state = SELECTED\r\n return\r\n \r\n if self.state == SELECTED:\r\n #piece selected by user\r\n tiles = arcade.get_sprites_at_point((x,y), self.tile_list)\r\n if len(tiles) > 0:\r\n #returns a boolean\r\n init_x = self.board.selected_piece.pos_x\r\n init_y = self.board.selected_piece.pos_y\r\n valid_move = self.board.move_piece(init_x, init_y, tiles[0].pos_x, tiles[0].pos_y, self.turn)\r\n if valid_move:\r\n self.state = not SELECTED\r\n self.winner = self.board.check_end_state(self.turn)\r\n self.turn *= -1\r\n\r\n #disable controls\r\n if self.winner != NONE:\r\n self.turn = NONE\r\n elif self.game_type == HUMAN_V_AI and self.turn == WHITE:\r\n self.handle_ai_move(init_x, init_y, tiles[0].pos_x, tiles[0].pos_y, self.turn)\r\n else:\r\n self.error_message = \"INVALID MOVE\"\r\n \r\n return\r\n elif self.display_state == SELECT_SIZE:\r\n click = arcade.get_sprites_at_point((x,y), self.select_size)\r\n if len(click) > 0:\r\n if click[0].center_y == SCREEN_HEIGHT * 0.75:\r\n self.start_game(6)\r\n else:\r\n self.start_game(8)\r\n elif self.display_state == SELECT_TYPE:\r\n click = arcade.get_sprites_at_point((x,y), self.select_size)\r\n if len(click) > 0:\r\n if click[0].center_y == SCREEN_HEIGHT * 0.75:\r\n self.game_type = HUMAN_V_HUMAN\r\n else:\r\n self.game_type = HUMAN_V_AI\r\n self.display_state = SELECT_SIZE", "title": "" }, { "docid": "b1d96aa54fdc77aca5d4fff61ea5f142", "score": "0.6465317", "text": "def _click(self, event=None):\n parent_name = event.widget.winfo_parent().split('.')\n\n self.sel_ind = None\n self.selected_row = None\n\n counts = 0\n _children = self.list_canvas.winfo_children()\n if self.selected_w is not None:\n try:\n self.selected_w.configure(relief='', borderwidth=0)\n _shade(self.selected_w.winfo_children())\n self.selected_w = None\n except Exception:\n pass\n\n for win_ in _children:\n w_name = win_.winfo_name()\n if parent_name[len(parent_name)-1] == w_name:\n self._select(win_)\n self.sel_ind = counts\n\n else:\n if counts == len(_children):\n self.selected_w = None\n counts = counts + 1", "title": "" }, { "docid": "b9fbdbf5a8befc725f8ec1581792defd", "score": "0.64467895", "text": "def TargetFromSelection(self):", "title": "" }, { "docid": "d9cc220de1c25c9253ad81f4fc44e890", "score": "0.644065", "text": "def get_piece_click(self, event):\n if self.DEBUG_PRINT_FUNCTIONS:\n pass;\n print \"got_piece_click\"\n if self.piece != None:\n self.c.itemconfig(self.piece_square, outline=\"green\", width=1)\n try:\n self.piece_square, self.piece = self.c.find_overlapping(event.x, event.y, event.x, event.y)\n print \"self.piece_square, self.piece: \", self.c.find_overlapping(event.x, event.y, event.x, event.y)\n except ValueError:\n return\n self.got_piece = 1\n\n if self.check_piece(): # positive numbers are failure, for check_piece\n self.piece_square = None\n self.piece = None\n self.got_piece = 0\n else:\n self.c.itemconfig(self.piece_square, outline=\"blue\", width=3)", "title": "" }, { "docid": "5f141e99244d7a0463fbb76054c5da08", "score": "0.6436179", "text": "def select_tile(self, event):\n # Obtain coordinates of the selected tile\n tile_x = int(event.x / 64)\n tile_y = int(self.selection_canvas.yview()[0] * self.selection_height + event.y / 64)\n\n # Ensure selection is within bounds\n if tile_x < 0 or tile_y < 0:\n return\n elif tile_x > 16 or tile_y > self.selection_height:\n return\n\n # Ensure selection is a valid tile\n if tile_y * 16 + tile_x >= 
len(self.selection_translator):\n self.selected_tile.set(-1)\n else:\n # Obtain the selected tile\n self.selected_tile.set(self.selection_translator[tile_y * 16 + tile_x])\n\n # Redraw the selection box\n self.selection_canvas.delete(\"selection_box\")\n self.selection_canvas.create_rectangle((tile_x * 64, tile_y * 64, tile_x * 64 + 64, tile_y * 64 + 64),\n fill=\"orange\",\n outline=\"orange\",\n width=2,\n stipple=\"gray50\",\n tag=\"selection_box\")\n\n # Update the currently open group pane\n if self.selected_group.get() != \"Select Group\":\n self.group_panes[self.selected_group.get()].set_tile(self.selected_tile.get())", "title": "" }, { "docid": "92a77133ddb1050993c187dd30e7f27c", "score": "0.64117575", "text": "def selectCell(self, event):\n x, y = event.x, event.y\n rows, cols = self.addPieceMenu.rows, self.addPieceMenu.cols\n mw, sc = self.addPieceMenu.marginWidth, self.addPieceMenu.cellSize\n if mw < x < mw + cols * sc and mw < y < mw + rows * sc:\n j = int(x - self.addPieceMenu.marginWidth) / \\\n self.addPieceMenu.cellSize\n i = int(y - self.addPieceMenu.marginWidth) / \\\n self.addPieceMenu.cellSize\n self.addPieceMenu.itemconfigure(self.addPieceMenu.cells[i][j], \\\n fill = self.addPieceMenu.emptyColor if \\\n self.addPieceMenu.shape[i][j] else self.addPieceMenu.color)\n self.addPieceMenu.shape[i][j] = not self.addPieceMenu.shape[i][j]", "title": "" }, { "docid": "376aa8f0a645738b7753a01c9667b0ee", "score": "0.6405179", "text": "def clickOn(self, selectElt):\n selectElt.super().click()", "title": "" }, { "docid": "2a1063e8980a117c7af1cf338314bc72", "score": "0.63924015", "text": "def selectVertex(self, selectable):\n if selectable == self.primarySelected: # Clears all vertices if original is reclicked\n self.unselectPrimary()\n\n elif self.primarySelected is not None: # Checks if original already clicked\n\n if self.secondarySelected is None: # What to do if there is no secondarySelected\n self.selectSecondary(selectable)\n\n elif selectable == self.secondarySelected: # If already primarySelected, unselect\n self.unselectSecondary()\n\n else: # If not primarySelected, unselect\n self.unselectSecondary()\n self.selectSecondary(selectable)\n\n else:\n self.primarySelected = selectable\n selectable.setColor(selectable.selectedColor)\n\n pygame.display.update()", "title": "" }, { "docid": "70eaf9095010df57b83ff7fb05153e2d", "score": "0.6370214", "text": "def selected(self, sel):\r\n pass", "title": "" }, { "docid": "5659d54d14953c1e374a68702a4f695f", "score": "0.6364428", "text": "def report(self, piece):\n self._selectedpiece = piece", "title": "" }, { "docid": "a6c9fba08d3cc37b2eef2d52cd0a120c", "score": "0.63300836", "text": "def clicked(self) -> None:\r\n self.selected = True\r\n self.color1 = COLOURS['yellow']\r\n for neighbour in self.neighbours:\r\n neighbour.selected = False\r\n neighbour.color1 = THECOLORS['white']", "title": "" }, { "docid": "3052f210deda2fef3e81cc92de42b645", "score": "0.6323697", "text": "def get_user_click(self):\n x, y = pygame.mouse.get_pos()\n # Determine if click is:\n # On bottom menu\n if y > 600:\n pass\n # On right side menu\n elif x > 600:\n pass\n # If on board:\n else:\n # Convert coordinates into space\n selected_space = self.convert_coordinates_to_space(x, y)\n # If piece is not already selected:\n if not self.curr_selected_piece:\n\n # Validate and set curr_selected_piece to this piece\n if self.is_piece_of_curr_player(selected_space):\n self.new_piece_selected(selected_space)\n\n # Else if piece already selected:\n else:\n # Determine 
if selected space is in possible moves\n\n # If space is current selected space\n if selected_space == self.curr_selected_piece.position:\n self.deselect_piece()\n\n # Else if space in possible moves:\n elif selected_space in self.curr_poss_moves:\n #### Check if piece is a king!!! ###\n # Check if selected space is king and in poss_castle_move\n if self.curr_selected_piece.name == 'King' and selected_space in self.chess_board.get_castle_moves_for_curr_player():\n # Castle that king\n self.add_move(self.curr_selected_piece.position, selected_space)\n self.chess_board.castle_king(self.curr_selected_piece, selected_space)\n\n else:\n # Move selected piece to this spot\n self.add_move(self.curr_selected_piece.position, selected_space)\n self.move_piece(self.curr_selected_piece, selected_space)\n\n if self.curr_selected_piece.name == 'Pawn' and selected_space[1] == 0 or selected_space[1] == 7:\n self.chess_board.board[selected_space[0]][selected_space[1]] = None\n self.chess_board.board[selected_space[0]][selected_space[1]] = Queen(self.chess_board.curr_player, selected_space)\n\n # Deselect current piece and remove poss moves\n self.deselect_piece()\n # Change current player\n self.change_curr_player()\n\n # Check for checkmate and get new list of all possible moves\n self.all_poss_moves = self.get_all_poss_moves()\n checkmate = True\n for piece_pos in self.all_poss_moves:\n if len(self.all_poss_moves[piece_pos]) != 0:\n checkmate = False\n if checkmate:\n self.draw_window()\n self.message_display('Checkmate!', (400, 300))\n winner = 'White' if self.chess_board.curr_player == 'b' else 'Black'\n self.message_display('%s wins!' % winner, (400, 400))\n pygame.display.update()\n time.sleep(2)\n quit()\n\n # Else if another piece of curr player:\n elif selected_space in [piece.position for piece in self.chess_board.get_curr_player_pieces()]:\n # Make that piece current selected piece\n self.new_piece_selected(selected_space)\n\n # Else (random non-selectable space):\n else:\n # Deselect current move\n self.deselect_piece()", "title": "" }, { "docid": "4cca87d6618a54c69462b0527c93274e", "score": "0.63132703", "text": "def click(self):\r\n self.clicked = True\r\n self.setBorder(YELLOW, 3)\r\n self.board.possibleMoves(self.number)", "title": "" }, { "docid": "a797c5d8b16318dc3f1439d9b308ab52", "score": "0.6306219", "text": "def __click_cell(self, event):\r\n x, y = event.x, event.y\r\n # The click must be inside the grid\r\n if margin < x < width - margin and margin < y < height - margin:\r\n self.canvas.focus_set()\r\n # Computes line and columns in fuction of the click\r\n row, col = int((y - margin) / side), int((x - margin) / side)\r\n # If it is already clicked, we deselect it\r\n if self.original[row, col] is None:\r\n self.row, self.col = row, col\r\n # Highlights the selected case\r\n self.__selection_click()", "title": "" }, { "docid": "4e5a835b62ed8b37ea90566c7ced8c18", "score": "0.63021004", "text": "def select_move(self, piece: Piece, position):\n self.reset_buttons()\n self.board.recalculate(piece, position, self.ask_promotion_type)\n self.board.delete_self_check()\n self.board.turn_counter += 1\n self.board.turn_color = int(not self.board.turn_color)\n self.draw()\n if self.board.check == True:\n self.board.check_mate = \\\n self.board.ischeckmate(self.board.turn_color)\n if self.board.check_mate == True:\n self.test_frame.configure(\n text='Check mate!'\n )\n else:\n self.test_frame.configure(text='Check!')\n else:\n message = Gui.turn_color_dict[self.board.turn_color] + \\\n ', it\\'s 
your turn'\n self.test_frame.configure(text=message)\n self.select_piece()", "title": "" }, { "docid": "6595462cae7b5c772cc68ce53c5065e0", "score": "0.63006", "text": "def MouseClicked(self, item):", "title": "" }, { "docid": "f32ff8057806554b123e594d8b924cd1", "score": "0.62851626", "text": "def click_event(self, event):\n self.start_x, self.start_y = self.event_to_coords(event)\n self.selected_image = event.widget.find_withtag('current')[0]", "title": "" }, { "docid": "28400b805c2fbaa82f1a4c090de3b532", "score": "0.62847763", "text": "def select(self):\n self.selected = True", "title": "" }, { "docid": "83ecd59a64cba17b236450f0336bdeab", "score": "0.6266126", "text": "def OnLeftClick(self, x, y, keys=0, attachment=0):\n shape = self.GetShape()\n print(shape.__class__, shape.GetClassName(), shape.a)\n canvas = shape.GetCanvas()\n dc = wx.ClientDC(canvas)\n canvas.PrepareDC(dc)\n\n if shape.Selected():\n shape.Select(False, dc)\n canvas.Redraw(dc)\n else:\n redraw = False\n shapeList = canvas.GetDiagram().GetShapeList()\n toUnselect = []\n for s in shapeList:\n if s.Selected():\n toUnselect.append(s)\n\n shape.Select(True, dc)\n\n if toUnselect:\n for s in toUnselect:\n s.Select(False, dc)\n canvas.Redraw(dc)", "title": "" }, { "docid": "70ddfce6097904384b3370e51aabcd89", "score": "0.6263467", "text": "def mouseClicked(self, e):", "title": "" }, { "docid": "e25af26163835ad4461c055ad3b4f839", "score": "0.6262412", "text": "def highlight_selected(self, coordinates):\n\n\t\tself.pieces[coordinates] = Checkerpiece(\"yellow\", *coordinates)", "title": "" }, { "docid": "30253a3ceaee7e3c77624931d8963159", "score": "0.62505174", "text": "def selectLine(lineSelected,lineToSelect):", "title": "" }, { "docid": "9b0d355e89bbea5a6947ee5cb480398b", "score": "0.62486845", "text": "def selection_clicked(self, event):\n # return if in not in selection mode \n if self._active != 'SELECT': return\n\n # If we're already in the middle of a selection, pressing another\n # button works to \"cancel\" \n if self._ids_selection != []:\n for selection_id in self._ids_selection:\n self.canvas.mpl_disconnect(selection_id)\n self.release(event)\n self.draw()\n self._selectionStart = None\n self._button_pressed = None\n self._ids_selection = []\n return\n\n if event.button == 1:\n self._button_pressed = 1\n elif event.button == 3:\n self._button_pressed = 3\n else:\n self._button_pressed = None\n return\n\n self._selectionStart = []\n \n x, y = event.x, event.y\n for a in self.canvas.figure.get_axes():\n if (x is not None and y is not None and a.in_axes(event)):\n self._selectionStart.append((x, y, a))\n\n if self.centeredSelection:\n id1 = self.canvas.mpl_connect('motion_notify_event', \n self.selection_move_centered)\n else:\n id1 = self.canvas.mpl_connect('motion_notify_event', \n self.selection_move)\n\n self._ids_selection = [id1]\n\n self.press(event)", "title": "" }, { "docid": "2c47939f2974804247ad6fc3319574e4", "score": "0.62416637", "text": "def SetSelection(self, selection):", "title": "" }, { "docid": "2c47939f2974804247ad6fc3319574e4", "score": "0.62416637", "text": "def SetSelection(self, selection):", "title": "" }, { "docid": "e60d5de21e55aecfb7c8eb2e95b110fb", "score": "0.6236269", "text": "def select(self): \n if self.point in range(len(self.playlist)):\n if self.playlist[self.point] in self.selectedSongs: \n try:\n self.selected = self.selectedSongs.index(self.playlist[self.point])\n self.Ui.AudioTrack.topLevelItem(self.selected).setSelected(True)\n except:\n self.selected = None\n if not 
self.fetch:\n try:\n self.Playlist_Window.Playlist.topLevelItem(self.point).setSelected(True)\n except:\n pass", "title": "" }, { "docid": "f5c8db2f1e0c190762a90cb494846dd0", "score": "0.62336105", "text": "def __selection_click(self):\r\n # Cleares the previously selected cell\r\n self.canvas.delete(\"cursor\")\r\n if self.row >= 0 and self.col >= 0:\r\n x0 = margin + self.col * side + 1\r\n y0 = margin + self.row * side + 1\r\n x1 = margin + (self.col + 1) * side - 1\r\n y1 = margin + (self.row + 1) * side - 1\r\n # Creates the rectangle\r\n self.canvas.create_rectangle(x0, y0, x1, y1, outline=\"#00bfff\", tags=\"cursor\")", "title": "" }, { "docid": "675d9152fa97870cab797137812ede8f", "score": "0.6225665", "text": "def get_piece_click(self, event):\n if self.DEBUG_PRINT_FUNCTIONS:\n pass; print \"got_piece_click\"\n if self.piece != None:\n self.c.itemconfig(self.piece_square, outline=\"black\", width=1)\n try:\n self.piece_square, self.piece=self.c.find_overlapping(event.x, event.y, event.x, event.y)\n except ValueError:\n return 0\n self.got_piece=1\n \n if self.check_piece(): #positive numbers are failure, for check_piece\n self.piece_square=None\n self.piece=None\n self.got_piece=0\n else:\n self.c.itemconfig(self.piece_square, outline=\"blue\", width=3)", "title": "" }, { "docid": "2775250d736502e8e784d4f739f38f6d", "score": "0.62237567", "text": "def play(self):\n\n\t\t# Variables for the piece selected and the turn counter.\n\n\t\tself.selected = None\n\t\tself.turn = 0\n\n\t\t# Infinite loop.\n\n\t\twhile True:\n\n\t\t\t# Calculate whose turn it is.\n\n\t\t\tif self.turn % 2 == 0:\n\t\t\t\tcolor = \"white\"\n\t\t\telse:\n\t\t\t\tcolor = \"red\"\n\n\t\t\t# Handle events.\n\n\t\t\tfor event in pygame.event.get():\n\n\t\t\t\t# If the user is pressing ESCAPE, exit the game.\n\n\t\t\t\tif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n\t\t\t\t\tsys.exit(0)\n\n\t\t\t\t# If the user is clicking their mouse... (this is where you start coding!)\n\n\t\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\n\t\t\t\t\t########## Your code starts here. ##########\n\t\t\t\t\t# Remember to consult the manual for help! #\n\t\t\t\t\t############################################\n\n\t\t\t\t\t# We need to find out which tile the user clicked... 
we need to get the coordinates.\n\n\t\t\t\t\tcoordinates = self.get_coordinates(event)\n\n\t\t\t\t\t# If the player selected their own non-highlighted piece...\n\t\t\t\t\t# (Remember to give valid inputs, see the manual!)\n\n\t\t\t\t\tif self.player_selected_their_own_piece(color, coordinates):\n\n\t\t\t\t\t\t# The current piece is the piece at the coordinate.\n\n\t\t\t\t\t\tpiece = self.pieces[coordinates]\n\n\t\t\t\t\t\t# Set self.selected to a copy of the selected piece.\n\n\t\t\t\t\t\tself.selected = self.selected_piece(piece)\n\n\t\t\t\t\t\t# Highlight the selected piece.\n\t\t\t\t\t\t# (Remember to give valid inputs, see the manual!)\n\n\t\t\t\t\t\tself.highlight_selected(coordinates)\n\n\t\t\t\t\t# If the player selected their own highted piece...\n\t\t\t\t\t# (Remember to give valid inputs, see the manual!)\n\n\t\t\t\t\telif self.player_selected_their_own_highlighted_piece(color, coordinates):\n\n\t\t\t\t\t\t# Unhighlight the piece.\n\n\t\t\t\t\t\tself.unhighlight_selected(coordinates)\n\n\t\t\t\t\t# Otherwise if a piece is highlighted and the player selects another tile...\n\n\t\t\t\t\telif self.piece_highlighted_and_player_selects_another_tile(color, coordinates):\n\n\t\t\t\t\t\t# If a piece move is successful...\n\n\t\t\t\t\t\tif self.move_highlighted_piece_to_was_successful(coordinates) == True:\n\n\t\t\t\t\t\t\t# Increment turns by 1.\n\n\t\t\t\t\t\t\tself.turn += 1\n\n\t\t\t\t\t################ Your code ends here. ######################\n\t\t\t\t\t# Good job! You're all done. You should run your code now. #\n\t\t\t\t\t############################################################\n\n\t\t\t# Show the display.\n\n\t\t\tself.show_display()", "title": "" }, { "docid": "41685a58706ec94da62059cf1102a821", "score": "0.6205957", "text": "def getClick(event, x, y, _, __):\n global _selectedPoints\n global _currentImage\n\n if event == cv2.EVENT_LBUTTONDOWN:\n _selectedPoints.append((float(x), float(y)))\n cv2.circle(_currentImage, (x, y), 5, (0, 255, 0))\n cv2.imshow(_getPointsWindowName, _currentImage)", "title": "" }, { "docid": "a1a91afc8eb93a3c70cc61ff60385b22", "score": "0.61956036", "text": "def select(self, color):\n\n self.selected = color\n\n if color: self.pan_to_color(color)\n\n self.queue_draw()\n self.emit('select-color', color)", "title": "" }, { "docid": "976eea6e39c5bb9a3a8631cbc1aa4d91", "score": "0.61948246", "text": "def item_clicked(self, index):\n self.item_index = index\n self.select_item()", "title": "" }, { "docid": "d5d2e0231ed477658de819f859aa2dc8", "score": "0.61879176", "text": "def select( self ):\n assert self.selected is False\n self.selected = True\n\n # call selection hook\n self.handle_select()", "title": "" }, { "docid": "8ec0514812969a276b2bb7afe78b5542", "score": "0.6182614", "text": "def select(self):\n self._selected = True", "title": "" }, { "docid": "1e0955e81085f9ffa92b41eba2db587e", "score": "0.617391", "text": "def select_item(self, keys):\n self.pos_list = self.make_select_item_pos_list()\n\n pos = self.pos_list[self.index]\n self.rect.x = pos[0] - 60\n self.rect.y = pos[1] + 20\n\n self.check_input(keys)", "title": "" }, { "docid": "2b739e6ca246d798ea1a2ec3c02e4172", "score": "0.6171045", "text": "def test_selection_select(cvi):\n cvi.canvas.add(cvi.item)\n selection = Selection(cvi.item, cvi.view)\n assert cvi.item not in cvi.view.selected_items\n selection.select()\n assert cvi.item in cvi.view.selected_items\n assert cvi.item is cvi.view.focused_item\n selection.unselect()\n assert cvi.item not in cvi.view.selected_items\n assert None is 
cvi.view.focused_item", "title": "" }, { "docid": "e556f1cf2b5b97569e2647e5cb2e2082", "score": "0.6160461", "text": "def highlight_piece(self, row, col, piece):\r\n square_id = self.get_square_id(row, col)\r\n self.highlight_square(row, col, colour=\"#646F40\")\r\n self.selected_piece = piece\r\n self.highlighted.append((row, col))\r\n for row_, col_ in self.get_possible_squares(row, col):\r\n piece, colour = self.square_contains_piece(row_, col_)\r\n # Move Square\r\n if colour is None:\r\n self.highlight_move_square(row_, col_, colour=\"#646F40\")\r\n # Piece Square\r\n else:\r\n self.highlight_piece_square(row_, col_, colour=\"#646F40\")\r\n self.highlighted.append((row_, col_))", "title": "" }, { "docid": "337da9cab0e14bb173fef685749bcaa2", "score": "0.61459255", "text": "def select_p(self, p):\n c = self.c\n w = self.main_window\n ap = self.p_to_ap(p)\n # Be careful during startup.\n if not (w and w.tree):\n return\n if 'select' in g.app.debug:\n tag = 'py.app.select_p'\n print('%30s: %4s %s %s' % (tag, len(p.b), p.gnx, p.h))\n w.tree.set_ap(ap)\n # #1142: This code was not the problem.\n body = c.frame.body.wrapper\n w.body.set_text(body.s)\n w.body.set_insert_point(body.ins, body.sel)", "title": "" }, { "docid": "2950e984766af620208b7c665d193efe", "score": "0.61266476", "text": "def select_action(self, event) :\n widget = event.widget\n selection = widget.curselection()\n if selection :\n index = int(selection[0])\n values = dict(zip(('index', 'offset', 'title', 'size'), widget.get(index)))\n self.notify('detail', **values)", "title": "" }, { "docid": "7e498f91c843470ceb16fef451fadec0", "score": "0.6112048", "text": "def OnClickRelease( self, event ):\n node = HotMapNavigator.findNodeAtPosition(self.hot_map, event.GetPosition())\n self.SetSelected( node, event.GetPosition() )", "title": "" }, { "docid": "d219ac84558718ae2379d01291a23df1", "score": "0.6111472", "text": "def select_action(self, event) :\n widget = event.widget\n selection = widget.selection()\n if selection :\n item = widget.item(selection[0])\n self.notify('detail', values=item['values'])", "title": "" }, { "docid": "ecfc8894987d0385d325705f0185cd68", "score": "0.60980904", "text": "def mouseSelect(self,event=None,deselect=0):\n #print 'In select'\n self.x1=self.canvas.canvasx(event.x)\n self.y1=self.canvas.canvasy(event.y)\n self.clearSelBox()\n items = self.canvas.find_overlapping(self.x0,self.y0,self.x1,self.y1)\n #print items\n self.select(items,deselect)", "title": "" }, { "docid": "85721ca662e22f59b72213ddf5d1baa9", "score": "0.6096528", "text": "def check_click(self, x_value, y_value):\r\n for num in range(self.connection.my_player.low_range,\r\n self.connection.my_player.low_range + 4):\r\n # e.g for \"red\" - range(0, 4), for \"green\" - range(4, 8)\r\n piece = self.connection.my_player. \\\r\n my_pieces[num - self.connection.my_player.low_range]\r\n # Gets index 0-3 to use my_pieces.\r\n pos = piece.get_position()\r\n if piece.movable:\r\n if piece.image.get_width() == 64:\r\n # If you clicked a piece, move them (if you rolled)\r\n if pos is not None and piece.image.get_rect(\r\n topleft=(coOrds[pos][0] - 7,\r\n coOrds[pos][1] - 25)).collidepoint(x_value, y_value):\r\n self.click_piece(num)\r\n break\r\n # If you clicked a piece in home and you rolled 6, move\r\n # them out.\r\n elif piece.image.get_rect(\r\n topleft=(self.board.home_coords[num])). 
\\\r\n collidepoint(x_value, y_value) and self.connection.my_player \\\r\n .roll == 6:\r\n self.click_piece(num)\r\n break\r\n else:\r\n # If you clicked a piece, move them (if you rolled)\r\n if piece.image.get_rect(topleft=(\r\n coOrds[pos][0], coOrds[pos][1])).collidepoint(x_value, y_value):\r\n self.click_piece(num)\r\n break", "title": "" }, { "docid": "08050758b16df8940dc2881626fd93cd", "score": "0.60928094", "text": "def click_piece(self, num):\r\n self.board.move_piece(num, self.connection.my_player.roll)\r\n self.connection.send_movement(num, self.connection.my_player.roll)\r\n self.connection.end_roll()", "title": "" }, { "docid": "299d50c416a19d3b1be975f0cbf3ee82", "score": "0.6089458", "text": "def on_mouse_press(self, x, y, button, modifiers):\n        #Only left clicks are valid\n        if (button == arcade.MOUSE_BUTTON_RIGHT):\n            return\n        #Get the square coordinates\n        self.square_selected = squareClicked(x , y)\n        #If square_coordinates are not valid end this function\n        if (self.square_selected == None):\n            #Deselect the grid square and dehighlight the squares \n            update_grid_textures(self.square_selected, self.my_grid_sprites, self.my_textures) \n            self.square_selected_possible_vales = None\n            return\n\n        #Check if the square is not empty \n        y , x = self.square_selected\n        if (self.my_grid[y][x] != 0):\n            #Do not save the selected square\n            self.square_selected = None\n            #Deselect the grid square and dehighlight the squares \n            update_grid_textures(self.square_selected, self.my_grid_sprites, self.my_textures) \n            self.square_selected_possible_vales = None\n            return \n\n        #Highlight the selected square and its corresponding row and column \n        update_grid_textures(self.square_selected, self.my_grid_sprites, self.my_textures)\n\n        #Save the possible values \n        self.square_selected_possible_vales = getPossibleValesInSquare(self.square_selected, self.my_grid)", "title": "" }, { "docid": "3001ffff822188378086ecdaccd4503f", "score": "0.607852", "text": "def SelectItem(self, item, select=True):", "title": "" }, { "docid": "17a5ac30599ea9a8c97cc3b0e420e819", "score": "0.60760474", "text": "def click_button_select(self):\n        self.click_element(self.BUTTON_SELECT)", "title": "" }, { "docid": "ec2ab88c6d7855e21093b2762de2660b", "score": "0.6067318", "text": "def _keep_selecting(self, event):\n        canvas = self._canvas\n        x = canvas.canvasx(event.x)\n        y = canvas.canvasy(event.y)\n        canvas.coords(self._sobject,\n                      self._sstart[0], self._sstart[1], x, y)", "title": "" }, { "docid": "7e41940cc7af327efe5851f513be267a", "score": "0.6065461", "text": "def select(self, position, selected):\n        self.items[position]['selected'] = selected", "title": "" }, { "docid": "88da34d8735771bd7a568e9ed280fa60", "score": "0.6055804", "text": "def mouse_click(self, event):\n        x, y = event.x, event.y\n        if PADDING < x < FRAME_WIDTH - PADDING and PADDING < y < FRAME_HEIGHT - PADDING:\n            self.canvas.focus_set()\n            row, col = (y - PADDING) / CELL_WIDTH, (x - PADDING) / CELL_WIDTH\n\n            if DEBUG:\n                print 'cell clicked: (%s, %s)' % (row, col)\n\n            if not (0 <= row <= 8 and 0 <= col <= 8):\n                return\n\n            if (row, col) == (self.row, self.col):\n                self.row = self.col = -1\n            elif self.sudoku.puzzle[row * 9 + col] == '0':\n                self.row, self.col = row, col\n            else:\n                self.row = self.col = -1\n\n            self.canvas.delete('select')\n            if self.row >= 0 and self.col >= 0:\n                self.canvas.create_rectangle(\n                    PADDING + self.col * CELL_WIDTH + 1,\n                    PADDING + self.row * CELL_WIDTH + 1,\n                    PADDING + (self.col + 1) * CELL_WIDTH - 1,\n                    PADDING + (self.row + 1) * CELL_WIDTH - 1,\n                    
outline='blue', tags='select'\n )", "title": "" }, { "docid": "b7cb85f8b91af28dd4b8d7bf9072c72c", "score": "0.60555977", "text": "def click(self, ev):\n \n if ev.type == 'touchstart':\n if len(ev.targetTouches)>1:\n return\n \n # New mouse / finger position\n self.X0, self.Y0 = self.mouse_pos(ev)\n\n if self.tool == 'pen':\n ctx.lineWidth = self.line_width\n ctx.strokeStyle = self.color\n\n elif self.tool == 'select':\n self.store = ctx.getImageData(0, 0, zone.width, zone.height)\n\n elif self.tool == 'rubber':\n ctx.fillStyle = self.bgcolor\n\n self.drawing = True", "title": "" }, { "docid": "da89e314e7fcf2c527eb1a60c6884c80", "score": "0.60477", "text": "def modify_selected(self, view, tile_x, tile_y, limited=False):\n if not self.render_mode:\n self.common_draw(view, tile_x, tile_y, self._modify_selected, limited)\n else:\n self.common_draw(view, tile_x, tile_y, self._modify_render_offset, limited)", "title": "" }, { "docid": "c41c6c33e23be4878f03494fdaff5bc4", "score": "0.6040477", "text": "def on_mouse_press(self, x, y, button):\n\n if (x,y) in self:\n x = x // ( 6*self._scale)\n y = y // (13*self._scale)\n s = (int(self._scroll)+self.rows)*self.cols\n\n start = y*self.cols + x\n self._selection = start+s, start+s\n self._program[\"selection\"] = start, start", "title": "" }, { "docid": "d3de4888536fb7d6768ee7daa9c36be5", "score": "0.603018", "text": "def selectShapesButtonPressed(self):\n print 'selectShapesButtonPressed'", "title": "" }, { "docid": "ac8f9152636d1700f5d9e05f14f6a6ec", "score": "0.6029377", "text": "def action(self,objet,truc,idpt):\n if truc.type == gtk.gdk.BUTTON_PRESS :\n if idpt == (self.actu+1): #Action to execute if the selected point is the following of previous one\n xd,yd,xa,ya=self.POINT[(idpt-1)].x,self.POINT[(idpt-1)].y,self.POINT[idpt].x,self.POINT[idpt].y\n item = self.ROOT.add(gnomecanvas.CanvasLine,\n points=(xd,yd,xa,ya),\n fill_color='black',\n width_units=1.5)\n\n\n if idpt == 2: # Always raise the first point\n self.POINT[self.MAX].raise_to_top()\n self.TEXT[self.MAX].raise_to_top()\n\n self.POINT[idpt].hide()\n self.TEXT[idpt].hide()\n if idpt == self.MAX : #Action to execute if all points have been selected in good way\n gcompris.set_background(self.ROOT, self.data[self.gcomprisBoard.sublevel][0][2])\n self.gamewon = 1\n self.timeout = gobject.timeout_add(1500, self.lauch_bonus) # The level is complete -> Bonus display\n\n else : #Action to execute if the selected point isn't the last one of this level\n #self.POINT[(idpt+1)].set(fill_color='blue') #Set color in blue to next point. 
Too easy ???\n self.actu=self.actu+1 #self.actu update to set it at actual value of selected point", "title": "" }, { "docid": "0cbed21829eb25eb3c9ae5d1315fc7e7", "score": "0.6023838", "text": "def clickEvent(self):\n self.activated.emit(self.text())", "title": "" }, { "docid": "c2ba8e738b43335dd41e11b4c4314030", "score": "0.60225546", "text": "def select_tower(self, x, y):\n\t\tbox_size = self.map_height // len(self.map)\n\t\ti, j = x // box_size, y // box_size\n\t\tif self.map[j][i] == 2:\n\t\t\tx, y = i * box_size + box_size // 2, j * box_size + box_size // 2 \n\t\t\ttower = None\n\t\t\tfor t in self.towers:\n\t\t\t\tif t.x == x and t.y == y:\n\t\t\t\t\ttower = t\n\t\t\t\t\tbreak\n\t\t\t# If it's a click on the currently selected tower\n\t\t\tif self.tower_selected == tower:\n\t\t\t\tself.tower_selected = None\n\t\t\telse:\n\t\t\t\tself.tower_selected = tower\n\t\t\t# Un-selecting an enemy when a tower is selected\n\t\t\tself.enemy_selected = None\n\n\t\telse:\n\t\t\tself.tower_selected = None\n\t\t\tself.enemy_selected = None", "title": "" }, { "docid": "4f4253e9d62931e6eef759166537db98", "score": "0.6020156", "text": "def on_click(self, kind, name):\n if kind == \"device\":\n self.select_devices([name])\n self.emit(Qt.SIGNAL(\"graphicItemSelected(QString)\"), name)\n elif kind == \"section\":\n self.zoom_to_section(name)", "title": "" }, { "docid": "a757e0481663559db13ebeb3c3aacd18", "score": "0.6015027", "text": "def click_board(self, event):\n\n if GameOps.is_checkmate:\n return\n field_idx = self.coord_to_field(event)\n if field_idx is None:\n return\n field_name = self.index2label(field_idx)\n board_idx = self.label2index_game(field_name)\n\n self.layout['field_idx'] = field_idx\n piece = self.board_state[board_idx]\n piece_color = piece.color if piece else 0\n move_count = GameOps.move_count\n\n if self.click_idx == 0: # select piece\n self.first_board_click(piece, field_idx, piece_color, move_count)\n else:\n self.second_board_click(piece, field_idx, piece_color, field_name, move_count)\n return", "title": "" }, { "docid": "8224dc38d7c9aa838f27e71ed6c0463c", "score": "0.60142624", "text": "def multsel(self):\n def toggle_selector(event):\n if toggle_selector.RS.active:\n toggle_selector.RS.set_active(False)\n if not toggle_selector.RS.active:\n toggle_selector.RS.set_active(True)\n\n self.state='SMP'\n toggle_selector.RS = RectangleSelector(self.ax, self.point_select_callback,\n drawtype='box', useblit=True,\n button=[1,3], # don't use middle button\n minspanx=5, minspany=5,\n spancoords='pixels')\n self.selector = toggle_selector.RS\n self.update_state()", "title": "" }, { "docid": "1e74d3929ed4a3d719ec7fa1a59bcba4", "score": "0.6012243", "text": "def select(node, **_):\n uistate.update_button_state(node)", "title": "" }, { "docid": "815da5944d077ef1004c950f0dfa303a", "score": "0.60108274", "text": "def SetSelected( self, node, point=None, propagate=True ):\n if node == self.selectedNode:\n return\n self.selectedNode = node\n self.UpdateDrawing()\n if node:\n wx.PostEvent( self, SquareSelectionEvent( node=node, point=point, map=self ) )", "title": "" }, { "docid": "05667e9e21cdb084a61c502a4e23d3f1", "score": "0.60057104", "text": "def _handle_click(self, pipe):\n if self._panel_selection is not None:\n self._panel_selection(pipe)", "title": "" }, { "docid": "cdfb43cd60c80ab3a762c138527460c6", "score": "0.60041386", "text": "def click(self, event):\r\n self.clicked.append((event.x, event.y))\r\n if len(self.clicked) > numEvents:\r\n del self.clicked[0]\r\n self.visit()", 
"title": "" }, { "docid": "d36f5c9e3246778264700809a67589a1", "score": "0.600275", "text": "def clickedBy(self, clicker):\n GG.model.room_item.GGRoomItem.clickedBy(self, clicker)\n if GG.utils.checkNeighbour(clicker.getPosition(), self.getPosition()):\n clicker.setSelectedItem(self)", "title": "" }, { "docid": "5ce6607656185e3a40d22f7915e9326d", "score": "0.60002637", "text": "def SetMainSelection(self, selection):", "title": "" }, { "docid": "27f82af9e09321222d642471e7ef05e1", "score": "0.59927964", "text": "def select_shape(self, point, multiple_selection_mode):\n for shape in reversed(self.shapes):\n if not shape.containsPoint(point):\n continue\n if multiple_selection_mode:\n if shape not in self.selectedShapes:\n self.selectedShapes.append(shape)\n self.selectionChanged.emit()\n else:\n self.selectedShapes = [shape]\n self.selectionChanged.emit()\n return\n # If clicked on nothing, deselect all.\n self.deSelectShape()", "title": "" }, { "docid": "6f01adb3db6ff8138f3587edbed8b403", "score": "0.59911543", "text": "def click(self):\n self.press()\n self.release()", "title": "" }, { "docid": "d338296543d6a8b5834348607c95fb44", "score": "0.59845114", "text": "def click(self):\n self.patch.send('mouse %i %i 1 0' % (self.x + 1, self.y + 1))\n self.patch.send('mouseup %i %i 1 0' % (self.x + 1, self.y + 1))", "title": "" }, { "docid": "a3beab537b06a4a8f31c34571202d005", "score": "0.59838724", "text": "def new_piece_selected(self, new_space):\n self.curr_selected_piece = self.chess_board.get_piece_at(new_space)\n self.curr_poss_moves = self.get_curr_poss_moves()", "title": "" }, { "docid": "d9f770b5f30d870d4474863273d99e0a", "score": "0.5976082", "text": "def _start_select(self, event):\n if self.component.active_tool in (None, self):\n self.component.active_tool = self\n else:\n self._enabled = False\n self._screen_start = (event.x, event.y)\n self._screen_end = None\n self.event_state = \"selecting\"\n event.window.set_pointer(self.pointer)\n event.window.set_mouse_owner(self, event.net_transform())\n self.selecting_mouse_move(event)\n return", "title": "" }, { "docid": "b925f50b0f1421bcf86fe8539c9d8467", "score": "0.59681755", "text": "def _pressed(self, evt):\n x, y, widget = evt.x, evt.y, evt.widget\n item = widget.identify_row(y)\n column = widget.identify_column(x)\n if not column or item not in self._items:\n return\n item_values = widget.item(item)['values']\n if not len(item_values):\n return\n text = item_values[(int(column[1]) - 1)]\n if not text:\n return\n bbox = widget.bbox(item, column)\n if not bbox:\n return\n text = '%02d' % text\n self._selection = (text, item, column)\n self._show_selection(text, bbox)", "title": "" }, { "docid": "31c6b10c848fcb483d8e0b734bf4b38d", "score": "0.5957512", "text": "def point_select_callback(self,eclick, erelease):\n #eclick and erelease are the press and release events'\n self.update_state()\n if not (self.shift_is_held or self.ctrl_is_held):\n self.selectpt=[]\n self.selectseg=[]\n x1, y1 = eclick.xdata, eclick.ydata\n x2, y2 = erelease.xdata, erelease.ydata\n\n # print(x1,x2,y1,y2)\n if x1>x2:\n x1,x2=x2,x1\n if y1>y2:\n y1,y2=y2,y1\n if not (np.allclose(x1,x2) and np.allclose(y1,y2)) :\n try:\n selectpt,selectseg = self.L.get_zone([x1,x2,y1,y2])\n\n if not self.ctrl_is_held:\n self.selectpt.extend(selectpt)\n self.selectseg.extend(selectseg)\n self.selectseg=filter(lambda x: self.L.Gs.node[x]['connect'][0] in self.selectpt\n and self.L.Gs.node[x]['connect'][1] in self.selectpt,\n self.selectseg)\n\n 
self.selectpt=np.unique(self.selectpt).tolist()\n                self.selectseg=np.unique(self.selectseg).tolist()\n            else:\n                [self.selectpt.pop(self.selectpt.index(x)) for x in selectpt if x in self.selectpt]\n                [self.selectseg.pop(self.selectseg.index(x)) for x in selectseg if x in self.selectseg]\n        except:\n            if len(self.selectpt) == 0:\n                self.modeIni()\n                self.update_state()\n                # print('empty selection')\n\n        self.plotselptseg(self.selectpt)\n        self.selected='pt'\n        if len(self.selectpt) == 1:\n            self.nsel=self.selectpt[0]\n            self.state='SP1'\n            self.update_state()\n        else :\n            self.modeIni()\n            self.update_state()", "title": "" }, { "docid": "f0c25d8401217bb6f86cca077c3217db", "score": "0.59525204", "text": "def selectionne(event):\n    pass", "title": "" }, { "docid": "5016b6eb898ba24eb7919432282cd4f1", "score": "0.5935918", "text": "def on_mouse_clicked(self):\n        pass", "title": "" }, { "docid": "1807db0545ce7ef4d5302b18aac460e8", "score": "0.59345615", "text": "def left_click_actor(self, act):\n        if act.selected:\n            self.unselect_actor(act)\n        else:\n            self.select_actor(act)", "title": "" }, { "docid": "526a607fe8b5fcd985988ad600a3a659", "score": "0.5930846", "text": "def onClick(self: object, event: object):\n        r = event.x // (self.options[\"piece_size\"] + self.options[\"border\"]) + 1\n        c = event.y // (self.options[\"piece_size\"] + self.options[\"border\"]) + 1\n        try:\n            self.game.move(r, c)\n            self.redrawCanvas()\n            self.status()\n        except:\n            pass", "title": "" }, { "docid": "0b49263e5b9f14bc225dcd2794653b4d", "score": "0.5930005", "text": "def child_clicked(self, pos):\n        raise NotImplementedError", "title": "" }, { "docid": "29b9077a1cc3662f113a85a40b9ad3ea", "score": "0.59286916", "text": "def pointSelect(self, event):\n\n        if event.selection == self.sel_point:\n            # same point(s) selected again, turn point(s) off\n            self.pyslip.DeleteLayer(self.sel_point_layer)\n            self.sel_point_layer = None\n            self.sel_point = None\n        elif event.selection:\n            # some other point(s) selected, delete previous selection, if any\n            if self.sel_point_layer:\n                self.pyslip.DeleteLayer(self.sel_point_layer)\n\n            # remember selection (need copy as highlight modifies attributes)\n            self.sel_point = copy.deepcopy(event.selection)\n\n            # choose different highlight colour for different type of selection\n            selcolour = '#00ffff'\n            if event.type == pySlipQt.PySlipQt.EVT_PYSLIPQT_SELECT:    # TODO better visibility (like pySlip)\n                selcolour = '#0000ff'\n\n            # get selected points into form for display layer\n            # delete 'colour' and 'radius' attributes as we want different values\n            highlight = []\n            for (x, y, d) in event.selection:\n                del d['colour']     # AddLayer...() ensures keys exist\n                del d['radius']\n                highlight.append((x, y, d))\n\n            # layer with highlight of selected points\n            self.sel_point_layer = \\\n                self.pyslip.AddPointLayer(highlight, map_rel=True,\n                                          colour=selcolour,\n                                          radius=5, visible=True,\n                                          show_levels=MRPointShowLevels,\n                                          name='<sel_pt_layer>')\n\n            # make sure highlight layer is BELOW selected layer\n            self.pyslip.PlaceLayerBelowLayer(self.sel_point_layer,\n                                             self.point_layer)\n        # else: we ignore an empty selection\n\n        return True", "title": "" }, { "docid": "465d5e9d939d40f72e2602f8329a27f1", "score": "0.5926622", "text": "def tile_select(self, event):\n        self.click +=1\n        if self.click > 5:\n            self.click = 0\n        #boolean value of true/false compared to KenKen_board_data file\n        self.tile_guess.set(self.tile_ident[self.click])\n        try:\n            #exception handling to take care of \" \" value comparison\n            user_guess = eval(self.tile_guess.get())\n            tile_val = 
tile_value(self.tile_number, self.board_num)\n        except:\n            user_guess = (self.tile_guess.get())\n            tile_val = tile_value(self.tile_number, self.board_num)\n        if user_guess == tile_val: #self documenting...\n            self.right_answer = True\n        else:\n            self.right_answer = False", "title": "" }, { "docid": "1bed338a836772849f5abe1046ec6815", "score": "0.5912959", "text": "def click(self,locator):\r\n        self.do_command(\"click\", [locator,])", "title": "" }, { "docid": "15247cc35aae7839f8366b2e903825f9", "score": "0.59109485", "text": "def _setSelected(self, xBoard, yBoard): \n        self._selected = (xBoard, yBoard)", "title": "" }, { "docid": "caab3c822788bb7c9259c69a6ae61bff", "score": "0.59015024", "text": "def give_click(self, button, mouse_button):", "title": "" }, { "docid": "63db2e4fef4fd369e097cae7a7054d75", "score": "0.5893464", "text": "def piece_on(self, pos):", "title": "" }, { "docid": "6a246d372325f0b41da4ce53f4e97e22", "score": "0.58927417", "text": "def MouseClickBegin(self, item):", "title": "" }, { "docid": "2a47a1c457d9c193e3867596b134199f", "score": "0.58899444", "text": "def on_click(self, e):\n        # Don't react when in move, attack or game over mode.\n        if (self.mode == Modes.Moving or\n            self.mode == Modes.GameOver):\n            return\n        \n        # make sure we have focus and that it was the left mouse button\n        if (e.type == pygame.MOUSEBUTTONUP\n            and e.button == 1\n            and pygame.mouse.get_focused()):\n            \n            # If this is in the map, we're dealing with units or tiles\n            if self.map.rect.collidepoint(e.pos):\n                # Get the tile's position\n                to_tile_pos = self.map.tile_coords(e.pos)\n\n                # get the unit at the mouseclick\n                unit = self.get_unit_at_screen_pos(e.pos)\n                \n                if unit:\n                    # clicking the same unit again deselects it and, if\n                    # necessary, resets select mode\n                    if unit == self.sel_unit:\n                        self.change_mode(Modes.Select)\n                        self.sel_unit = None\n\n                    # select a new unit\n                    elif (self.mode == Modes.Select and\n                            unit.team == self.cur_team):\n                        self.sel_unit = unit\n                        SoundManager.play(SELECT_SOUND)\n                    \n                    # Attack\n                    elif (self.mode == Modes.ChooseAttack and\n                            self.sel_unit and\n                            to_tile_pos in self._attackable_tiles):\n                        # Attack the selected tile\n                        self.sel_unit_attack(to_tile_pos)\n                else:\n                    # No unit there, so a tile was clicked\n                    if (self.mode == Modes.ChooseMove and\n                        self.sel_unit and\n                        to_tile_pos in self._movable_tiles):\n                        \n                        # Move to the selected tile\n                        self.sel_unit_move(to_tile_pos)\n            \n            # Otherwise, the user is interacting with the GUI panel\n            else:\n                # Check which button was pressed\n                for button in self.buttons:\n                    # If the button is enabled and has a click function, call\n                    # the function\n                    if ((not button.condition or button.condition()) and\n                        self.get_button_rect(button).collidepoint(e.pos)):\n                        button.onClick()\n                        \n                        # Play the button sound\n                        SoundManager.play(BUTTON_SOUND)", "title": "" }, { "docid": "a451b014af833025aa2e9925358955aa", "score": "0.58857626", "text": "def clicked_at(self, *pos):\n        logging.debug('Enter GridWorldGUI.clicked_at')\n        #todo: change this if patches become turtles (unlikely)\n        patch = self.subject.patch_at(pos)\n        self.handle_click1(subject=patch, location=pos)", "title": "" }, { "docid": "2dafe7024714daa1b1aa127a92c3670e", "score": "0.58836776", "text": "def toggle_selected(self, event):\n        if self._selected.get() == 1:\n            self._selected.set(0)\n        else:\n            self._selected.set(1)\n        self.set_image(self._last_roll, self._selected.get())", 
"title": "" }, { "docid": "c19e1ad1dcc38a2a77a98b3ec8a59d36", "score": "0.5882185", "text": "def on_mouse_press(self, x, y, button):\n pass", "title": "" } ]
6ecb231ff5096ce78fbd6defbb166c28
r"""__bool__(DoubleVector self) -> bool
[ { "docid": "4692d5c6054462722656174c599970a4", "score": "0.8644724", "text": "def __bool__(self) -> \"bool\":\n return _model.DoubleVector___bool__(self)", "title": "" } ]
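The positive passage, like the highest-scoring negatives that follow, is a SWIG-generated wrapper that forwards Python truthiness to a C++ std::vector, so an empty vector is falsy and a non-empty one is truthy. A pure-Python stand-in sketching that delegation (the _model extension module is not available here, so a plain list plays the role of the C++ container; the class below is illustrative only):

class DoubleVector:
    """Stand-in for a SWIG-wrapped std::vector<double>."""

    def __init__(self, values=()):
        self._data = [float(v) for v in values]  # plays the role of the C++ vector

    def __bool__(self):
        # The generated code reads: return _model.DoubleVector___bool__(self)
        # which amounts to "is the underlying vector non-empty?".
        return len(self._data) > 0

    def empty(self):
        # Mirrors std::vector::empty(), the logical negation of __bool__.
        return len(self._data) == 0

assert not DoubleVector()        # empty vector is falsy
assert DoubleVector([1.0, 2.0])  # non-empty vector is truthy
assert DoubleVector().empty()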
[ { "docid": "1df04d9ce4808370831cdb9d7edd5e08", "score": "0.8609209", "text": "def __bool__(self) -> \"bool\":\n        return _model.DoubleVectorVector___bool__(self)", "title": "" }, { "docid": "ec23ab9ccaa216c7b7d0f32e6973c3ba", "score": "0.812003", "text": "def __bool__(self) -> \"bool\":\n        return _model.FloatVectorVector___bool__(self)", "title": "" }, { "docid": "d1e8c3ee7a75a51f1d27e7b9132f4732", "score": "0.80444705", "text": "def __bool__(self) -> \"bool\":\n        return _model.FloatVector___bool__(self)", "title": "" }, { "docid": "b7452a5812c938d1037c67527962a362", "score": "0.79254204", "text": "def __bool__(self) -> \"bool\":\n        return _model.Int64Vector___bool__(self)", "title": "" }, { "docid": "79daef45efe50ce0678568aef10a63b0", "score": "0.79063845", "text": "def __bool__(self) -> \"bool\":\n        return _model.IntVector___bool__(self)", "title": "" }, { "docid": "0d818e4a33a014fbc9dd2f997bcc8364", "score": "0.7814082", "text": "def __nonzero__(self) -> \"bool\":\n        return _model.DoubleVector___nonzero__(self)", "title": "" }, { "docid": "bba761c3d0a60d6765dadb461f636b61", "score": "0.78094125", "text": "def __nonzero__(self) -> \"bool\":\n        return _model.DoubleVectorVector___nonzero__(self)", "title": "" }, { "docid": "06e11ca7e2417e6501da8d0fbb7ff103", "score": "0.7493335", "text": "def __bool__(self) -> \"bool\":\n        return _model.Int8Vector___bool__(self)", "title": "" }, { "docid": "046a771f7067cc0cf94dc2f3364300e8", "score": "0.7379923", "text": "def __bool__(self):\n        return _openshot.FieldVector___bool__(self)", "title": "" }, { "docid": "72dae11c9ef5afc0094164e39d087ea1", "score": "0.7363791", "text": "def __bool__(self):\n        return _openshot.PointsVector___bool__(self)", "title": "" }, { "docid": "bbfc3f6ad828acd19eb72955329b66c8", "score": "0.72632545", "text": "def __nonzero__(self) -> \"bool\":\n        return _model.FloatVectorVector___nonzero__(self)", "title": "" }, { "docid": "83cefcc47ace2330b51359bdb68c1c53", "score": "0.71888757", "text": "def __nonzero__(self) -> \"bool\":\n        return _model.FloatVector___nonzero__(self)", "title": "" }, { "docid": "acc02d4c43287909eab614b3c882f9ce", "score": "0.70877445", "text": "def __bool__(self):\n        return _openshot.CoordinateVector___bool__(self)", "title": "" },
{ "docid": "1d2df2ca5f4477ee4f16a4cf25b428dd", "score": "0.6978973", "text": "def __bool__(self):\n        return self.__nonzero__()", "title": "" }, { "docid": "fd95d06635030f03ff8a5313b4fb9e1d", "score": "0.69786924", "text": "def __ne__(self, that: 'vnl_vectorD') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorD___ne__(self, that)", "title": "" }, { "docid": "34a019b06cb65d0e1eafe10fdb88a8b4", "score": "0.6957412", "text": "def __bool__(self):\r\n        return False if self == 0 else True", "title": "" }, { "docid": "7391ee7e62e962cb973c67fd087c9120", "score": "0.69444835", "text": "def __bool__(self):\n        return bool(abs(self))", "title": "" }, { "docid": "dece589ec2fb3020db46e4a7d107f215", "score": "0.6921934", "text": "def __bool__(self) -> \"bool\":\n        return _model.StringVector___bool__(self)", "title": "" }, { "docid": "43dc215b013bb2ecd36802ef2b10f6a1", "score": "0.6864139", "text": "def __bool__(self): \n        return bool(abs(self))", "title": "" }, { "docid": "68c4bbb1654d3474900a0a41d22b5c44", "score": "0.6859721", "text": "def __nonzero__(self) -> \"bool\":\n        return _model.Int64Vector___nonzero__(self)", "title": "" }, { "docid": "85d94de959ecdd92797612698b7612d9", "score": "0.6854265", "text": "def __ne__(self, that: 'vnl_vectorF') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorF___ne__(self, that)", "title": "" }, { "docid": "d267cfcf3fe6ef266946e4a6054be174", "score": "0.6795451", "text": "def __nonzero__(self) -> \"bool\":\n        return _model.IntVector___nonzero__(self)", "title": "" }, { "docid": "f3763f5018768e44df3b1b37531232b4", "score": "0.67602944", "text": "def __eq__(self, that: 'vnl_vectorD') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorD___eq__(self, that)", "title": "" }, { "docid": "ec7ac4d253b6aa38263f29b635da2ab7", "score": "0.67210364", "text": "def __ne__(self, that: 'vnl_vectorUL') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorUL___ne__(self, that)", "title": "" }, { "docid": "cf4592fbad0b57a3414bc93f341e8e72", "score": "0.66705656", "text": "def __ne__(self, that: 'vnl_vectorLD') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorLD___ne__(self, that)", "title": "" }, { "docid": "5fdbdef4c307298e633a2a733ef29f3e", "score": "0.6657429", "text": "def __eq__(self, that: 'vnl_vectorF') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorF___eq__(self, that)", "title": "" }, { "docid": "6132effc2bf1bba5476bdddf4936c018", "score": "0.6631465", "text": "def __bool__(self):\n        return bool(self.value)", "title": "" }, { "docid": "cd20e5a81bd5a97791516f5283db27de", "score": "0.6630465", "text": "def __bool__(self):\n        return self.bool", "title": "" }, { "docid": "809c94e068db7d8d0d50a82b9b412c7c", "score": "0.6620562", "text": "def __ne__(self, that: 'vnl_vectorSLL') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSLL___ne__(self, that)", "title": "" }, { "docid": "b1ff2848ea39c3c0ead1965ac987c107", "score": "0.6615403", "text": "def __ne__(self, that: 'vnl_vectorSL') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSL___ne__(self, that)", "title": "" },
{ "docid": "d5bf754c0180c3ee13fc67d45267f481", "score": "0.6613736", "text": "def __ne__(self, that: 'vnl_vectorCF') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorCF___ne__(self, that)", "title": "" }, { "docid": "7bd349a0f40d3bda62ecf9793d673fe2", "score": "0.6601893", "text": "def __bool__(self) -> bool:\r\n        return self._value", "title": "" }, { "docid": "0429e21b0ac5c1f144043ea497280ec5", "score": "0.6597759", "text": "def __bool__(self) -> bool:\n        return bool(self.value)", "title": "" }, { "docid": "948aea2897d5c1abad667141fee1aa41", "score": "0.65781343", "text": "def __ne__(self, that: 'vnl_vectorSI') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSI___ne__(self, that)", "title": "" }, { "docid": "2a44893076bf47dc5f8a74c229c58bfd", "score": "0.655767", "text": "def __ne__(self, that: 'vnl_vectorULL') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorULL___ne__(self, that)", "title": "" }, { "docid": "226df36bcf6d0bb93a704034401b40d1", "score": "0.6556685", "text": "def __ne__(self, that: 'vnl_vectorSS') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSS___ne__(self, that)", "title": "" }, { "docid": "96ffddaeeca0923101ca1e73f1eb6d1d", "score": "0.6553336", "text": "def __ne__(self, that: 'vnl_vectorSC') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSC___ne__(self, that)", "title": "" }, { "docid": "7bb909515a5fdfc3306b50bbfd6e8e9a", "score": "0.6551553", "text": "def __ne__(self, that: 'vnl_vectorUC') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorUC___ne__(self, that)", "title": "" }, { "docid": "4715ed98cce571256fa8e7dddf558f80", "score": "0.65361464", "text": "def __bool__(self):\n\t\treturn bool(self._N)", "title": "" }, { "docid": "79bab258a7f1a3c32073d282582ed718", "score": "0.6484494", "text": "def __bool__(self) -> bool:\n        pass", "title": "" }, { "docid": "c750624614259eea99a71053aaf33f7c", "score": "0.64827514", "text": "def __ne__(self, that: 'vnl_vectorUS') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorUS___ne__(self, that)", "title": "" }, { "docid": "e05a6c020af39602846c02feb2cbfbd5", "score": "0.6478606", "text": "def operator_eq(self, v: 'vnl_vectorD') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorD_operator_eq(self, v)", "title": "" }, { "docid": "cf665c678734684106c8b8d6be9de988", "score": "0.6446189", "text": "def __bool__(self):\n        pass", "title": "" }, { "docid": "6106638310316be661bbbd352e353c54", "score": "0.64363706", "text": "def __nonzero__(self) -> \"bool\":\n        return _model.Int8Vector___nonzero__(self)", "title": "" }, { "docid": "13d3d0cd6e989ee0513db497c196c32b", "score": "0.64354885", "text": "def __bool__(self):\r\n        return self!=Matrix(self.m,self.n)", "title": "" }, { "docid": "056348364246bfa6d32568b7905472ed", "score": "0.6431774", "text": "def __nonzero__(self):\r\n        return False", "title": "" }, { "docid": "572a665e9b94504502e7f1fc533423b1", "score": "0.6424128", "text": "def __bool__(self):\n        return self.is_solution()", "title": "" }, { "docid": "a4cee29daa4356844cb23ea54e502403", "score": "0.64236474", "text": "def empty(self) -> \"bool\":\n        return _model.DoubleVector_empty(self)", "title": "" }, { "docid": "b9ce4ca8475a65fa0fdd0917cecfcf9a", "score": "0.64183563", "text": "def empty(self) -> \"bool\":\n        return _model.DoubleVectorVector_empty(self)", "title": "" },
{ "docid": "92e2519b27d005236de8805fb8fcf4f0", "score": "0.64177513", "text": "def __eq__(self, that: 'vnl_vectorUL') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorUL___eq__(self, that)", "title": "" }, { "docid": "59e2ac18d86d48b2644de53230dd5c9a", "score": "0.64099985", "text": "def __nonzero__(self):\n        return bool(self._not or self._a)", "title": "" }, { "docid": "e0d11f07b400110c87f3afdb4f44c769", "score": "0.6393844", "text": "def __eq__(self, that: 'vnl_vectorLD') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorLD___eq__(self, that)", "title": "" }, { "docid": "8fc6feea3ff601e44451a1bd78d2e7a5", "score": "0.638642", "text": "def __bool__(self):\n        return _openshot.MappedFrameVector___bool__(self)", "title": "" }, { "docid": "f5696c456002fbfe505d56a17f431439", "score": "0.6375704", "text": "def __ge__(self, other):\n        return (self.n_vector >= other.n_vector).all()", "title": "" }, { "docid": "8b31cc689b9db46ba5fa75a0a8b24866", "score": "0.6353509", "text": "def __nonzero__(self):\n        return _openshot.FieldVector___nonzero__(self)", "title": "" }, { "docid": "f47cca5878803e747bfb015c0e5231c4", "score": "0.63447976", "text": "def __eq__(self, that: 'vnl_vectorSL') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSL___eq__(self, that)", "title": "" }, { "docid": "c7d6632ec79b8de60bcb582fd3cb19d4", "score": "0.6342411", "text": "def __nonzero__(self):\n        return _openshot.PointsVector___nonzero__(self)", "title": "" }, { "docid": "59870d8167d201e310fde262b86eba4d", "score": "0.6340969", "text": "def __bool__(self):", "title": "" }, { "docid": "60ff0d018e1732186330d477ecb2d942", "score": "0.6339781", "text": "def __bool__(self):\n        for _ in self:\n            return True\n        return False", "title": "" }, { "docid": "8e7ab4918b2b78d416e826b272d7e25e", "score": "0.6336702", "text": "def __nonzero__(self):\r\n        return True", "title": "" }, { "docid": "49b2b9cfd8fc7903015b27d05de491c9", "score": "0.63271075", "text": "def __eq__(self, that: 'vnl_vectorSLL') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSLL___eq__(self, that)", "title": "" }, { "docid": "c3e8673c6c56bd9ace16ff7062c58867", "score": "0.6319252", "text": "def __ne__(self, that: 'vnl_vectorUI') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorUI___ne__(self, that)", "title": "" }, { "docid": "fabaf6240521d4ccb30161d3b24313f4", "score": "0.62938905", "text": "def __bool__(self) -> bool:\n        ...", "title": "" }, { "docid": "a3c5c10d18202eda376e5e75fe2253cb", "score": "0.6293869", "text": "def __gt__(self, x):\n        return self.num * x.den > x.num * self.den", "title": "" }, { "docid": "3cd4687baa095acf5c9e6d2695382531", "score": "0.6290965", "text": "def __eq__(self, that: 'vnl_vectorULL') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorULL___eq__(self, that)", "title": "" }, { "docid": "b809d099e02b4b4d69cd61ca036212de", "score": "0.62842035", "text": "def __eq__(self, that: 'vnl_vectorSI') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSI___eq__(self, that)", "title": "" }, { "docid": "9069573c6c51152064682f6e390ba962", "score": "0.62768734", "text": "def __bool__(self) -> bool:\n        return len(self.elements) > 0", "title": "" }, { "docid": "9cd322242fca05bd72375538cbcc3834", "score": "0.62674487", "text": "def __bool__(self) -> Any:\n        return self.result", "title": "" }, { "docid": "b9f61d548c8ad72d3cbe8b37baee457d", "score": "0.62672615", "text": "def is_equal(self, rhs: 'vnl_vectorD', tol: 'double') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorD_is_equal(self, rhs, tol)", "title": "" },
{ "docid": "a62432fd10033567e0a4a33e76b87a63", "score": "0.6266427", "text": "def __bool__(self):\n        return any(self)", "title": "" }, { "docid": "90d4bc0dce03fa1d4476dd47ab0c0b64", "score": "0.6264634", "text": "def __eq__(self, that: 'vnl_vectorCF') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorCF___eq__(self, that)", "title": "" }, { "docid": "0635feecff47ef2e959ae6df19e64c59", "score": "0.62495947", "text": "def operator_eq(self, v: 'vnl_vectorF') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorF_operator_eq(self, v)", "title": "" }, { "docid": "7fefe2b427bc11491ef9b6f9354224c5", "score": "0.6239953", "text": "def __bool__(self, item):\n        return True", "title": "" }, { "docid": "d0ce78d2822a7b4c6c47fc4b91054398", "score": "0.6212615", "text": "def isvector(t):\n    return t[3] < 1.0", "title": "" }, { "docid": "d7cde70142af3d50273c535fcf99a58a", "score": "0.6209083", "text": "def __eq__(self, that: 'vnl_vectorSS') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSS___eq__(self, that)", "title": "" }, { "docid": "07dbb511e5a898d73618ba5e752c3d5b", "score": "0.6203829", "text": "def __le__(self,rhs):\n        if not isinstance(rhs,self.__class__):\n            raise Exception(\"RHS is not a valid MultQ\")\n        return self.x<=rhs.x", "title": "" }, { "docid": "18081e8667c907c029aa819ff7f199c0", "score": "0.61771715", "text": "def __bool__(self) -> bool:\n        return True", "title": "" }, { "docid": "68be88fa91d4b507e9bf7e7f6be23c37", "score": "0.6173027", "text": "def __bool__(self):\n        return True", "title": "" }, { "docid": "e93e4a9893fe7eb6cade410d1130aba1", "score": "0.616921", "text": "def __eq__(self, that: 'vnl_vectorSC') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorSC___eq__(self, that)", "title": "" }, { "docid": "588e45cfc2a3183754ca47d692425bcf", "score": "0.61661", "text": "def __gt__(self, other):\n        return self - other > 0", "title": "" }, { "docid": "8946ab9a9441b9c188836c16891cac6f", "score": "0.6160751", "text": "def __eq__(self, that: 'vnl_vectorUC') -> \"bool\":\n        return _vnl_vectorPython.vnl_vectorUC___eq__(self, that)", "title": "" }, { "docid": "cc49a455ae1f2747f147b6668c5f7a6b", "score": "0.6159246", "text": "def booleanOp(*args, **kwargs):\n    \n    pass", "title": "" } ]
2293d781810fba97ee398b65fb1842c4
Constructs a ResNet18 model.
[ { "docid": "3bb9cb5b6202c6c99c26eae0170eee3a", "score": "0.0", "text": "def se_resnet18(num_classes):\n model = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model", "title": "" } ]
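The positive passage builds a Squeeze-and-Excitation variant of ResNet-18 rather than calling the stock torchvision constructor. A hedged usage sketch, assuming se_resnet18 and its ResNet/SEBasicBlock dependencies are importable from the module the passage was taken from, checking that the network yields one row of class scores per image:

import torch

model = se_resnet18(num_classes=10)  # assumed importable; not defined in this file
model.eval()

with torch.no_grad():
    batch = torch.randn(4, 3, 224, 224)  # four RGB images at ImageNet resolution
    logits = model(batch)

assert logits.shape == (4, 10)  # one row of class scores per input image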
[ { "docid": "2c8d7eb1332a08e0aab78e517cc4e588", "score": "0.7498665", "text": "def resnet18(pretrained=True, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n # model.class_classifier: (512, cls)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(\n model_urls['resnet18']), strict=False)\n return model", "title": "" }, { "docid": "e184a84b70e22b346858b212c7ebf18a", "score": "0.73664284", "text": "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "ee23a0fb3994b62da5304cc18f7d9f44", "score": "0.73350024", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "title": "" }, { "docid": "d3f25c726adefe2e03821062dd45ed59", "score": "0.7325735", "text": "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "fa55b6f63a589b8e29a752508f52cc2f", "score": "0.7303929", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "fa55b6f63a589b8e29a752508f52cc2f", "score": "0.7303929", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "fa55b6f63a589b8e29a752508f52cc2f", "score": "0.7303929", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "fa55b6f63a589b8e29a752508f52cc2f", "score": "0.7303929", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "fa55b6f63a589b8e29a752508f52cc2f", "score": "0.7303929", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "fa55b6f63a589b8e29a752508f52cc2f", "score": "0.7303929", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "fa55b6f63a589b8e29a752508f52cc2f", "score": "0.7303929", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "fa55b6f63a589b8e29a752508f52cc2f", "score": "0.7303929", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": 
"d673818959da897b8ae4afc5ffd8e464", "score": "0.7274093", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(QBasicBlock, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "8b752678b3b1ab920c4aa009ec034a33", "score": "0.7196154", "text": "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "title": "" }, { "docid": "8b752678b3b1ab920c4aa009ec034a33", "score": "0.7196154", "text": "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "title": "" }, { "docid": "8b752678b3b1ab920c4aa009ec034a33", "score": "0.7196154", "text": "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "title": "" }, { "docid": "6e363880e4dd0c25cdca3b53a7f54622", "score": "0.7011926", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n print('load pretrained model ... ')\n model.load_state_dict(remove_fc(model_zoo.load_url(model_urls['resnet18'])), strict=False)\n return model", "title": "" }, { "docid": "ffb8b73de3ccdedc7637ee3ff9df67ce", "score": "0.6997773", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n if model.additional_blocks:\n model.load_state_dict(\n model_zoo.load_url(\n model_urls['resnet18']),\n strict=False)\n return model\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "title": "" }, { "docid": "2dcb957c269cc80a84e5f6ec59d6bd4e", "score": "0.6949178", "text": "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n state_dict = model_zoo.load_url(model_urls['resnet18'])\n for key in list(state_dict.keys()):\n # print(key)\n if 'fc' in key:\n del state_dict[key]\n model.load_state_dict(state_dict,strict=False)\n return model", "title": "" }, { "docid": "edbddd4d29195714e54fc655dfc04cda", "score": "0.6891461", "text": "def resnet18(activation_root=None, **kwargs):\n model_name = 'resnet18'\n activation_root = os.path.join(activation_root, model_name)\n model = ResNet(BasicBlock, [2, 2, 2, 2], activation_root=activation_root, **kwargs)\n\n state_dict = get_state_dict(model_name)\n model.load_state_dict(state_dict)\n\n return model", "title": "" }, { "docid": "3b9bb8f5c74e20aced45e902b210079e", "score": "0.68614376", "text": "def initialize_model():\n model = models.resnet18(pretrained=False)\n model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 25)\n\n return model", "title": "" }, { "docid": "988d47c60ec64636aa2c89dfdad5e447", "score": "0.67034423", "text": "def resnet18(num_classes=1000, pretrained='imagenet'):\n model = models.resnet18(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['resnet18'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_resnets(model)\n return model", "title": "" }, { "docid": "0aa97a9d026b6f99378d2dda10b1b347", "score": "0.6668339", "text": "def resnet18(num_class,**kwargs):\n model = CRFResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_class, **kwargs)\n\n return model", "title": "" }, { "docid": "f127830dc42cfdc600ba6263f57edc7e", "score": "0.66443634", "text": "def resnet18():\n return ResNet(BasicBlock, [2, 2, 2, 2])", "title": 
"" }, { "docid": "fddfab6e8ff34dabec025e8e01eee28c", "score": "0.64547086", "text": "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "title": "" }, { "docid": "a7fe5b78d38aa346ccdc544897d425d0", "score": "0.6426945", "text": "def TinyImageNetResNet18():\n return _resnet(\"resnet18\", BasicBlock, [2, 2, 2, 2], 64)", "title": "" }, { "docid": "6df156c887b62cad9976bca37c939fd2", "score": "0.6400317", "text": "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "6df156c887b62cad9976bca37c939fd2", "score": "0.6400317", "text": "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "6df156c887b62cad9976bca37c939fd2", "score": "0.6400317", "text": "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "ef14266bd2b64cedc20201a1576faa4f", "score": "0.6388115", "text": "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "dc9db4def5033b31448217960d1c93ed", "score": "0.6356469", "text": "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "737e34968cd32e960fc5266fc5638b57", "score": "0.6329808", "text": "def resnet18():\n out_features = params['out_features']\n\n model = models.resnet18(pretrained=True)\n\n # To freeze layers\n for param in model.parameters():\n param.requires_grad = False\n\n # New output layers\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, out_features)\n\n return model", "title": "" }, { "docid": "3d01349fa521a80a842e838b1538feac", "score": "0.62975574", "text": "def resnet12MTLofficial(**kwargs):\n model = ResNetMTLOfficial(**kwargs)\n return model", "title": "" }, { "docid": "fd4339f6d77323edca3fc0941dc77aab", "score": "0.62887967", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "fd4339f6d77323edca3fc0941dc77aab", "score": "0.62887967", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "fd4339f6d77323edca3fc0941dc77aab", "score": "0.62887967", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "fd4339f6d77323edca3fc0941dc77aab", "score": "0.62887967", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "85ef7de25c53d47c46fd95a029db9232", "score": "0.62863344", "text": "def sk_resnet18(bn_momentum=0.1, pretrained=False, output_stride=16,sparable = False):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bn_momentum, pretrained, output_stride,mode ='resnet18',sparable = sparable)\n return model", "title": "" }, { 
"docid": "c385438175827cf18eee1cddf26a0911", "score": "0.6280657", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "192347264b3c1bc3caf59a65e39a4bc5", "score": "0.627192", "text": "def resunet18(depth=64):\n model = ResUNet([2, 2, 2, 2], depth=depth)\n return model", "title": "" }, { "docid": "2cb40a6c6f1d532d9bcbdf115e2ef3d4", "score": "0.62369335", "text": "def resnet18(num_classes=200, osmeflag=False, nparts=1, pretrained=False, progress=True, **kwargs):\n return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], num_classes, osmeflag, nparts,\n pretrained, progress, **kwargs)", "title": "" }, { "docid": "fea2766fabe1dbada9ce0b13ceede41d", "score": "0.6230088", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)\n return model", "title": "" }, { "docid": "28380797ffc1c36eabb5bca14783d38b", "score": "0.61884975", "text": "def resnet18(kwargs):\n k_bits = kwargs['k_bits']\n num_layers = kwargs['num_layers']\n pre_k_bits = kwargs['pre_k_bits']\n ratio = kwargs['ratio']\n init_k_bits = 8\n\n model,model_k_bits = _resnet('resnet18',QuantBasicBlock,[2,2,2,2],init_k_bits= init_k_bits,\\\n pre_k_bits=pre_k_bits,k_bits=k_bits,num_layers=num_layers,ratio=ratio)\n\n return model,model_k_bits", "title": "" }, { "docid": "8d52d1758cf3234ced5d2e314a17e467", "score": "0.6174723", "text": "def resnet12(**kwargs):\n model = ResNet(BasicBlock4ResNet12, [1, 1, 1, 1], **kwargs)\n return model", "title": "" }, { "docid": "c964afb90f337bf682ff3c3605125846", "score": "0.61741924", "text": "def resnet18_2plus1d(pretrained=False, reversed=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], decomposed=True, reversed=reversed, **kwargs)\n if pretrained:\n print('load pretrained model ... 
')\n model.load_state_dict(remove_fc(model_zoo.load_url(model_urls['resnet18_2plus1'])), strict=False)\n return model", "title": "" }, { "docid": "e569b651e67960cafcb78c6e5eabd907", "score": "0.61642814", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(QBasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "878377213df598d379cf4aae0804e5c7", "score": "0.61606437", "text": "def resnet34(pretrained = False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(remove_fc(model_zoo.load_url(model_urls['resnet34'])), strict=False)\n return model", "title": "" }, { "docid": "297a175754d38100b2f5d1d37f834a56", "score": "0.6149945", "text": "def get_tl_model():\n res_34 = resnet_34(num_classes=2)\n res_34.load_weights('resnet_34_weight.hdf5')\n\n layers = [layer for layer in res_34.layers]\n layer_names = [layer.name for layer in res_34.layers]\n print(layer_names[138])\n x = layers[138].output\n x = GlobalAveragePooling1D()(x)\n x = Dense(2, activation='sigmoid')(x)\n\n custom_model = Model(res_34.input, x)\n for layer in custom_model.layers[:120]:\n layer.trainable = False\n\n return custom_model", "title": "" }, { "docid": "55aa63f264300ff57d3ec6e41c6b41a9", "score": "0.6131804", "text": "def make_res2net():\n print(\"Construct {}\".format(args.name))\n if args.name == \"res2net50\":\n model = res2net50(pretrained=False)\n pretrained_net = torch.load(\"./res2net50.pth\")\n print(\"check res2net50 pretrained_net: {}\".format(type(pretrained_net)))\n model.load_state_dict(pretrained_net)\n return model\n elif args.name == \"res2next_dla60\":\n model = res2next_dla60(pretrained=False)\n pretrained_net = torch.load(\"./res2next_dla60.pth\")\n print(\"check res2next_dla60 pretrained_net: {}\".format(type(pretrained_net)))\n model.load_state_dict(pretrained_net)\n return model\n elif args.name == \"res2next50\":\n model = res2next50(pretrained=False)\n pretrained_net = torch.load(\"./res2next50.pth\")\n print(\"check res2next50 pretrained_net: {}\".format(type(pretrained_net)))\n model.load_state_dict(pretrained_net)\n return model\n else:\n raise ValueError(\"Not a valid name: {}!\".format(args.name))", "title": "" }, { "docid": "b182a242313621e884c20990690493a6", "score": "0.6094363", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(\n remove_fc(model_zoo.load_url(model_urls['resnet34'])))\n return model", "title": "" }, { "docid": "3cc65504535b0bb13cc44ff1d5a19d47", "score": "0.60882014", "text": "def _build_model():\n settings = {}\n settings['model'] = 'semi_ridge_anchor'\n settings['numtopics'] = 20\n settings['numtrain'] = 1\n settings['expgrad_epsilon'] = 1e-4\n return models.build(RNG, settings)", "title": "" }, { "docid": "0c40dfa4ee938998987c68d9a75a758a", "score": "0.6061518", "text": "def resnet18_v1b_89(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):\n model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], name_prefix='resnetv1b_', **kwargs)\n dirname = os.path.dirname(__file__)\n json_filename = os.path.join(dirname, 'resnet%d_v%db_%.1fx' % (18, 1, 2.6) + \".json\")\n with open(json_filename, \"r\") as jsonFile:\n params_shapes = json.load(jsonFile)\n if pretrained:\n from ..model_store import get_model_file\n params_file = get_model_file('resnet%d_v%db_%.1fx' % (18, 1, 2.6), tag=pretrained,\n 
root=root)\n prune_gluon_block(model, model.name, params_shapes, params=ndarray.load(params_file),\n pretrained=True, ctx=ctx)\n else:\n prune_gluon_block(model, model.name, params_shapes, params=None, pretrained=False, ctx=ctx)\n if pretrained:\n from ...data import ImageNet1kAttr\n attrib = ImageNet1kAttr()\n model.synset = attrib.synset\n model.classes = attrib.classes\n model.classes_long = attrib.classes_long\n return model", "title": "" }, { "docid": "240b4677d508160070ce4db2c3844fb7", "score": "0.6048692", "text": "def pretrained_resnet18():\n model = torchvision.models.resnet18(pretrained=True)\n\n # Fix parameters of all pre-trained layers\n for param in model.parameters():\n param.requires_grad = False\n\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 2)\n\n return model", "title": "" }, { "docid": "32a877a0882838296115ca54c182dc62", "score": "0.6004656", "text": "def resnet18_v1b_custom(nclass=400, pretrained=False, pretrained_base=True,\n use_tsn=False, partial_bn=False, use_kinetics_pretrain=True,\n num_segments=1, num_crop=1, root='~/.mxnet/models',\n ctx=mx.cpu(), **kwargs):\n model = ActionRecResNetV1b(depth=18,\n nclass=nclass,\n partial_bn=partial_bn,\n num_segments=num_segments,\n num_crop=num_crop,\n dropout_ratio=0.5,\n init_std=0.01)\n\n if use_kinetics_pretrain and not pretrained:\n from gluoncv.model_zoo import get_model\n kinetics_model = get_model('resnet18_v1b_kinetics400', nclass=400, pretrained=True)\n source_params = kinetics_model.collect_params()\n target_params = model.collect_params()\n assert len(source_params.keys()) == len(target_params.keys())\n\n pretrained_weights = []\n for layer_name in source_params.keys():\n pretrained_weights.append(source_params[layer_name].data())\n\n for i, layer_name in enumerate(target_params.keys()):\n if i + 2 == len(source_params.keys()):\n # skip the last dense layer\n break\n target_params[layer_name].set_data(pretrained_weights[i])\n model.collect_params().reset_ctx(ctx)\n return model", "title": "" }, { "docid": "8eb72ad4d2e6a74d2203a40a4af21d7c", "score": "0.59974957", "text": "def resnet34_module(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "222c7a1d63e369e272f4869888d31582", "score": "0.598853", "text": "def model():\n model = sysml.Model(\"NCC-1701\")\n return model", "title": "" }, { "docid": "982c464be9e253785cbaaf166e40c402", "score": "0.59846926", "text": "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n if model.additional_blocks:\n model.load_state_dict(\n model_zoo.load_url(\n model_urls['resnet34']),\n strict=False)\n return model\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "title": "" }, { "docid": "47f2a547c3f48254cab457efc246ca30", "score": "0.59652233", "text": "def resnet34(activation_root=None, **kwargs):\n model_name = 'resnet34'\n activation_root = os.path.join(activation_root, model_name)\n model = ResNet(BasicBlock, [3, 4, 6, 3], activation_root=activation_root, **kwargs)\n\n state_dict = get_state_dict(model_name)\n model.load_state_dict(state_dict)\n\n return model", "title": "" }, { "docid": "2d70922fb4390ccefcbddf9929f512b3", "score": "0.5961116", "text": "def __init__(self, resnet_size, data_format=None, num_classes=NUM_CLASSES,\n resnet_version=resnet_model.DEFAULT_VERSION,\n 
dtype=resnet_model.DEFAULT_DTYPE):\n\n # TODO remove the checking process, which seems to be unecessary.\n # if resnet_size % 6 != 2:\n # raise ValueError('resnet_size must be 6n + 2:', resnet_size)\n\n # TODO the block should be 8 for ResNet-18\n # num_blocks = (resnet_size - 2) // 6\n num_blocks = 4\n\n # TODO define of ResNet is not correct.\n super(ResNetModel, self).__init__(\n resnet_size=resnet_size,\n bottleneck=False,\n num_classes=num_classes,\n num_filters=64,\n kernel_size=3,\n conv_stride=1,\n first_pool_size=None,\n first_pool_stride=None,\n block_sizes=[num_blocks] * 3,\n block_strides=[1, 2, 2],\n resnet_version=resnet_version,\n data_format=data_format,\n dtype=dtype\n )", "title": "" }, { "docid": "f2bebc22734015baef8bc73af97f4865", "score": "0.5958711", "text": "def make_ResNet34(): \n initializer = tf.keras.initializers.he_uniform()\n #CNN network\n inp = layers.Input(shape=[128, 128, 3], name='input_image')\n X = DefaultConv2D(64, kernel_size=7, strides=2)(inp)\n X = layers.BatchNormalization()(X)\n X = layers.Activation(\"relu\")(X)\n X = layers.MaxPool2D(pool_size=3, strides=2, padding=\"SAME\")(X)\n prev_filters = 64\n for filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3:\n strides = 1 if filters == prev_filters else 2\n X = ResidualUnit(filters, strides=strides)(X)\n prev_filters = filters\n X = layers.GlobalAvgPool2D()(X)\n x = layers.Flatten()(X)\n #x = layers.Dense(1024,activation='relu',kernel_initializer=initializer)(x)\n #x = layers.BatchNormalization()(x)\n last = layers.Dense(64,activation='softmax')(x)\n\n return tf.keras.Model(inputs=inp, outputs=last)", "title": "" }, { "docid": "b6513a9967d52f0efc8df7cbc4fc26ad", "score": "0.59526396", "text": "def make_model(self, name='indra_assembled', description='An Indra Auto-Curated network', version='1.0', debug=True ):\n # save debug status, this is used later in hte \"add\" functions\n self.debug = debug\n \n # add @context to the CX file\n self.myCX.set_namespaces([{ \"pubmed\": \"http://identifiers.org/pubmed/\",\n \"HGNC\": \"http://identifiers.org/hgnc/\",\n \"MGI\": \"http://www.informatics.jax.org/searchtool/Search.do?query=\", \n \"RGD\": \"https://rgd.mcw.edu/rgdweb/elasticResults.html?category=Gene&species=Rat&cat1=General&sp1=&postCount=1&term=\",\n \"chebi\": \"http://identifiers.org/CHEBI:\",\n \"uniprot\":\"http://www.uniprot.org/uniprot/?query=\",\n # Still have to figure out: (below are old db's not sure if actually used)\n \"cas\": \"http://identifiers.org/cas/\",\n \"hprd\": \"http://identifiers.org/hprd/\", \n \"KEGG Compound\": \"http://identifiers.org/kegg.compound/\" }])\n\n # Add extra indra statements depending on settings\n for stmt in self.statements:\n if isinstance(stmt, Modification):\n self._add_modification(stmt)\n if isinstance(stmt, SelfModification):\n self._add_self_modification(stmt)\n elif isinstance(stmt, RegulateActivity) or isinstance(stmt, RegulateAmount):\n self._add_regulation(stmt)\n elif isinstance(stmt, Complex):\n self._add_complex(stmt)\n elif isinstance(stmt, Gef):\n self._add_gef(stmt)\n elif isinstance(stmt, Gap):\n self._add_gap(stmt)\n \n # Add network Attributes\n self.myCX.set_name( name )\n self.myCX.add_network_attribute(name='description', values= description )#, type=ATTRIBUTE_DATA_TYPE.STRING) #<- removed type variable\n self.myCX.add_network_attribute(name='version', values= version )#, type=ATTRIBUTE_DATA_TYPE.STRING) #<- removed type variable\n \n self.cx_json = json.dumps( self.myCX.to_cx() ,indent=2)\n return self.cx_json", "title": "" }, { 
"docid": "fa7daba604ce7b10095551f69e0eb837", "score": "0.5935368", "text": "def create_model(self) -> LightningModule:\n pass", "title": "" }, { "docid": "d9c7bbbdbc7611e65dae4201eb2a0d6b", "score": "0.5904462", "text": "def build_model(self):\n\n network_name = self.config[\"network_name\"]\n num_classes = self.config[\"num_classes\"]\n pretrained = self.config[\"pretrained_imagenet\"]\n loss_type = self.config[\"loss_type\"]\n self.num_images = self.loss_type_dict[loss_type]\n\n # TODO: Need to fix resnet; currently does not work\n if \"resnet\" in network_name:\n num_layers = int(network_name.split(\"resnet\")[1])\n model = networks.resnet(num_layers, num_classes, pretrained)\n elif network_name == \"alexnet\":\n model = networks.alexnet(self.num_images, num_classes, pretrained)\n else:\n raise RuntimeError(\"%s is an invalid network_name\" % network_name)\n\n model = model.to(self.device)\n return model", "title": "" }, { "docid": "194a54cedc03ff40d6df088246c39e48", "score": "0.587439", "text": "def __init__(self):\n self.model = ResNet50(weights='imagenet')\n logging.info('ResNet50 ready to predict out of 1000 objects')", "title": "" }, { "docid": "35b6268f16042d5287c97d51f46c8236", "score": "0.5873216", "text": "def _construct_model(self):\n self.model = RankSVM(self.n_object_features)", "title": "" }, { "docid": "a8cccd06d334bf39f591149c2d6c27dc", "score": "0.5814251", "text": "def pose_resnet18_v1b(**kwargs):\n return get_pose_resnet('resnet18_v1b', **kwargs)", "title": "" }, { "docid": "ca890ccf01b646769df9b6b8460049b3", "score": "0.57957405", "text": "def construct_model(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "4a508c58ae79c510ead20f3e9c4352a4", "score": "0.5771366", "text": "def resnet34(num_class,**kwargs):\n model = CRFResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_class, **kwargs)\n\n return model", "title": "" }, { "docid": "8ab7e304ad2eaa8f0d4f737b4d14af4f", "score": "0.57625246", "text": "def build_model(cls, args, task):\n\n # make sure all arguments are present in older models\n seq2seq_architecture(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n tgt_dict = task.target_dictionary\n tgt_dict.add_symbol(\"<ctc_blank>\")\n\n encoder = cls.build_encoder(args, tgt_dict)\n lm = cls.build_lm(args, tgt_dict)\n\n dim_phone, dim_hidden, vocab_size = encoder.d, 1024, lm.dim_output\n hidden_layer = NonlinearLayer(\n vocab_size, dim_hidden, bias=False, activation_fn=nn.ReLU\n )\n gating_network = NonlinearLayer(\n dim_hidden + dim_phone,\n dim_hidden,\n bias=True,\n activation_fn=nn.Sigmoid,\n )\n output_projections = NonlinearLayer(\n dim_hidden + dim_phone,\n vocab_size,\n bias=False,\n activation_fn=nn.ReLU,\n )\n decoder = cls.build_decoder(\n args, hidden_layer, gating_network, output_projections, lm)\n\n return cls(args, encoder, decoder, tgt_dict)", "title": "" }, { "docid": "f946bcc9b1334ae47ff3674bb1d1c3c6", "score": "0.5740129", "text": "def pose_resnet18_v1(**kwargs):\n return get_pose_resnet('resnet18_v1', **kwargs)", "title": "" }, { "docid": "43cd49fd5d5da02ec6f377d3d3bcf12c", "score": "0.5737549", "text": "def build_model(base_model_cfg=\"vgg\"):\r\n if base_model_cfg == \"vgg\":\r\n return EGNet(base_model_cfg, *extra_layer(base_model_cfg, Vgg16()))\r\n if base_model_cfg == \"resnet\":\r\n return EGNet(base_model_cfg, *extra_layer(base_model_cfg, resnet50()))\r\n raise ValueError(\"unknown config\")", 
"title": "" }, { "docid": "9064238f2f1cfd120019827ec3e909a4", "score": "0.57208407", "text": "def build_model(cls, args, task):\n # make sure all arguments are present in older models\n w2v_cif_bert_architecture(args)\n\n lm = cls.build_bert(args)\n encoder = cls.build_encoder(args) # encoder\n\n tgt_dict = task.target_dictionary\n\n return cls(args, encoder, lm, tgt_dict)", "title": "" }, { "docid": "9e7ab6a82f23d28316ffacf32f2b7ef2", "score": "0.57150924", "text": "def get_model(model_name):\n if model_name == 'resnet_3d':\n net = r3d_18(pretrained=False, num_classes=num_classes)\n elif model_name == 'resnet_mixed_conv':\n net = mc3_18(pretrained=False, num_classes=num_classes)\n elif model_name == 'resnet_2_1d':\n net = r2plus1d_18(pretrained=False, num_classes=num_classes)\n else :\n sys.exit('Error: Incorrect model name')\n return net", "title": "" }, { "docid": "6159a84f0d0c3fcdc40967dbc2fc5539", "score": "0.5709897", "text": "def __init__(self):\n setting.load([\"max_gram\",\"file_lm\",\"weights\"])\n self.maxGram = int(setting.max_gram)\n sys.stderr.write(\"[GentileModel] Loading language model... \\n\")\n self.lm = LanguageModel(lm_file=setting.file_lm, n=setting.max_gram)\n self.weights = setting.weights\n self.lenWeights = len(self.weights)\n self.weightsForRules = self.weights[:-1]\n self.cacheLMProbs = {}", "title": "" }, { "docid": "61b27d176eae2f3defcb77b5558bd975", "score": "0.5701194", "text": "def resnet34():\n return ResNet(BasicBlock, [3, 4, 6, 3])", "title": "" }, { "docid": "51aa71020dc18a2fae0a52336e95608f", "score": "0.5700848", "text": "def build_model(cls, args, task):\n # make sure all arguments are present in older models\n w2v_cif_bert_architecture(args)\n tgt_dict = task.target_dictionary\n\n bert, to_vocab = cls.build_bert(args)\n encoder = cls.build_encoder(args) # encoder\n\n return cls(args, encoder, bert, to_vocab, tgt_dict)", "title": "" }, { "docid": "3bda737959ebbf8c919e55866b5a975b", "score": "0.5690763", "text": "def resnet101(pretrained=False):\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "title": "" }, { "docid": "e944d2411fafdd1ed466216f93af661e", "score": "0.56868505", "text": "def resnet18_v1b_kinetics400(nclass=400, pretrained=False, pretrained_base=True,\n use_tsn=False, partial_bn=False,\n num_segments=1, num_crop=1, root='~/.mxnet/models',\n ctx=mx.cpu(), **kwargs):\n model = ActionRecResNetV1b(depth=18,\n nclass=nclass,\n partial_bn=partial_bn,\n num_segments=num_segments,\n num_crop=num_crop,\n dropout_ratio=0.5,\n init_std=0.01)\n\n if pretrained:\n from ..model_store import get_model_file\n model.load_parameters(get_model_file('resnet18_v1b_kinetics400',\n tag=pretrained, root=root))\n from ...data import Kinetics400Attr\n attrib = Kinetics400Attr()\n model.classes = attrib.classes\n model.collect_params().reset_ctx(ctx)\n return model", "title": "" }, { "docid": "3e2f69a2eda315205d7bb9b06d863361", "score": "0.5685932", "text": "def resnet152(pretrained=False):\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "title": "" }, { "docid": "63e7734b40e3684229c2b743a21d89f8", "score": "0.5685799", "text": "def create_model(base_net):\n # model = Sequential()\n # model.add(Dense(42, activation='relu'))\n # model.add((Dense(6, activation='sigmoid')))\n \n \n if base_net == 'vgg': \n x = VGG16(weights='imagenet', include_top=False)\n elif base_net == 
'resnet':\n x = ResNet50(weights='imagenet', include_top=False)\n \n for layer in x.layers[1:]:\n layer.trainable = False\n\n input = Input(shape=(256,256,3), name='image_input')\n x = x(input)\n\n x = Flatten(name='flatten')(x)\n x = Dense(4096, activation='relu', name='fc1')(x)\n x = Dense(4096, activation='relu', name='fc2')(x)\n x = Dense(LABELS, activation='sigmoid', name='predictions')(x)\n\n model = Model(input=input, output=x)\n\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.summary()\n\n return model", "title": "" }, { "docid": "62a3eff04f69daa579929e1548236510", "score": "0.568361", "text": "def __init__(self, model_config, num_labels, device):\n super(RobertaForBinaryClassification, self).__init__(model_config)\n self.tokenizer = AutoTokenizer.from_pretrained(\"roberta-base\")\n self.tokenizer.pad_token = self.tokenizer.eos_token\n model_config.num_labels = num_labels\n self.num_labels = num_labels\n self.model = RobertaModel.from_pretrained(\"roberta-base\", output_attentions=True).to(device)\n self.dropout = torch.nn.Dropout(model_config.hidden_dropout_prob)\n self.classifier = torch.nn.Linear(model_config.hidden_size, num_labels)\n self.loss_fn = BCEWithLogitsLoss()\n self.apply(self._init_weights)", "title": "" }, { "docid": "f744981520f4b23e5c9ae75e58e673cc", "score": "0.56800985", "text": "def build_model(cls, args, task):\n\n # make sure all arguments are present in older models\n w2v_seq2seq_lm_architecture(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n tgt_dict = task.target_dictionary\n\n decoder_embed_tokens = build_embedding(tgt_dict, args.decoder_embed_dim)\n encoder = cls.build_encoder(args)\n lm = cls.build_lm(args, tgt_dict)\n decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens, lm)\n\n return cls(args, encoder, decoder)", "title": "" }, { "docid": "f3e135ceedbbae9032a4d81bc90284c4", "score": "0.5679732", "text": "def resnet26(**kwargs):\n model = ResNet(Bottleneck, [2, 2, 2, 2], **kwargs)\n return model", "title": "" }, { "docid": "dd290ee93b4e06d0d25c35087dee7ca1", "score": "0.567748", "text": "def resnet152(pretrained=False):\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model", "title": "" }, { "docid": "0042a98d4f07a1c07c23e21329e1de67", "score": "0.56769466", "text": "def resnet18_pt_mcn(weights_path=None, **kwargs):\n model = Resnet18_pt_mcn()\n if weights_path:\n state_dict = torch.load(weights_path)\n model.load_state_dict(state_dict)\n return model", "title": "" }, { "docid": "26a9d2a7806f0f0b752fa32e86604201", "score": "0.5674696", "text": "def create_model(self):\n\t\tself._send_packet([_REGMODEL])\n\t\treturn self._get_packet(12)[0]", "title": "" }, { "docid": "a2f8fe10e37fe545d793929755e74864", "score": "0.56637084", "text": "def __init__(self):\n print(\"making model\")\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv1d(16, 80, 1, groups=4),\n torch.nn.BatchNorm1d(80),\n torch.nn.ReLU(),\n torch.nn.Conv1d(80, 32, 1),\n torch.nn.BatchNorm1d(32),\n torch.nn.ReLU(),\n _ResidueModule(32),\n _ResidueModule(32),\n torch.nn.AvgPool1d(2),\n _ResidueModule(32),\n _ResidueModule(32),\n torch.nn.AvgPool1d(2),\n _ResidueModule(32),\n _ResidueModule(32),\n torch.nn.AvgPool1d(2),\n _ResidueModule(32),\n _ResidueModule(32),\n torch.nn.AdaptiveAvgPool1d(1),\n 
)\n self.classifier = torch.nn.Linear(32, 3)", "title": "" }, { "docid": "4228f297b4e365908ed862f65130dce4", "score": "0.56617606", "text": "def __build_new_model(self):\n self.model = Models.build_model()", "title": "" }, { "docid": "d324cd329f2a17b15580211fe54dc224", "score": "0.56605244", "text": "def __init__(self,mode='fc0'):\n\n path = '/deep/group/aihc-bootcamp-spring2020/cxr_fewer_samples/experiments/jingbo/resnet18_mocov2_20200617-021146_SLURM1534372/'\n path += \"checkpoint_0003.pth.tar\"\n checkpoint = torch.load(path, map_location=\"cpu\")\n state_dict = dict((key[7:], value)\n for (key, value) in checkpoint['state_dict'].items())\n model = moco.builder.MoCo(\n models.__dict__['resnet18'],\n\n #Parameter from Jingbo's checkpoint - Different from default!!\n K=49152,\n mlp=True,\n pretrained=False)\n model.load_state_dict(state_dict)\n if mode == 'fc1':\n self.model = model.encoder_q\n elif mode == 'fc0':\n\n #STRANGE: without flatten(), output size is [x, 512, 1, 1]\n self.model = torch.nn.Sequential(*list(model.encoder_q.children())[:-1],torch.nn.Flatten())\n else:\n self.model = torch.nn.Sequential(*list(model.encoder_q.children())[:-2],torch.nn.Flatten())\n self.model.eval()", "title": "" }, { "docid": "16767140ee343f62b8d6236c974cae6c", "score": "0.56564", "text": "def build_model(model_name):\r\n blocks_args, global_params = get_model_params(model_name, None)\r\n print('blocks_args= {}'.format(blocks_args))\r\n print('global_params= {}'.format(global_params))\r\n model = MixnetModel(blocks_args, global_params)\r\n\r\n return model", "title": "" }, { "docid": "5905ad916fd50f7839b15154e0caaef3", "score": "0.565083", "text": "def __init__(self, layers: list):\n \n super().__init__()\n mapping_dict = {\"conv1_1\": 0, \"conv1_2\": 2,\n \"conv2_1\": 5, \"conv2_2\": 7,\n \"conv3_1\": 10, \"conv3_2\": 12, \"conv3_3\": 14, \"conv3_4\": 16,\n \"conv4_1\": 19, \"conv4_2\": 21, \"conv4_3\": 23, \"conv4_4\": 25,\n \"conv5_1\": 28, \"conv5_2\": 30, \"conv5_3\": 32, \"conv5_4\": 34}\n \n # create an integer mapping for the layer names; +1 to get the output of ReLu layer\n self.layers = [mapping_dict[layer] + 1 for layer in layers]\n \n self.vgg19 = models.vgg19(pretrained = True, progress = True).features\n self.vgg19 = self.vgg19.to(device).eval() # Keep the model in .eval() mode only", "title": "" }, { "docid": "9684fd0d22fdb816a8ed6d7fdba0b9fc", "score": "0.5631836", "text": "def _construct_model(self):\n self.logger.info(\"Construct model..\")\n self.model = FATEObjectRanker(self.n_object_features)\n self.model.set_tunable_parameters(n_hidden_set_units=self.n_hidden_set_units,\n n_hidden_set_layers=self.n_hidden_set_layers,\n n_hidden_joint_units=self.n_hidden_joint_units,\n n_hidden_joint_layers=self.n_hidden_joint_layers,\n reg_strength=self.reg_strength, learning_rate=self.learning_rate,\n batch_size=self.batch_size)\n self.logger.info(\"Finished constructing model\")", "title": "" }, { "docid": "59857065d11bdf54c42bc2af5ce87d08", "score": "0.5630086", "text": "def __init__(self, model_config, input_shape, width_multiplier=1, stem=\"32\", weight_decay=1e-4, latent_dim=None):\n super(ResNetBackbone, self).__init__() \n\n self.input_s = input_shape\n self.batch_norm_decay = 0.9\n self.batch_norm_epsilon = 1e-5\n self.weight_decay = weight_decay\n self.latent_dim = latent_dim\n\n self.stem = stem\n\n self.width_multiplier = width_multiplier\n\n self.classes = 0\n self.include_top = 0\n self.resNet_depth = model_config[\"resNetDepth\"]\n\n layer_configuarations = {\n 18: [2, 2, 2, 
2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n\n if self.resNet_depth not in layer_configuarations:\n raise ValueError('Not a valid resNet_depth:', self.resNet_depth)\n\n if self.resNet_depth > 34:\n block = self.bottleneck_block\n else:\n block = self.residual_block \n\n layers = layer_configuarations[self.resNet_depth]\n self.resNet = self.resnet_v1_generator(input_shape, block, layers, self.width_multiplier)", "title": "" }, { "docid": "ebe336985262eb156d7d21bbd0d92d4f", "score": "0.56279933", "text": "def make_model(num_i=4, num_o=4):\n return Model(num_i, num_o)", "title": "" }, { "docid": "76149588ab039a30ef32212d72cb11fb", "score": "0.56246656", "text": "def build_model(cls, args, task):\n # make sure all arguments are present in older models\n w2v_cif_bert_architecture(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n lm = cls.build_bert(args)\n encoder = cls.build_encoder(args) # encoder\n assigner = cls.build_assigner(args, encoder.d)\n\n tgt_dict = task.target_dictionary\n\n return cls(args, encoder, assigner, lm, tgt_dict)", "title": "" }, { "docid": "050877479bb6ab4dd8a3f396052a9048", "score": "0.56239206", "text": "def resnet34(num_classes=1000, pretrained='imagenet'):\n model = models.resnet34(pretrained=False)\n if pretrained is not None:\n settings = pretrained_settings['resnet34'][pretrained]\n model = load_pretrained(model, num_classes, settings)\n model = modify_resnets(model)\n return model", "title": "" }, { "docid": "fdbda852a42bee1c2e900cf268f129db", "score": "0.56236404", "text": "def init_model(model_architecture, pretrained=False):\n model_func = _BASE_MODELS.get(model_architecture, resnet18)\n model = model_func(pretrained=pretrained)\n\n return model", "title": "" }, { "docid": "020b6f8a39beb5d1d87e52e46894786f", "score": "0.5621156", "text": "def make_model(self, *args):\n m = pyo.ConcreteModel()\n for a in args:\n setattr(m, a, pyo.Var())\n m.R = pyo.Param(initialize=self.R)\n m.MW = pyo.Param(initialize=self.MW)\n m.T_star = pyo.Param(initialize=self.T_star)\n m.rho_star = pyo.Param(initialize=self.rho_star)\n m.Tc = pyo.Param(initialize=self.Tc)\n m.rhoc = pyo.Param(initialize=self.rhoc)\n m.Pc = pyo.Param(initialize=self.Pc)\n m.Tt = pyo.Param(initialize=self.Tt)\n m.Pt = pyo.Param(initialize=self.Pt)\n m.rhot_l = pyo.Param(initialize=self.rhot_l)\n m.rhot_v = pyo.Param(initialize=self.rhot_v)\n m.P_min = pyo.Param(initialize=self.P_min)\n m.P_max = pyo.Param(initialize=self.P_max)\n m.rho_max = pyo.Param(initialize=self.rho_max)\n m.T_min = pyo.Param(initialize=self.T_min)\n m.T_max = pyo.Param(initialize=self.T_max)\n return m", "title": "" }, { "docid": "0759e3fe9ead7f99a7be4d81c6321144", "score": "0.56168133", "text": "def __init__(self):\n super().__init__()\n self.add(\n ResourceURI(\n 'kenning:///models/classification/pytorch_pet_dataset_mobilenetv2_full_model.pth' # noqa: E501\n ),\n 'torch',\n 'PyTorchPetDatasetMobileNetV2'\n )\n self.add(\n ResourceURI(\n 'kenning:///models/classification/pytorch_pet_dataset_mobilenetv2.pth' # noqa: E501\n ),\n 'torch_weights',\n 'PyTorchPetDatasetMobileNetV2'\n )\n self.add(\n ResourceURI(\n 'kenning:///models/classification/tensorflow_pet_dataset_mobilenetv2.h5' # noqa: E501\n ),\n 'keras',\n 'TensorFlowPetDatasetMobileNetV2'\n )", "title": "" }, { "docid": "ecfd36ace504899bb68a1e240b0a01a0", "score": "0.5612638", 
"text": "def __init__(self):\n super().__init__()\n self.resnet = nn.Sequential(\n *list(models.resnet101(pretrained=True).children())[:-1]\n )\n for param in self.resnet.parameters():\n param.requires_grad = False\n self.linear = list(models.resnet101(pretrained=True).children())[-1]", "title": "" }, { "docid": "4f55a3493b53c49ddc6b1ff0c67f9824", "score": "0.5605384", "text": "def __init__(self, load=False, num_input=32, num_views=100, num_output=100, learning_rate=0.001,\n model_name=\"default_model\", variation=0):\n self.learning_rate = learning_rate\n self.learning_decay = 0.995\n self.num_input = num_input\n self.num_views = num_views\n self.aux_info = num_views * 2 # Both current and visited\n self.num_output = num_output\n self.model_name = model_name\n self.folder = \"Models/\"\n self.activation_function = \"sigmoid\"\n self.load = load\n\n if load:\n self.model = self.load_model(model_name)\n else: #What type of architecture the model should have\n if variation == 0:\n self.model = self.generate_main_architecture_model()\n elif variation == 1:\n self.model = self.generate_first_variation_model()\n elif variation == 2:\n self.model = self.generate_second_variation_model()\n else:\n print(\"No valid model variation set\")\n return\n self.compile_model()", "title": "" } ]
c180c31a7a2b1a63b65435bd5ca34831
Makes a colon-delimited list from List.
[ { "docid": "fa75ac79feca99b06e944e975eadf784", "score": "0.7750035", "text": "def makelist(List):\n clist=\"\"\n for element in List:\n clist=clist+element+\":\"\n return clist[:-1]", "title": "" } ]
[ { "docid": "11d1cb31209e79b9f40e262fba2d7268", "score": "0.651048", "text": "def splitingFunction(splitList):\n newlist = []\n for item in splitList:\n split = item.split(':')[0]\n newlist.append(split)\n return newlist", "title": "" }, { "docid": "8669db494fbd1b91d74366e9904f2d6d", "score": "0.64027977", "text": "def _format_list(value: List) -> List:\n return value", "title": "" }, { "docid": "70e0731c9915787f0698509afcaab7cc", "score": "0.6296165", "text": "def format_list(list):\n \n print \",\".join(list)", "title": "" }, { "docid": "909b9fce2e56a9f87aa827a480236640", "score": "0.6231475", "text": "def __split_list_settings(value, separator):\n\n stripped = (a.strip() for a in value.split(separator))\n return list(a for a in stripped if len(a))", "title": "" }, { "docid": "24570844110a59e2c6734df8ff7411a5", "score": "0.6216427", "text": "def list_str(l):\r\n return \"[\" + \", \".join([p.__str__() for p in l]) + \"]\"", "title": "" }, { "docid": "00b160b8af2c8d5448bc98bc3996e378", "score": "0.6210667", "text": "def make_list(value, sep=','):\n return value.split(sep) if isinstance(value, str) else value", "title": "" }, { "docid": "f9d0116c26cea4971b36c9b47b95419d", "score": "0.6199183", "text": "def format_list(seq):\n if hasattr(seq, 'tolist'):\n seq = seq.tolist()\n elif hasattr(seq, '__len__'):\n seq = list(seq)\n return ', '.join(list(map(str, seq)))", "title": "" }, { "docid": "168edac6f34c37a0372a1a73ea103f07", "score": "0.6197347", "text": "def itemlist(item, sep, suppress_trailing=True):\n return attach(\n item\n + ZeroOrMore(sep + item)\n + Optional(sep.suppress() if suppress_trailing else sep),\n add_list_spacing,\n )", "title": "" }, { "docid": "ee0b3e7be65f6a43dfbc9ed5b479196e", "score": "0.6171717", "text": "def listString(lister, delim):\n retstr = \"\"\n for i in range(len(lister)): retstr = retstr + lister[i] + delim\n\n retstr = retstr.rstrip(\";\")\n\n return retstr", "title": "" }, { "docid": "fecbbab6f10ab629916a98370705210b", "score": "0.6153299", "text": "def catlist(alist, sep=\"\"):\n if len(alist) == 0:\n return \"\"\n if len(alist) == 1:\n return alist[0]\n f = alist[0]\n for p in alist[1:]:\n f = f + sep + p\n return f", "title": "" }, { "docid": "52c88dc7f14cfae7b9e2688ee2f09430", "score": "0.60904855", "text": "def list_str(inlist):\n return '\\n'.join(inlist)", "title": "" }, { "docid": "25e767ffe5fbeff068bf50477721d2af", "score": "0.60802215", "text": "def yamllist(items) :\n return \"\\n\".join([ \"- %s\" % x for x in items])", "title": "" }, { "docid": "179ff0f632aefe976b7935fd27f6064f", "score": "0.6025009", "text": "def split_prefix(lst):\n return '_'.join(lst.split('_')[:-1])", "title": "" }, { "docid": "6fc9467edf7140ce16ff33c030393397", "score": "0.5995243", "text": "def turn_list_to_str(list_values, sep = \";\"):\n\n output_str = \"\"\n for value in list_values[:-1]:\n output_str = output_str + str(value) + sep\n output_str = output_str + str(list_values[-1])\n return output_str", "title": "" }, { "docid": "d05bc283681000c364a4bb80bf072077", "score": "0.5950333", "text": "def quote_list(the_list):\n return [\"'%s'\" % element for element in the_list]", "title": "" }, { "docid": "051d2eed7cb08dfeeab3745a71a39fca", "score": "0.5926013", "text": "def list2str(list):\n return \"[\" + \", \".join([str(x) for x in list]) + \"]\"", "title": "" }, { "docid": "7556476c31d3d94878391bea185c1f47", "score": "0.5916555", "text": "def list_to_string(input_list, separator):\n \n output = input_list[0]\n for item in input_list[1:]:\n 
output=string_concatenator(output, item, separator)\n return output", "title": "" }, { "docid": "99c6b855d040f82a7b26567a1d28109c", "score": "0.5899982", "text": "def delist(l):\n if type(l).__name__ == 'list':\n return '_'.join([delist(x) for x in l])\n else:\n return str(l)", "title": "" }, { "docid": "d80dfeec4662dd1b52c5aff194ec4378", "score": "0.5859391", "text": "def encodeList(items):\n line = []\n for item in items:\n item = item.strip()\n if not item:\n continue\n line.append(item)\n \n line = '\\t'.join(line)\n return line", "title": "" }, { "docid": "7d7823b69d78d0661cb6f144f3ef4a25", "score": "0.58574474", "text": "def listToString(l):\n s = l[1] + ' ' + l[2] + ' ' + l[3]\n return s", "title": "" }, { "docid": "9b47a889343e0c084e14e720912dcc79", "score": "0.5846651", "text": "def parse_list(msg):\n out_msg = []\n sub_msg = []\n esc = False\n in_quote = None\n for c in msg:\n if esc:\n sub_msg.append(c)\n esc = False\n else:\n if c == '\\\\':\n esc = True\n elif in_quote and c == in_quote:\n in_quote = None\n elif in_quote:\n sub_msg.append(c)\n elif c in ('\"', \"'\"):\n in_quote = c\n elif c == ',':\n m = ''.join(sub_msg).strip()\n if m != '':\n out_msg.append(m)\n sub_msg = []\n else:\n sub_msg.append(c)\n if len(sub_msg) != 0:\n m = ''.join(sub_msg).strip()\n if m != '':\n out_msg.append(m)\n return out_msg", "title": "" }, { "docid": "70eb652bef41412fddf34185d9e4c8f9", "score": "0.58362347", "text": "def stitch_list(l):\n l = [str(i) for i in l] # Convert all elements to string\n l = ''.join(l) # Stitch them together\n return l", "title": "" }, { "docid": "ea822332065d911ea3856700ce6239a7", "score": "0.5829238", "text": "def list2str(items, sep):\n rstr = \"\"\n for item in items:\n rstr = rstr + str(item) + sep\n return rstr[:-1]", "title": "" }, { "docid": "41902442cfd8d6fb4befb5b2d49e6f87", "score": "0.58138245", "text": "def formatList(L):\r\n LFormatted = []\r\n for i in L:\r\n if i != []:\r\n LFormatted.append(int(str(i)[1:-1]))\r\n return LFormatted", "title": "" }, { "docid": "047c749f97e575e30a043a46d6888fa1", "score": "0.57916385", "text": "def FormatList(items, formatter, level=0):\n return ', '.join(LimitedEnumerate(items, formatter, level=level))", "title": "" }, { "docid": "085be826d14c778623fcf34a2d65150e", "score": "0.57910395", "text": "def _as_list(list_str, delimiter=','):\n return [str.strip(item).rstrip() for item in list_str.split(delimiter)]", "title": "" }, { "docid": "c1447bce24354ff2e19f2cbce737fcc1", "score": "0.57853526", "text": "def list2string(string_list: list, separator='\\n') -> str:\n if len(string_list) > 0:\n return ''.join([f'{item}{separator}' for item in string_list])[\n :-len(separator)]\n return ''", "title": "" }, { "docid": "84f030e7a2b50d1a4ef1be752ded931f", "score": "0.5737608", "text": "def sqllist(lst):\r\n if isinstance(lst, basestring): \r\n return lst\r\n else:\r\n return ', '.join(lst)", "title": "" }, { "docid": "8847d85ec78a2a497626e023e6e5c05e", "score": "0.57288104", "text": "def _fixup_cc_list(cc_value):\r\n cclist = []\r\n for cc in re.split(r'[;,\\s]+', cc_value):\r\n if cc and cc not in cclist:\r\n cclist.append(cc)\r\n return ', '.join(cclist)", "title": "" }, { "docid": "f8a3f0c5cce42badbd79b3b074341daa", "score": "0.5728101", "text": "def convert_list_to_string(org_list, seperator=''):\n return seperator.join(org_list)", "title": "" }, { "docid": "01aa8d208f56745bb3e95d75819016dd", "score": "0.57270056", "text": "def list_to_dict_key(keylist):\r\n\treturn \":\".join(k for k in keylist)", "title": "" }, { 
"docid": "113d9834e0bcda3a62dfb1c2fec9cf13", "score": "0.57222646", "text": "def __call__(self, value):\r\n if isinstance(value, basestring):\r\n return value.split(self.sep)\r\n else:\r\n try:\r\n return list(value)\r\n except Exception, exc:\r\n raise InvalidWidgetArgument(error=exc,\r\n title=_('List conversion error'))", "title": "" }, { "docid": "ba763d224e06bd31feb54f4aac881b04", "score": "0.5711922", "text": "def charlist(in_list):\n out_list = []\n for i in in_list:\n assert type(i) in [str]\n if i == '':\n out_list.append(i)\n else:\n out_list += list(i)\n return out_list", "title": "" }, { "docid": "6939b0ac6d9bce326b1ed065c9b389d8", "score": "0.56960565", "text": "def strip_list(list):\n return [item.strip() for item in list]", "title": "" }, { "docid": "aebb6cd101083a2eb233c3b5b5914a51", "score": "0.5689092", "text": "def hex_list(L: List):\n return [hex(elt) for elt in L]", "title": "" }, { "docid": "740a9f6e26a2e10ed6621e39a67da6ef", "score": "0.5677563", "text": "def indentList(lst, indent):\n\traise NotImplementedError(\"This function is deprecated\")\n\tjoiner = \" \"*indent*INDENT_AMOUNT\n\treturn joiner.join(lst)", "title": "" }, { "docid": "c58e15bc8c0871b9dd2c397cedf0c0b1", "score": "0.5677465", "text": "def add_list_spacing(tokens):\n out = []\n for i, tok in enumerate(tokens):\n out.append(tok)\n if i % 2 == 1 and i < len(tokens) - 1:\n out.append(\" \")\n return \"\".join(out)", "title": "" }, { "docid": "48a5113b905dbcde6c6279f47fb315d9", "score": "0.567648", "text": "def strip_list(l):\n return ([s.strip() for s in l])", "title": "" }, { "docid": "b8e426c43473a309fe8931404826e523", "score": "0.5674332", "text": "def allToString(list):\n newList = []\n s = set()\n for e in list:\n if type(e)==types.ListType or type(e)==type(s):\n newList.append(\",\".join(e))\n else:\n newList.append(str(e))\n return newList", "title": "" }, { "docid": "54a571c2d5f39b44eb706cec4ca34cd2", "score": "0.56741405", "text": "def rstripList(self, theList):\n # 2010/08/27: fix bug 618482.\n s = ''.join(theList).rstrip()\n return s.split('\\n')", "title": "" }, { "docid": "6cb2f452e30716f155b7579715ac9ece", "score": "0.56689364", "text": "def convList2Array(list):\n\tarray = '{'\n\tfor item in list:\n\t\tarray += \"'%s',\" % pgdb.escape_string(item)\n\t# trim ending ','\n\tif len(array) > 1:\n\t\tarray = array[0:-1]\n\tarray += '}'\n\treturn array", "title": "" }, { "docid": "723753fa2d1dd9caf8c41f3a217765b3", "score": "0.56625366", "text": "def PassTransferListToString(ll):\n\tstring = \"\"\n\tfor i in ll:\n\t\tstring += str(i)\n\t\tstring += \",\"\n\treturn string", "title": "" }, { "docid": "df66b3c3acad00d3271b7c0d1c8e09a9", "score": "0.56552744", "text": "def parse_list(list_bytes):\n return [id.strip() for id in list_bytes.decode('utf8').split(_u('\\n'))\n if id.strip()]", "title": "" }, { "docid": "12cc97a628a1640536e6350641905487", "score": "0.56525207", "text": "def fromListToString(listForToString):\n string = \" \".join(listForToString)\n return string", "title": "" }, { "docid": "f0a673aedfff290e75ccacca49fe0f52", "score": "0.5652254", "text": "def format_list_to_commastring(ls):\n \n string = ''\n for item in ls:\n string += str(item).strip() + ','\n string = string.strip(',')\n \n return string", "title": "" }, { "docid": "3e73c884127a5fca71cbbd83b5b1a12d", "score": "0.56455827", "text": "def list_2_string(in_list):\n return ','.join(in_list) # This is much faster but can't be used for list of lists", "title": "" }, { "docid": "c47d11e681b8b108d9c9087fddcd7385", "score": 
"0.5643744", "text": "def formatList(theList, fmt = '%s\\n'):\n result = []\n for element in theList:\n# NTdebug(\"Doing element: \" +`element`)\n result.append(fmt%element.format())\n return ''.join(result)", "title": "" }, { "docid": "24a3926182d32e0f190ec355a99f3fca", "score": "0.56411743", "text": "def list_str_to_list(list_str, separator=','):\n list_str = list_str.replace('[', '').replace(']', '')\n return list_str.split(separator)", "title": "" }, { "docid": "fa0c3d1211ac0a1de66bd82b36a44d1b", "score": "0.5628604", "text": "def _list_format(data, per_line, style=WFX_FIELD_FMT):\n template = style * per_line\n leftover = len(data) % per_line\n # Template for last line.\n last_template = style * leftover\n\n pretty_list = [\n template % tuple(data[i : i + per_line])\n for i in range(0, len(data) - leftover, per_line)\n ]\n if leftover:\n return pretty_list + [last_template % tuple(data[-1 * leftover :])]\n return pretty_list", "title": "" }, { "docid": "74fc83e1e5b77ac697fbe536777e68a3", "score": "0.56123173", "text": "def add_colons_to_mac(self, mac_addr):\n s = list()\n for i in range(12 / 2): # mac_addr should always be 12 chars, we work in groups of 2 chars\n s.append(mac_addr[i * 2:i * 2 + 2])\n r = \":\".join(s)\n return r", "title": "" }, { "docid": "3d14b28922301fd6902541149245d805", "score": "0.56039095", "text": "def _format_list(param_list: Iterable[Any]) -> str:\n fmt_list = [f'\"{item}\"' for item in param_list]\n return \",\".join(fmt_list)", "title": "" }, { "docid": "75d6c3c9b101545eb61736c8bd0aef45", "score": "0.5591184", "text": "def get_comma_sep_string_from_list(items):\n\n if not items:\n return ''\n\n if len(items) == 1:\n return items[0]\n\n return '%s and %s' % (', '.join(items[:-1]), items[-1])", "title": "" }, { "docid": "053630d5b15a9a76bffcc2055a5a35f3", "score": "0.558819", "text": "def PassTransferStringToList(ss):\n\tll = []\n\tcurr = 0\n\tprev = 0\n\tfor i in ss:\n\t\tif i == ',':\n\t\t\tsub = ss[prev:curr]\n\t\t\tll.append(sub)\n\t\t\tprev = curr+1\n\t\tcurr += 1\n\treturn ll", "title": "" }, { "docid": "bdb311b8d2ab9a7e121a7a06a3a60bb2", "score": "0.5581604", "text": "def transform_list(string_list=\"\"):\n chars_to_remove = [\"-\"]\n string_list = \"\".join(\n i for i in string_list if i not in chars_to_remove).strip()\n return \"<li>{}</li>\\n\".format(string_list)", "title": "" }, { "docid": "4a327ac1124fafbbcd3da7ad2756573c", "score": "0.55776584", "text": "def ll_to_string(inputlist):\n inputlist = [list(map(str, i)) for i in inputlist]\n newlist = []\n for i in inputlist:\n newlist.append(' '.join(i).rstrip())\n string = '\\n'.join(newlist)\n return string", "title": "" }, { "docid": "3016f29899e05612710975c39c57a727", "score": "0.55764604", "text": "def combined_to_list(self, path):\n with open(path, 'r') as raw:\n text = raw.readlines()\n res = []\n k = []\n for line in text:\n if ':' not in line:\n res.append(k + line.strip('\\n').split(','))\n else:\n k = [line.strip(':\\n')]\n return res", "title": "" }, { "docid": "2b9b90435eb2c877aa4caf9255bab13b", "score": "0.5573941", "text": "def flatlist(group=None):\n return \" \".join(sorted(list_jails(group)))", "title": "" }, { "docid": "27b7b81070b4520972adacec4425bbf4", "score": "0.5559492", "text": "def bash_array(lst):\n\n contents = ' '.join(str(x) for x in lst)\n return '({:s})'.format(contents)", "title": "" }, { "docid": "3001fe18cc999b4a0e05b5e52d70d936", "score": "0.5557065", "text": "def listsplit(liststr=''):\n ret = []\n for e in liststr.split(','):\n ret.append(e.strip())\n return 
ret", "title": "" }, { "docid": "be16f8ea1554e1508d0a94f9564ea308", "score": "0.55556095", "text": "def grouplist(mylist,seperator=\"/\"):\n\tmygroups={}\n\tfor x in mylist:\n\t\txs=x.split(seperator)\n\t\tif xs[0]==\".\":\n\t\t\txs=xs[1:]\n\t\tif xs[0] not in mygroups:\n\t\t\tmygroups[xs[0]]=[seperator.join(xs[1:])]\n\t\telse:\n\t\t\tmygroups[xs[0]]+=[seperator.join(xs[1:])]\n\treturn mygroups", "title": "" }, { "docid": "f91b9f1026b34dabc609e7737e33a887", "score": "0.552983", "text": "def _truncate_list(self, x: Union[List[str], List[int]]) -> Union[List[str], List[int]]:\n # Save two slots for the first [CLS] token and the last [SEP] token.\n return x[: self._max_seq_length - 2]", "title": "" }, { "docid": "14921434dd2670f4cc49537f61c08226", "score": "0.5525684", "text": "def list_stringer(input_list): #input a list\r\n\toutput_list=[]\r\n\tfor item in input_list:\r\n\t\toutput_list.append(str(item))\r\n\treturn ' '.join (output_list) #output a string\r", "title": "" }, { "docid": "8fed9fc50e72ae6008f4bb242f38f740", "score": "0.55247545", "text": "def convert_line(l, c, id=0):\n return [f'{c}'] + [f'{i + 1}:{n}' for i, n in enumerate(l)]", "title": "" }, { "docid": "5425bb7e247a359640143ccb267db7d5", "score": "0.55200064", "text": "def unlist_values(to_list):\r\n return [''.join(x) for x in to_list]", "title": "" }, { "docid": "260e446f414d74c5d278a70e3608ad65", "score": "0.55179256", "text": "def list_join(L, x):\n if isinstance(x, string_types):\n x = (x, RESERVED_TOKEN)\n if len(L) == 0:\n return [], []\n out, out_types = copy.deepcopy(L[0][0]), copy.deepcopy(L[0][1])\n for v, t in L[1:]:\n if x:\n out += [x[0]]\n out_types += [x[1]]\n out += v\n out_types += t\n return out, out_types", "title": "" }, { "docid": "e8ea5080455b3deb85d3b0aa93a41e2f", "score": "0.55121875", "text": "def Stringefy(intlist,delim):\n\n\tstring = str(intlist)[1:-1].replace(', ',delim)\n\treturn string", "title": "" }, { "docid": "e914d4f69d20c8a66ef6fef59d5948e0", "score": "0.5505", "text": "def prepend(list, str):\n #add a {} at the end of the str\n str += '{0}'\n #recreate the list starting with the str\n list = [str.format(i) for i in list]\n return(list)", "title": "" }, { "docid": "01fcea0f4443bf0ffa26846e958ea2ca", "score": "0.550127", "text": "def _prettify_list(items):\n assert isinstance(items, list)\n\n keys_list = 'Available Keys:'\n for item in items:\n keys_list += '\\n - {0}'.format(item)\n return keys_list", "title": "" }, { "docid": "9a4519ce4b1f22204be87978e44d7b03", "score": "0.5499873", "text": "def get_delimited_string_from_list(_list, delimiter=', ', wrap_values_with_char=None, wrap_strings_with_char=None):\n\n if wrap_values_with_char is not None:\n return delimiter.join('{wrapper}{val}{wrapper}'.format(\n val=v,\n wrapper=wrap_values_with_char\n ) for v in _list)\n elif wrap_strings_with_char is not None:\n return delimiter.join(str(v) if not isinstance(v, str) else '{wrapper}{val}{wrapper}'.format(\n val=v,\n wrapper=wrap_strings_with_char\n ) for v in _list)\n else:\n return delimiter.join(str(v) for v in _list)", "title": "" }, { "docid": "ebb5e0cd97f93466a5d7dcd9dc4f4591", "score": "0.5498491", "text": "def parse_list_header(value):\n result = []\n for item in _parse_list_header(value):\n if item[:1] == item[-1:] == '\"':\n item = item[1:-1]\n result.append(item)\n return result", "title": "" }, { "docid": "62b0cfd7fb7472e9eaee5b9eb9e46ec6", "score": "0.54980606", "text": "def print_list(lst):\n for item in lst:\n print(' {}'.format(item))", "title": "" }, { "docid": 
"09a291a11d53fc0efad96f9ee48895bc", "score": "0.5496579", "text": "def render_list(inlist):\n\n # Return empty string to avoid returning unnecessary newlines\n if not inlist:\n return ''\n\n return '\\n{}\\n\\n'.format('\\n'.join([str(x) for x in inlist]))", "title": "" }, { "docid": "3a30719c5386a25871b7776eddaa837a", "score": "0.5491656", "text": "def listPrinter(lst):\r\n print(\"[\")\r\n for l in range(len(lst)):\r\n print(\" \" + str(lst[l]) + \",\" * (l != (len(lst) -1)))\r\n print(\"]\\n\")", "title": "" }, { "docid": "647af4d08c50593d82d7c1282685e8bf", "score": "0.5481166", "text": "def parse_list_header(value):\r\n result = []\r\n for item in _parse_list_header(value):\r\n if item[:1] == item[-1:] == '\"':\r\n item = unquote_header_value(item[1:-1])\r\n result.append(item)\r\n return result", "title": "" }, { "docid": "64874ae84be731fbe8982c4b302baf6d", "score": "0.54795486", "text": "def print_list(l):\n print('[' + ', '.join([x.__str__() for x in l]) + ']')", "title": "" }, { "docid": "0fb706846bc7069da3f389198a20362f", "score": "0.54726285", "text": "def convert_dict(lista):\n dic = {}\n lista = lista[0].split(\",\")\n for item in lista:\n dic[item.split(':',1)[0]] = item.split(':',1)[1]\n return dic", "title": "" }, { "docid": "837caf610280689fb0cee3a6ff08fe41", "score": "0.54676163", "text": "def _get_formatted_server_list(self, server_list):\n\n # dns/ntp server info from excel is of the format\n # 'xxx.xxx.xxx.xxx, (aaa.bbb.ccc.com)'\n # The function returns a list of comma separated dns ip addresses\n servers = []\n for data in server_list:\n if \"(\" not in data:\n servers.append(data)\n formatted_server_list = \",\".join(servers)\n return formatted_server_list", "title": "" }, { "docid": "700462b3f1a9dc50851905a4b9242e79", "score": "0.5460897", "text": "def parse_list(cls, list_string):\n\n return [cls.parse_string(s) for s in list_string]", "title": "" }, { "docid": "6da61c5499d748a445fab9d088a11562", "score": "0.5453253", "text": "def collapse_list(the_list):\n return [s for i in the_list for s in i]", "title": "" }, { "docid": "5c8027c0d1ba5756bb954361cfe8a9bb", "score": "0.5444541", "text": "def get_list_comma_sep_string(input_string):\n final_list = input_string.split(',')\n for i in range(0, len(final_list)):\n final_list[i] = final_list[i].strip()\n return final_list", "title": "" }, { "docid": "2c3cba30db49498a162a0a44589d496a", "score": "0.54422456", "text": "def listToParString(l):\n\treturn ' '.join([str(x) for x in l])", "title": "" }, { "docid": "5ba6dd08df6f123d023dfaae7c2b757e", "score": "0.54410183", "text": "def makeList(stringList):\n if isinstance(stringList, list):\n return stringList\n if isinstance(stringList, basestring):\n toks = stringList.lstrip(' [').rstrip(' ]').split(',')\n if toks == ['']:\n return []\n return[str(tok.strip(' \\'\"')) for tok in toks]\n raise WMWorkloadToolsException", "title": "" }, { "docid": "afc80c4c4b52698eb33e316a531abc79", "score": "0.54396296", "text": "def get_prep_value(self, value):\n super(ListField, self).get_prep_value(value)\n\n return ','.join(value)", "title": "" }, { "docid": "8e6c8e2c31cfd073de998ba6029e2703", "score": "0.5438427", "text": "def get_path_from_list(to_parse: typing.List[str], path_prefix: str) -> str:\n ret = path_prefix\n\n for i in to_parse:\n ret += f\"/{i}\"\n\n return ret.lstrip(\"/\").rstrip(\"/\")", "title": "" }, { "docid": "9599a9048fd8cc0bb9ea3574e22f5aa1", "score": "0.5430547", "text": "def to_list(str_rep_list: str) -> list:\n in_parts = 
str_rep_list.strip(\"[\").strip(\"]\").split(',')\n real_list = [part.strip(' ').strip(\"'\").strip('\"') for part in in_parts if part != '' and part != ' ']\n return real_list", "title": "" }, { "docid": "f51a82fc648432f8c20b2fe995fe9cb6", "score": "0.5430519", "text": "def read_colon_list(tok: BaseTokenizer, had_colon: bool = False) -> Tuple[List[str], Token]:\n strings = []\n ready_for_string = had_colon # Did we have a colon before?\n token = Token.EOF\n for token, tok_value in tok:\n if token is Token.STRING:\n if not ready_for_string:\n raise tok.error('Too many strings ({!r})!', tok_value)\n strings.append(tok_value)\n ready_for_string = False\n elif token is Token.COLON:\n if ready_for_string:\n # ': :' means to have an empty string there.\n strings.append('')\n ready_for_string = True\n elif token is Token.PLUS:\n if ready_for_string or not strings:\n raise tok.error('\"+\" without a string before it!')\n strings[-1] += tok.expect(Token.STRING)\n elif ready_for_string and token is Token.NEWLINE:\n continue # skip over this in particular..\n else:\n if ready_for_string:\n raise tok.error(token)\n return strings, token\n raise tok.error(token)", "title": "" }, { "docid": "61c5da8d264408b429a4243879ae37cb", "score": "0.54226595", "text": "def reformatStripList(stripList):\n String = ''\n for channel in stripList:\n String = String + str(channel) + ' '\n # Remove the last space.\n String = String[:-1]\n return String", "title": "" }, { "docid": "4d1a2f79c64e1d492a841416f794feb0", "score": "0.5421868", "text": "def _list_to_name(self, name_list):\n if name_list[1] != \"\":\n return \"{}, {}, {}\".format(*name_list)\n else:\n return \"{}, {}\".format(name_list[0], name_list[2])", "title": "" }, { "docid": "66e5001210d87e18c0101db116aa4146", "score": "0.54189306", "text": "def parse_http_list(s):\n res = []\n part = u''\n\n escape = quote = False\n for cur in s:\n if escape:\n part += cur\n escape = False\n continue\n if quote:\n if cur == u'\\\\':\n escape = True\n continue\n elif cur == u'\"':\n quote = False\n part += cur\n continue\n\n if cur == u',':\n res.append(part)\n part = u''\n continue\n\n if cur == u'\"':\n quote = True\n\n part += cur\n\n # append last part\n if part:\n res.append(part)\n\n return [part.strip() for part in res]", "title": "" }, { "docid": "4bdc69c46b53a5e9015a1c4e49c5ec4c", "score": "0.5393936", "text": "def decodeList(line):\n items = []\n for item in line.split('\\t'):\n item = item.strip()\n if not item:\n continue\n items.append(item)\n return items", "title": "" }, { "docid": "6ceacdda65c93baf94a0a5005165c504", "score": "0.5382561", "text": "def cut_list(l, length=100):\n if not isinstance(l, list):\n raise ValueError(\"A list is expected, got: %s\" % type(l))\n\n res = '['\n\n for idx, item in enumerate(l):\n s = str(item)\n\n new_len = len(res) + len(s)\n\n is_str = isinstance(item, str)\n\n if is_str:\n new_len += 2\n\n if new_len >= length:\n res += \"'%s...\" % s[:length - new_len] if is_str else \"%s...\" % s\n\n break\n else:\n res += \"'%s'\" % s if is_str else s\n res += ', ' if idx < len(l) - 1 else ']'\n\n return res", "title": "" }, { "docid": "b5c3ddecd4e45a43b098b76b76a40c0a", "score": "0.5379951", "text": "def put_list_chars( self, chars ):\n\t\tself.pl.PL_put_list_chars( self.pl_term, chars )", "title": "" }, { "docid": "24dd5316ae2424c928899bcd95a5f62b", "score": "0.5378834", "text": "def list_arg(raw_value):\n return str(raw_value).split(',')", "title": "" }, { "docid": "ff4c0dc014b8944f67764a8f8e4a97ac", "score": "0.53742325", 
"text": "def prepend_name_to_itemlist(name, item_lst):\n return [(name + item[0], item[1]) for item in item_lst]", "title": "" }, { "docid": "fd15d0f891ef42d821550a86ea30b1af", "score": "0.53685784", "text": "def abbrev_nodenames(node_list: List[str], prefix: str = None) -> List[str]:\n newlist = [s.split(\".\", 1)[0] for s in node_list]\n return newlist", "title": "" }, { "docid": "c7ffede8795a8e75abf3edafbfbabe04", "score": "0.53648967", "text": "def reformat_bibserlist(bibserstr):\n return ' '.join(bibserstr.translate(BIBSERLIST_UTRANS).split())", "title": "" }, { "docid": "fcb869f4deb67e4489437949bad785a9", "score": "0.53521484", "text": "def list_to_string(input_list):\n output_string = ''\n for index, item in enumerate(input_list):\n output_string += str(item)\n if index < len(input_list) - 2:\n output_string += ', '\n elif index == len(input_list) - 2:\n output_string += ' and '\n return output_string", "title": "" }, { "docid": "c33754de1f004b8dc1f6324c9740fbdc", "score": "0.53441066", "text": "def human_task_name_list(list):\n ret = []\n for name in list:\n ret.append(BaseTaskLoader.human_task_name(name))\n return ', '.join(ret)", "title": "" }, { "docid": "4b4019973d343f343992a4e35fe1a6a2", "score": "0.534016", "text": "def list_ingredients():\n return ', '.join(str(items) for items in ingredients_list)", "title": "" }, { "docid": "9942b3aeb18592b86a97d61d6dd203f8", "score": "0.53350043", "text": "def _ListTypeFormatString(value):\n\n if isinstance(value, tuple):\n return '({0})'\n if isinstance(value, set):\n return '{{{0}}}'\n return '[{0}]'", "title": "" } ]
988e2a7752f8000ebc216e3b5fe90fff
This is the creator. It can be passed the min/max X and Y values for the plane, and a transformation function (f). There are default values if the parameters are not passed to the creator. The creator then generates a 2D plane filled with the X & Y complex number coordinates of the specified plane transformed by the function f(). Note that the default function f() for computing the values in the plane is the identity function, so the values at the coordinate location are the coordinates themselves. Note also that the number of points in each axis is always forced to be a fixed value.
[ { "docid": "ad9e077f02647ddb17277f388d0ccd4c", "score": "0.64841443", "text": "def __init__(self, newXmin=-5., newXmax=5., newXlen=1001, newYmin=-5., newYmax=5., newYlen=1001, f=lambda x: x, maxLoop=100):\n self.xmin = newXmin\n self.xmax = newXmax\n self.ymin = newYmin\n self.ymax = newYmax\n self.xlen = newXlen\n self.ylen = newYlen\n # must sub 1 to get the correct actual step size, otherwise last element does not equal x or y max\n self.xstep = (self.xmax - self.xmin)/(self.xlen - 1)\n self.ystep = (self.ymax - self.ymin)/(self.ylen - 1)\n self.f = f\n self.max = maxLoop\n # call refresh() to generate the the plane and its contents\n self.refresh()", "title": "" } ]
[ { "docid": "5e86faa6a75ba40509e0990332270853", "score": "0.6098736", "text": "def make_plane():\n\n source = vtk.vtkPlaneSource()\n source.SetOrigin(-10.0, -10.0, 0.0)\n source.SetPoint2(-10.0, 10.0, 0.0)\n source.SetPoint1(10.0, -10.0, 0.0)\n source.SetXResolution(20)\n source.SetYResolution(20)\n source.Update()\n\n transform = vtk.vtkTransform()\n transform.Translate(0.0, 0.0, 0.0)\n transform.RotateX(-90.0)\n transform_filter = vtk.vtkTransformPolyDataFilter()\n transform_filter.SetInputConnection(source.GetOutputPort())\n transform_filter.SetTransform(transform)\n transform_filter.Update()\n\n # We have a m x n array of quadrilaterals arranged as a regular tiling in a\n # plane. So pass it through a triangle filter since the curvature filter only\n # operates on polys.\n tri = vtk.vtkTriangleFilter()\n tri.SetInputConnection(transform_filter.GetOutputPort())\n\n # Pass it though a CleanPolyDataFilter and merge any points which\n # are coincident, or very close\n cleaner = vtk.vtkCleanPolyData()\n cleaner.SetInputConnection(tri.GetOutputPort())\n cleaner.SetTolerance(0.005)\n cleaner.Update()\n\n return cleaner.GetOutput()", "title": "" }, { "docid": "bb3ce14f17bfc0196625fbf286186d0b", "score": "0.6033272", "text": "def __init__(self, newXmin=-5., newXmax=5., newXlen=11, newYmin=-5., newYmax=5., newYlen=11, c=(-1.037 + 0.17j), maxLoop=100):\n # set the function and re-compute the plane's values\n f = julia(c, maxLoop)\n self.c = c\n ComplexPlaneNP.__init__(self, newXmin, newXmax, newXlen, newYmin, newYmax, newYlen, f, maxLoop)", "title": "" }, { "docid": "772e3836487337435021adf49f7f5c7d", "score": "0.5945625", "text": "def plane(self):\n return Plane(self.position, self.position.Vz, self.position.Vy)", "title": "" }, { "docid": "c3c87c278b8e2d4513d1c98cb5fca3e5", "score": "0.5920536", "text": "def plane_grid(x_axis_lims, y_axis_lims, rotation_axis, rotation_angle, element_size):\n\n stub = \"\"\"\n Point(1) = {ax1_lim1, ax2_lim1, 0, cl};\n Point(2) = {ax1_lim2, ax2_lim1, 0, cl};\n Point(3) = {ax1_lim2, ax2_lim2, 0, cl};\n Point(4) = {ax1_lim1, ax2_lim2, 0, cl};\n Line(1) = {1, 2};\n Line(2) = {2, 3};\n Line(3) = {3, 4};\n Line(4) = {4, 1};\n Line Loop(1) = {1, 2, 3, 4};\n Plane Surface(2) = {1};\n Rotate {{rot_ax1, rot_ax2, rot_ax3}, {0, 0, 0}, rot_ang_rad} { Surface{2}; }\n Mesh.Algorithm = 2;\n \"\"\"\n import sys\n\n if sys.version_info.major >= 3 and sys.version_info.minor >= 6:\n return\n else:\n geometry = (\n \"ax1_lim1 = \"\n + str(x_axis_lims[0])\n + \";\\n\"\n + \"ax1_lim2 = \"\n + str(x_axis_lims[1])\n + \";\\n\"\n + \"ax2_lim1 = \"\n + str(y_axis_lims[0])\n + \";\\n\"\n + \"ax2_lim2 = \"\n + str(y_axis_lims[1])\n + \";\\n\"\n + \"rot_ax1 = \"\n + str(rotation_axis[0])\n + \";\\n\"\n + \"rot_ax2 = \"\n + str(rotation_axis[1])\n + \";\\n\"\n + \"rot_ax3 = \"\n + str(rotation_axis[2])\n + \";\\n\"\n + \"rot_ang_rad = \"\n + rotation_angle\n + \";\\n\"\n + \"cl = \"\n + str(element_size)\n + \";\\n\"\n + stub\n )\n return generate_grid_from_geo_string(geometry)", "title": "" }, { "docid": "64a6b3cb8566a976fea33a5cf10ae9da", "score": "0.58104485", "text": "def __init__(self, newXmin=-5., newXmax=5., newXlen=11, newYmin=-5., newYmax=5., newYlen=11, c=(-1.037 + 0.17j), maxLoop=100):\n # set the function and re-compute the plane's values\n f = juliaNV(c, maxLoop)\n self.c = c\n ComplexPlaneNP.__init__(self, newXmin, newXmax, newXlen, newYmin, newYmax, newYlen, f, maxLoop)", "title": "" }, { "docid": "699ef8eab5b4d8b155a71bea9c65dcc6", "score": "0.57951874", "text": "def 
plane(self, obj, *args, **kwargs):\n assert obj.type is Fol, \"Only Fol type instance could be plotted as plane.\"\n if \"zorder\" not in kwargs:\n kwargs[\"zorder\"] = 5\n animate = kwargs.pop(\"animate\", False)\n if isinstance(obj, Group):\n x = []\n y = []\n for azi, inc in obj.dd.T:\n xx, yy = self._cone(\n p2v(azi, inc),\n l2v(azi, inc),\n limit=89.9999,\n res=int(cosd(inc) * 179 + 2),\n )\n x = np.hstack((x, xx, np.nan))\n y = np.hstack((y, yy, np.nan))\n x = x[:-1]\n y = y[:-1]\n else:\n azi, inc = obj.dd\n x, y = self._cone(\n p2v(azi, inc),\n l2v(azi, inc),\n limit=89.9999,\n res=int(cosd(inc) * 179 + 2),\n )\n h = self.fig.axes[self.active].plot(x, y, *args, **kwargs)\n if animate:\n self.artists.append(tuple(h))\n self.draw()", "title": "" }, { "docid": "415a2aa689ea5c123cba2e29b419086f", "score": "0.57545996", "text": "def generate_surface(cls, func, *,\n u_param=(-1., 1., .1), v_param=(-1., 1., .1)):\n def make_axis(param):\n return np.linspace(param[0], param[1],\n int((param[1] - param[0]) / param[2]) + 1)\n\n u_axis = make_axis(u_param)\n v_axis = make_axis(v_param)\n u, v = np.meshgrid(u_axis, v_axis)\n surface = func(u, v)\n return cls(surface, param=np.array([u, v]))", "title": "" }, { "docid": "558fb53f9075cfaf2223918c8c78b5d6", "score": "0.56716377", "text": "def from_plane(cls, plane):\n raise NotImplementedError", "title": "" }, { "docid": "c520c94ceb10ef83d7e39bf092a193c9", "score": "0.56545216", "text": "def fit_plane(self, x_in, y_in, z_in):\n points = array([x_in, y_in, z_in])\n center = points.mean(axis=1)\n points[0,:] -= center[0]\n points[1,:] -= center[1]\n points[2,:] -= center[2]\n covariance = cov(points)\n \n eval, evec = linalg.eig(covariance)\n ax_id = argmin(eval)\n plane_normal = evec[:, ax_id]\n plane_d = dot(center.T, plane_normal)\n \n # print \"center: %s\" % center\n # print \"cov: %s\" % covariance\n # print \"eval: %s\" % eval\n # print \"evec: %s\" % evec\n # print \"axe: %s\" % ax_id\n # print \"normal: %s\" % plane_normal\n # print \"plane_d: %s\" % plane_d\n \n return array([plane_normal[0], plane_normal[1], plane_normal[2], plane_d])", "title": "" }, { "docid": "264422431472dd332162191815b9935e", "score": "0.55873793", "text": "def bl_make_plane(cls, name):\n verts=[]\n verts.append((1.0, -1.0, 0.0))\n verts.append((-1.0, -1.0, 0.0))\n verts.append((1.0, 1.0, 0.0))\n verts.append((-1.0, 1.0, 0.0))\n\n edges=[]\n edges.append((0,1))\n edges.append((2,3))\n edges.append((3,1))\n edges.append((0,2))\n\n faces=[]\n faces.append((2,3,1,0))\n \n meshplane = bpy.data.meshes.new(name)\n meshplane.from_pydata(verts, edges, faces)\n meshplane.name = name\n obj, base = SlicePlane.add_obj(meshplane, bpy.context)\n\n obj.location = Vector((0.,0.,0.))\n\n return obj # or is base the correct one?", "title": "" }, { "docid": "e2d1552c8173ff9f2e7adbe9838fa012", "score": "0.55644786", "text": "def create_grid_points(resolution, plane_axes, plane_offset, bounding_box, mode):\n\n from ..utils.generic import bold_ul_text\n\n ax1_min, ax1_max, ax2_min, ax2_max = bounding_box\n if mode.lower() == \"numpy\":\n plot_grid = _np.mgrid[\n ax1_min : ax1_max : resolution[0] * 1j,\n ax2_min : ax2_max : resolution[1] * 1j,\n ]\n points_tmp = [_np.ones(plot_grid[0].size) * plane_offset] * 3\n points_tmp[plane_axes[0]] = plot_grid[0].ravel()\n points_tmp[plane_axes[1]] = plot_grid[1].ravel()\n points = _np.vstack((points_tmp,))\n plane = None\n\n elif mode.lower() == \"gmsh\":\n if 2 not in plane_axes:\n axis1_lims = bounding_box[0:2]\n axis2_lims = bounding_box[2:]\n 
rotation_axis = [0, 0, 1]\n rotation_angle = \"2*Pi\"\n elif 1 not in plane_axes:\n axis1_lims = bounding_box[0:2]\n axis2_lims = bounding_box[2:]\n rotation_axis = [1, 0, 0]\n rotation_angle = \"Pi/2\"\n elif 0 not in plane_axes:\n axis1_lims = bounding_box[2:]\n axis2_lims = bounding_box[0:2]\n rotation_axis = [0, 1, 0]\n rotation_angle = \"-Pi/2\"\n else:\n raise ValueError(\"Plane axis not correctly defined.\")\n\n elem_len = _np.min(\n [\n (axis1_lims[1] - axis1_lims[0]) / resolution[0],\n (axis2_lims[1] - axis2_lims[0]) / resolution[1],\n ]\n )\n\n plane = plane_grid(\n axis1_lims, axis2_lims, rotation_axis, rotation_angle, elem_len\n )\n points = plane.leaf_view.vertices\n else:\n raise TypeError(\n \"The correct values for the argument\"\n + bold_ul_text(\"mode\")\n + \"are numpy or gmsh.\"\n )\n\n return points, plane", "title": "" }, { "docid": "6e4f70656e3cf67b8c6882bdb3970186", "score": "0.5541129", "text": "def ApplyCoordinateTransformation(self,f):\n npoints = self.ugrid.GetNumberOfPoints ()\n \n for i in range (npoints):\n (x,y,z) = self.ugrid.GetPoint (i)\n newX = f(arr([x,y,z]),t=0)\n self.ugrid.GetPoints ().SetPoint (i, newX[0], newX[1], newX[2])", "title": "" }, { "docid": "86ab59a7a139e8a8dfd2783551df91f6", "score": "0.55118614", "text": "def topo_plane_paramEval(self, param):\n # Create an empty numpy array with the same number as pixels as the real data.\n self.topo_plane_fit_data = np.zeros((self.y_res, self.x_res))\n for y in range(0, self.y_res): # Iterate over the y-axis pixels.\n for x in range(0, self.x_res): # Iterate over the x-axis pixels.\n self.topo_plane_fit_data[y, x] = param[0]*x + param[1]*y + param[2] # Generate plane value.\n return self.topo_plane_fit_data # Return entire array.", "title": "" }, { "docid": "c4c12b0bdc7f9cbf61045190be3d3c55", "score": "0.5451705", "text": "def create_plane (self, image_orientation):\n\n if self.plane_name in bpy.data.objects:\n return bpy.data.objects[self.plane_name]\n \n \"\"\"Create a mesh and add it to Blender\"\"\"\n if (len(self.img_names) == 0):\n raise ValueError(\"No images! 
Something is wrong\")\n \n idx = len(self.img_names) - 1\n img_name = self.img_names[idx//2]\n try:\n img = bpy.data.images[img_name]\n except KeyError:\n print(\"Couldn't find image \" + img_name + \"!\")\n return\n \n self.widthp,self.heightp = img.size\n \n \"\"\" Make a plane mesh and add to blender \"\"\"\n plane = SlicePlane.bl_make_plane(self.plane_name)\n\n bpy.ops.object.transform_apply(rotation=True,scale=True)\n if (self.orientation == Orientation('AXIAL')):\n self.widthf = self.widthp*self.spacing[0]\n self.heightf = self.heightp*self.spacing[1]\n plane.dimensions = self.widthf, self.heightf, 0.0\n\n plane.rotation_mode = 'YXZ'\n plane.rotation_euler = [0.,pi,0.]\n if image_orientation[0] == 'R':\n plane.rotation_euler[1] = 0\n if image_orientation[1] == 'P':\n plane.rotation_euler[2] = pi\n if image_orientation[2] == 'S':\n self.reverse = True\n plane.rotation_euler[1] -= pi\n elif (self.orientation == Orientation('SAGITTAL')):\n self.widthf = self.widthp*self.spacing[1]\n self.heightf = self.heightp*self.spacing[2]\n plane.dimensions = self.widthf, self.heightf, 0.0\n\n plane.rotation_mode = 'XYZ'\n plane.rotation_euler = [pi/2,0.,-pi/2]\n if image_orientation[0] == 'R':\n self.reverse = True\n if image_orientation[1] == 'P':\n plane.rotation_euler[2] = pi/2\n self.reverse = not self.reverse\n if image_orientation[2] == 'S':\n plane.rotation_euler[0] = -pi/2\n self.reverse = not self.reverse\n elif (self.orientation == Orientation('CORONAL')):\n self.widthf = self.widthp*self.spacing[0]\n self.heightf = self.heightp*self.spacing[2]\n plane.dimensions = self.widthf, self.heightf, 0.0\n\n plane.rotation_mode = 'XYZ'\n plane.rotation_euler = [pi/2,0.,0.]\n if image_orientation[0] == 'L':\n plane.rotation_euler[2] = pi\n if image_orientation[1] == 'P':\n plane.rotation_euler[2] -= pi\n self.reverse = True\n if image_orientation[2] == 'S':\n plane.rotation_euler[0] = -pi/2\n plane.rotation_euler[2] -= pi\n else:\n raise ValueError('orientation must be in Orientation')\n\n img = self.get_image_from_location (plane.location)\n tex = SlicePlane.create_image_texture (img)\n mat = self.create_material_for_texture (tex)\n\n plane.data.materials.append(mat)\n plane.data.uv_textures.new()\n plane.data.uv_textures[0].data[0].image = \\\n mat.texture_slots[0].texture.image\n\n plane.location = Vector (self.plane_centre)\n\n self.register_callback()\n\n return plane", "title": "" }, { "docid": "1faa279fe9417e3cf9f6ba715e9efd49", "score": "0.5444708", "text": "def makeplane(self, estimator=np.nanmean):\n # THIS IS A HACK!!! isinstance(a function, function) must be a thing...\n FUNCTION = type(np.max)\n\n # estimator is NOT duck-typed\n if type(estimator) is FUNCTION:\n self.plane = estimator(self.Cube.cube,axis=0)\n elif isinstance(estimator, six.string_types):\n if estimator == 'max':\n self.plane = self.Cube.cube.max(axis=0)\n elif estimator == 'int':\n dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])\n dx = np.concatenate([dx,[dx[-1]]])\n self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)\n elif estimator[-5:] == \".fits\":\n self.plane = pyfits.getdata(estimator)\n elif type(estimator) is slice:\n self.plane = self.Cube.cube[estimator,:,:]\n elif type(estimator) is int:\n if hasattr(self.Cube,'parcube'):\n self.plane = self.Cube.parcube[estimator,:,:]\n\n if self.plane is None:\n raise ValueError(\"Invalid estimator %s\" % (str(estimator)))\n\n if np.sum(np.isfinite(self.plane)) == 0:\n raise ValueError(\"Map is all NaNs or infs. 
Check your estimator or your input cube.\")", "title": "" }, { "docid": "94b4da0965081fdc43d8f05362f4bb65", "score": "0.5435796", "text": "def createPlane(self):\n\n plane_dict = {}\n\n #Takes in input for plane insignia and puts it under \"planeInsignia\" key in plane_dict\n plane_dict[\"planeInsignia\"] = InputHandler().planeInsignia(\"Input plane insignia (e.g. TF-XXX): \")\n\n #Creates a list of airplane types in the list\n airplane_data_list = IOAPI().opener(self.dataFiles[\"AIRCRAFT_TYPE_FILE\"])\n airplaneType_list = []\n for a_line_dict in airplane_data_list:\n airplaneType_list.append(a_line_dict[\"planeTypeId\"])\n #Turns the list into a list of dictionaries\n plane_list = []\n for x in airplaneType_list:\n planeType_dict = {}\n planeType_dict[\"Plane Type IDs: \"] = x\n plane_list.append(planeType_dict)\n DisplayScreen().printOptions(plane_list, header = \"\")\n #Input for plane Type ID\n plane_dict[\"planeTypeId\"] = InputHandler().multipleNumChoices(plane_list, \"Choose Plane Type ID: \")\n plane_dict[\"manufacturer\"] = InputHandler().strNoCheck(\"Input plane manufacturer: \")\n plane_dict[\"capacity\"] = InputHandler().digit(\"Input plane seating capacity: \")\n #Input confirmation\n DisplayScreen().printList([plane_dict], header = \"\")\n confirmation_bool = InputHandler().yesOrNoConfirmation(\"Is this information correct? (y/n)\")\n if confirmation_bool:\n #Appending the input info to aircraft file\n IOAPI().appender(self.dataFiles[\"AIRCRAFT_FILE\"], plane_dict)", "title": "" }, { "docid": "f892133cf55ec9528c0f346cdbb6b53b", "score": "0.5417904", "text": "def project_on_to_plane(self, plane_normal):\n return Vector()", "title": "" }, { "docid": "95426ca54f018bacbccaf0009f7ad20e", "score": "0.53655446", "text": "def Plane(pos=(0, 0, 0), normal=(0, 0, 1), sx=1, sy=None, c=\"g\", alpha=1):\n if sy is None:\n sy = sx\n ps = vtk.vtkPlaneSource()\n ps.SetResolution(1, 1)\n tri = vtk.vtkTriangleFilter()\n tri.SetInputConnection(ps.GetOutputPort())\n tri.Update()\n poly = tri.GetOutput()\n axis = np.array(normal) / np.linalg.norm(normal)\n theta = np.arccos(axis[2])\n phi = np.arctan2(axis[1], axis[0])\n t = vtk.vtkTransform()\n t.PostMultiply()\n t.Scale(sx, sy, 1)\n t.RotateY(np.rad2deg(theta))\n t.RotateZ(np.rad2deg(phi))\n tf = vtk.vtkTransformPolyDataFilter()\n tf.SetInputData(poly)\n tf.SetTransform(t)\n tf.Update()\n pd = tf.GetOutput()\n actor = Actor(pd, c, alpha)\n actor.SetPosition(pos)\n settings.collectable_actors.append(actor)\n return actor", "title": "" }, { "docid": "3da68bdf46f80e4f092d7ad2eccf0a7c", "score": "0.5341773", "text": "def project_point_on_to_plane(self, plane_base, plane_normal):\n return Vector()", "title": "" }, { "docid": "194eb131c45d2cdc5a791a263f2863e0", "score": "0.5318441", "text": "def create_planes(func: vtk.vtkSampleFunction,\n number_of_planes: int) -> vtk.vtkActor:\n actor = vtk.vtkActor()\n append = vtk.vtkAppendFilter()\n\n dimensions = func.GetSampleDimensions()\n slice_increment = (dimensions[2] - 1) // (number_of_planes + 1)\n slice_num = -4\n for i in range(0, number_of_planes):\n extract = vtk.vtkExtractVOI()\n extract.SetInputConnection(func.GetOutputPort())\n extract.SetVOI(0, dimensions[0] - 1,\n 0, dimensions[1] - 1,\n slice_num + slice_increment,\n slice_num + slice_increment)\n append.AddInputConnection(extract.GetOutputPort())\n slice_num += slice_increment\n append.Update()\n\n planes_mapper = vtk.vtkDataSetMapper()\n planes_mapper.SetInputConnection(append.GetOutputPort())\n planes_mapper.SetScalarRange(0, 7)\n\n 
actor.SetMapper(planes_mapper)\n actor.GetProperty().SetAmbient(1.)\n\n return actor", "title": "" }, { "docid": "4fe012752b7d81033e284fae3d7ac1ce", "score": "0.5316544", "text": "def set_plane(output, plane, scalar):\n\n\n parameters = {\n \"dst\":output,\n \"plane\":int(plane),\n \"value\":float(scalar)\n }\n\n execute(__file__, 'set_plane_' + str(len(output.shape)) + 'd_x.cl', 'set_plane_' + str(len(output.shape)) + 'd', output.shape, parameters);", "title": "" }, { "docid": "db9f51a0ae4313e8d55d0c8991d6aa36", "score": "0.53159255", "text": "def mirror_by_plane(self, plane):\n return Vector()", "title": "" }, { "docid": "1aeae27aa589545d752eac2a6e77040c", "score": "0.53090656", "text": "def plot_data_plane_init(self):\n plot_overrides = {'PLOT_DATA_PLANE_INPUT_TEMPLATE': 'template',\n 'PLOT_DATA_PLANE_OUTPUT_TEMPLATE': 'template',\n 'PLOT_DATA_PLANE_FIELD_NAME': 'field_name',\n 'PLOT_DATA_PLANE_CONVERT_TO_IMAGE': True,\n }\n\n if not self.c_dict['BACKGROUND_MAP']:\n plot_overrides['PLOT_DATA_PLANE_FIELD_EXTRA'] = (\n \"map_data={ source=[];}\"\n )\n\n pdp_wrapper = PlotDataPlaneWrapper(self.config,\n config_overrides=plot_overrides)\n return pdp_wrapper", "title": "" }, { "docid": "e227bb27cae7ad1a97a2c5921891e3ac", "score": "0.52886415", "text": "def project_point_on_to_plane(cls, point, plane_base, plane_normal):\n return Vector()", "title": "" }, { "docid": "b021b19d60ab56a42cd74d411f6f0538", "score": "0.52709067", "text": "def vector_mirror_by_plane(cls, a, plane):\n return Vector()", "title": "" }, { "docid": "dacb90c01ba6c87447728f4b0183600f", "score": "0.5269757", "text": "def zplane(b,a,filename=None):\n plt.figure(3)\n # get a figure/plot\n ax = plt.subplot(111)\n\n # create the unit circle\n uc = patches.Circle((0,0), radius=1, fill=False,\n color='black', ls='dashed')\n ax.add_patch(uc)\n\n # The coefficients are less than 1, normalize the coeficients\n if np.max(b) > 1:\n kn = np.max(b)\n b = b/float(kn)\n else:\n kn = 1\n\n if np.max(a) > 1:\n kd = np.max(a)\n a = a/float(kd)\n else:\n kd = 1\n \n # Get the poles and zeros\n p = np.roots(a)\n z = np.roots(b)\n k = kn/float(kd)\n \n # Plot the zeros and set marker properties \n t1 = plt.plot(z.real, z.imag, 'go', ms=10)\n plt.setp( t1, markersize=10.0, markeredgewidth=1.0,\n markeredgecolor='k', markerfacecolor='g')\n\n # Plot the poles and set marker properties\n t2 = plt.plot(p.real, p.imag, 'rx', ms=10)\n plt.setp( t2, markersize=12.0, markeredgewidth=3.0,\n markeredgecolor='r', markerfacecolor='r')\n\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n # set the ticks\n r = 1.5; plt.axis('scaled'); plt.axis([-r, r, -r, r])\n ticks = [-1, -.5, .5, 1]; plt.xticks(ticks); plt.yticks(ticks)\n\n if filename is None:\n plt.show()\n else:\n plt.savefig(filename)\n \n\n return z, p, k", "title": "" }, { "docid": "4d82d57f950859ef55fd56cd369f5318", "score": "0.52664226", "text": "def show_plane(orig, n, scale=1.0, **kwargs):\n b1 = orthogonal_vector(n)\n b1 /= la.norm(b1)\n b2 = np.cross(b1, n)\n b2 /= la.norm(b2)\n verts = [orig + scale*(-b1 - b2),\n orig + scale*(b1 - b2),\n orig + scale*(b1 + b2),\n orig + scale*(-b1 + b2)]\n faces = [(0, 1, 2), (0, 2, 3)]\n trimesh3d(np.array(verts), faces, **kwargs)", "title": "" }, { "docid": "b1acd6daa53de952df986f618fd125d0", "score": "0.5262819", "text": "def project_vector_on_to_plane(cls, v, plane_normal):\n return Vector()", "title": "" }, { "docid": 
"1591250981236eb6146620c6def62f6c", "score": "0.5252082", "text": "def plane_axes(plane):\n assert isinstance(plane, gen_cuts.Plane3D)\n p_normal = normal(plane)\n # Make sure Z is not fixed\n assert not (p_normal[0] == 0 and p_normal[1] == 0)\n\n # Get a sideways vector on the plane\n up = np.array([0, 0, 1])\n side_on_plane = normalize(np.cross(up, p_normal))\n\n # And an up vector on the plane\n up_on_plane = normalize(np.cross(side_on_plane, p_normal))\n if up_on_plane[2] < 0:\n up_on_plane = -up_on_plane\n\n # Take Ax + By + Cz + D = 0\n # Substitute Z = 0 and obtain Ax + By = -D\n # We get one of two solutions:\n # x = - (D + By) / A\n # y = - (D + Ax) / B\n # We want the more numerically stable solution, so we'll pick the first\n # if abs(A) > abs(B), otherwise we'll take the second. In these cases we\n # we'll set the other coordinate (x or y) to zero.\n if abs(plane.A) > abs(plane.B):\n x = -plane.D / plane.A\n y = 0\n z = 0\n else:\n x = 0\n y = -plane.D / plane.B\n z = 0\n\n return PlaneAxes(side=side_on_plane, up=up_on_plane,\n origin=np.array([x, y, z]))", "title": "" }, { "docid": "82b986081d4f7fc5397a6696c1741442", "score": "0.52338004", "text": "def plane(normal=UP, point=ORIGIN):\n normal = _normalize(normal)\n def f(p):\n return np.dot(point - p, normal)\n return f", "title": "" }, { "docid": "b259760952d92a860b48ee0f37057213", "score": "0.52290386", "text": "def refresh(self):\n planeArray = np.zeros([self.xlen,self.ylen])\n for xpos in range(self.xlen):\n for ypos in range(self.ylen):\n # compute the value at each of the coordinate points in the plane\n planeArray[(self.ylen-ypos-1),xpos] = self.f( (xpos*self.xstep+self.xmin) + (ypos*self.ystep+self.ymin)*1j )\n ylabels = [str(self.ymax-ypos*self.ystep) for ypos in range(self.ylen)]\n xlabels = [str(xpos*self.xstep+self.xmin) for xpos in range(self.xlen)]\n self.plane = pd.DataFrame(planeArray, index=ylabels, columns=xlabels)", "title": "" }, { "docid": "904b027019e0718dcdeef8381d9f81c4", "score": "0.5209764", "text": "def _fit_plane(vertices):\n\n # The center is always the mean of the four corners.\n c = 0.25 * (vertices[0] + vertices[1] + vertices[2] + vertices[3])\n\n # Compute covariance matrix.\n C = matrix.rec(\n elems=tuple(vertices[0] - c) \\\n + tuple(vertices[1] - c) \\\n + tuple(vertices[2] - c) \\\n + tuple(vertices[3] - c),\n n=(4, 3))\n cov = C.transpose_multiply(C)\n\n # Obtain eigenvalues of covariance matrix. XXX Ouch, this is ugly.\n from tntbx import svd\n cov_flex = flex.double(flex.grid(3, 3))\n cov_flex += flex.double((cov(0, 0), cov(0, 1), cov(0, 2),\n cov(1, 0), cov(1, 1), cov(1, 2),\n cov(2, 0), cov(2, 1), cov(2, 2)))\n svd = svd(cov_flex)\n\n # Get least-squares residual (smallest eigenvalue) and plane normal\n # (eigenvector corresponding to smallest eigenvalue) from SVD.\n rss = svd.s()[2, 2]\n normal = matrix.col((svd.v()[0, 2], svd.v()[1, 2], svd.v()[2, 2]))\n\n \"\"\"\n rss2 = normal.dot(vertices[0] - c)**2 \\\n + normal.dot(vertices[1] - c)**2 \\\n + normal.dot(vertices[2] - c)**2 \\\n + normal.dot(vertices[3] - c)**2\n assert math.fabs(rss - rss2) < 1e-5\n \"\"\"\n\n # Project each vertex into the plane. 
Normal must have unit length.\n vertices_lsq = [(vertices[0] - c) - normal.dot(vertices[0] - c) * normal + c,\n (vertices[1] - c) - normal.dot(vertices[1] - c) * normal + c,\n (vertices[2] - c) - normal.dot(vertices[2] - c) * normal + c,\n (vertices[3] - c) - normal.dot(vertices[3] - c) * normal + c]\n\n return (vertices_lsq, rss)", "title": "" }, { "docid": "b372576a686ed0a886ecce27b4153d93", "score": "0.520438", "text": "def parabola():\r\n # method 1: using the algebraic function\r\n fun = lambda x, y: x*x+y*y\r\n x1 = np.arange(-2, 2.5, 0.02)\r\n y1 = np.arange(-2, 2.5, 0.2)\r\n p_fun = new.mesh(xyfun=fun, x=x1, y=y1, name='parabola_fun', coll_name='surface')\r\n p_fun.loc += mathutils.Vector((4.0, -4.0, 0.0))\r\n\r\n def fun2mat(xyfun, tx=np.array([]), ty=np.array([])):\r\n \"\"\"\r\n This functionality is already in the bpn module.\r\n It is here only to illustrate the versatility of new.mesh creation\r\n \"\"\"\r\n assert isinstance(xyfun, types.FunctionType)\r\n assert xyfun.__code__.co_argcount == 2 # function has two input arguments\r\n if tx is fun2mat.__defaults__[0]: # default ranges\r\n tx = np.arange(-2, 2, 0.1)\r\n if ty is fun2mat.__defaults__[1]:\r\n ty = np.arange(-2, 2, 0.1)\r\n return np.array([[xyfun(xv, yv) for yv in ty] for xv in tx])\r\n\r\n # method 2: MATLAB-style surf, using a 2d matrix Z\r\n z1 = fun2mat(fun, x1, y1)\r\n p_xyz = new.mesh(x=x1, y=y1, z=z1, name='parabola_xyz', coll_name='surface')\r\n p_xyz.loc += mathutils.Vector((-4.0, -4.0, 0.0))\r\n\r\n def mat2mesh(tz, tx=np.array([]), ty=np.array([])):\r\n \"\"\"\r\n z is a 2-D numpy array or a 2D list\r\n returns:\r\n v list of vertices\r\n f list of faces\r\n This is here only for demonstration. It is already in bpn module.\r\n \"\"\"\r\n if tx is mat2mesh.__defaults__[0]:\r\n tx = np.arange(0, np.shape(tz)[0])\r\n if ty is mat2mesh.__defaults__[1]:\r\n ty = np.arange(0, np.shape(tz)[1])\r\n\r\n nX = len(tx)\r\n nY = len(ty)\r\n\r\n assert len(tx) == np.shape(tz)[0]\r\n assert len(ty) == np.shape(tz)[1]\r\n \r\n v = [(xv, yv, tz[ix][iy]) for iy, yv in enumerate(ty) for ix, xv in enumerate(tx)]\r\n f = [(iy*nX+ix, iy*nX+ix+1, (iy+1)*nX+(ix+1), (iy+1)*nX+ix) for iy in np.arange(0, nY-1) for ix in np.arange(0, nX-1)]\r\n return v, f\r\n\r\n # method 3: by specifying the vertices and faces - this is here mainly for testing\r\n v1, f1 = mat2mesh(z1, tx=x1, ty=y1)\r\n p_vf = new.mesh(v=v1, f=f1, name='parabola_vf', coll_name='surface')\r\n p_vf.loc += mathutils.Vector((0, 4.0, 0.0))", "title": "" }, { "docid": "1eec3c5d621e0be8c5ff4c38792c86a1", "score": "0.5197117", "text": "def plane(xi, points, values):\n\n alpha = np.linalg.solve(points[1:] - points[0], values[1:] - values[0])\n return (np.stack(xi, axis=-1) - points[0]).dot(alpha) + values[0]", "title": "" }, { "docid": "b6b027bf8d6944170c955a82c2eb6e66", "score": "0.51860917", "text": "def from_coords(origin, target):\n\t\txo,yo,zo = origin\n\t\txt,yt,zt = target\n\n\t\tn = Vector(xt-xo, yt-yo, zt-zo).unit()\n\t\treturn Plane.from_normal(n, n.i*xo + n.j*yo + n.k*zo)", "title": "" }, { "docid": "21fd03067d0920c474b2951861b37250", "score": "0.5146844", "text": "def plot_line_in_complex_plane(t: np.ndarray,\n xf: Callable, yf: Callable,\n function: Callable, title: str,\n dlim: List[int] = [],\n show_identity=False) -> None:\n x = xf(t)\n y = yf(t)\n z = function(x + 1.0j*y)\n if show_identity is True:\n a, = plt.plot(x, y, color=\"black\", linewidth=0.75)\n b, = plt.plot(np.real(z), np.imag(z), color=\"aqua\", linewidth=0.75)\n plt.legend((a, b), ['z', 
'f(z)'])\n b = plt.plot(np.real(z), np.imag(z), linewidth=0.75)\n # These set the aspect ratio to the correct scale,\n # as well as adding a title and labels.\n plt.gca().set_aspect('equal', adjustable='box')\n plt.title(title)\n plt.xlabel(\"x\")\n plt.ylabel(\"iy\")\n plt.grid(True)\n\n # The following lines of code remove the axis label.\n # Uncomment these and comment the labels above to remove labels.\n # plt.xticks([])\n # plt.yticks([])\n\n if (len(dlim) == 4):\n plt.xlim(dlim[0], dlim[1])\n plt.ylim(dlim[2], dlim[3])\n plt.savefig(title + '.png', dpi=400)\n plt.show()\n plt.close()", "title": "" }, { "docid": "9c8ae24a7ecf92b8334ee2203f48089d", "score": "0.5146186", "text": "def planeMACl(self):\n return Plane(Point(-self.cMACyPos, 0, 0), Vector(1, 0, 0),\n hidden=True)", "title": "" }, { "docid": "9c8ae24a7ecf92b8334ee2203f48089d", "score": "0.5146186", "text": "def planeMACl(self):\n return Plane(Point(-self.cMACyPos, 0, 0), Vector(1, 0, 0),\n hidden=True)", "title": "" }, { "docid": "64366ac9cee7594d0fc003cda52faebb", "score": "0.5127683", "text": "def get_plane_ben(M,v, axis_str, x_range, y_range, z_range):\n x0, y0, _= M\n a, b, _= v\n\n if axis_str == \"x\":\n Y = y_range\n X = x0*np.ones(2)\n Z = np.array(list(z_range)+list(z_range)).reshape((2,2))\n elif axis_str == \"y\":\n X = x_range\n Y = y0*np.ones(2)\n Z = np.array([z_range[0]]*2+[z_range[1]]*2).reshape((2,2))\n elif axis_str == \"xy\":\n X = x_range\n Y = y0+b*(X-x0)/a\n Z = np.array([z_range[0]]*2+[z_range[1]]*2).reshape((2,2))\n else:\n pass\n return X, Y, Z", "title": "" }, { "docid": "22f8c02f4597c06caa3283b0e53813c2", "score": "0.50950354", "text": "def planeMACr(self):\n return Plane(Point(self.cMACyPos, 0, 0), Vector(1, 0, 0),\n hidden=True)", "title": "" }, { "docid": "22f8c02f4597c06caa3283b0e53813c2", "score": "0.50950354", "text": "def planeMACr(self):\n return Plane(Point(self.cMACyPos, 0, 0), Vector(1, 0, 0),\n hidden=True)", "title": "" }, { "docid": "85cbf21e87584a9ba11457e6847bebfa", "score": "0.50760114", "text": "def Grid(\n pos=(0, 0, 0),\n normal=(0, 0, 1),\n sx=1,\n sy=1,\n c=\"g\",\n alpha=1,\n lw=1,\n resx=10,\n resy=10,\n):\n ps = vtk.vtkPlaneSource()\n ps.SetResolution(resx, resy)\n ps.Update()\n poly0 = ps.GetOutput()\n t0 = vtk.vtkTransform()\n t0.Scale(sx, sy, 1)\n tf0 = vtk.vtkTransformPolyDataFilter()\n tf0.SetInputData(poly0)\n tf0.SetTransform(t0)\n tf0.Update()\n poly = tf0.GetOutput()\n axis = np.array(normal) / np.linalg.norm(normal)\n theta = np.arccos(axis[2])\n phi = np.arctan2(axis[1], axis[0])\n t = vtk.vtkTransform()\n t.PostMultiply()\n t.RotateY(np.rad2deg(theta))\n t.RotateZ(np.rad2deg(phi))\n tf = vtk.vtkTransformPolyDataFilter()\n tf.SetInputData(poly)\n tf.SetTransform(t)\n tf.Update()\n pd = tf.GetOutput()\n actor = Actor(pd, c, alpha)\n actor.GetProperty().SetRepresentationToWireframe()\n actor.GetProperty().SetLineWidth(lw)\n actor.SetPosition(pos)\n settings.collectable_actors.append(actor)\n return actor", "title": "" }, { "docid": "315dbe3a3fdcc1ebbe2805b28d82e159", "score": "0.50717515", "text": "def refresh(self):\n rx = np.linspace( self.xmin, self.xmax, self.xlen )\n ry = np.linspace( self.ymin, self.ymax, self.ylen )\n x, y = np.meshgrid( rx, ry )\n planeArray = x + y*1j\n self.f( planeArray )\n ylabels = [str(self.ymax-ypos*self.ystep) for ypos in range(self.ylen)]\n xlabels = [str(xpos*self.xstep+self.xmin) for xpos in range(self.xlen)]\n self.plane = pd.DataFrame(planeArray, index=ylabels, columns=xlabels)", "title": "" }, { "docid": 
"6358aa9e98a71fc7ef6f2e1e45e99d10", "score": "0.50683236", "text": "def apply(self, f):\n ### Use pandas' applymap method to modify all plane points using f\n self.plane = self.plane.applymap(f)\n ### Store the applied function for future reference\n self.fs.append(f)", "title": "" }, { "docid": "ae5d45f0bafcd51d7b7cfa6c2983f704", "score": "0.5022259", "text": "def Paraboloid(pos=(0, 0, 0), r=1, height=1, axis=(0, 0, 1), c=\"cyan\", alpha=1, res=50):\n quadric = vtk.vtkQuadric()\n quadric.SetCoefficients(1, 1, 0, 0, 0, 0, 0, 0, height / 4, 0)\n # F(x,y,z) = a0*x^2 + a1*y^2 + a2*z^2\n # + a3*x*y + a4*y*z + a5*x*z\n # + a6*x + a7*y + a8*z +a9\n sample = vtk.vtkSampleFunction()\n sample.SetSampleDimensions(res, res, res)\n sample.SetImplicitFunction(quadric)\n\n contours = vtk.vtkContourFilter()\n contours.SetInputConnection(sample.GetOutputPort())\n contours.GenerateValues(1, 0.01, 0.01)\n contours.Update()\n\n axis = np.array(axis) / np.linalg.norm(axis)\n theta = np.arccos(axis[2])\n phi = np.arctan2(axis[1], axis[0])\n t = vtk.vtkTransform()\n t.PostMultiply()\n t.RotateY(np.rad2deg(theta))\n t.RotateZ(np.rad2deg(phi))\n t.Scale(r, r, r)\n tf = vtk.vtkTransformPolyDataFilter()\n tf.SetInputData(contours.GetOutput())\n tf.SetTransform(t)\n tf.Update()\n pd = tf.GetOutput()\n\n actor = Actor(pd, c, alpha).flipNormals()\n actor.GetProperty().SetInterpolationToPhong()\n actor.mapper.ScalarVisibilityOff()\n actor.SetPosition(pos)\n settings.collectable_actors.append(actor)\n return actor", "title": "" }, { "docid": "c405a9777b03aa4d9bd0b8902176322d", "score": "0.49944082", "text": "def from_plane(plane):\n\t\treturn plane.normal.copy()", "title": "" }, { "docid": "38f78bf4082587ba0e0ad9644b53e1c4", "score": "0.49903047", "text": "def project_points_to_plane(self, x_in, y_in, z_in, plane_coeffs):\n # define the origin (on the plane) as the point pointed by the coeff vector\n # d * (a, b, c)\n origin = (plane_coeffs[3]) * plane_coeffs[0:3]\n # print \"origin: %s\" % origin\n \n # for each point project and obtain its x, y coords\n v_x = x_in - origin[0]\n v_y = y_in - origin[1]\n v_z = z_in - origin[2]\n \n dist = v_x * plane_coeffs[0] + v_y * plane_coeffs[1] + v_z * plane_coeffs[2]\n # print \"dist: %s\" % dist\n \n proj_x = x_in - dist * plane_coeffs[0]\n proj_y = y_in - dist * plane_coeffs[1]\n proj_z = z_in - dist * plane_coeffs[2]\n \n return proj_x, proj_y, proj_z, origin", "title": "" }, { "docid": "5696a13c59169942d2d3b4acadb947b5", "score": "0.49795002", "text": "def __init__ (self, orientation, origin, plane_centre, image_names, spacing, image_orientation):\n\n self.x_vtx_offset = -1\n self.use_transparency = False\n self.orientation = orientation\n self.reverse = False # Reverse image indices\n self.origin = origin\n self.plane_centre = plane_centre\n\n self.img_names = image_names\n self.spacing = spacing\n \n self.loop_name = \"loop\" + str(self.orientation)[:3]\n self.plane_name = \"plane\"+str(self.orientation)[:3]\n self.is_updated = False\n \n plane = self.create_plane(image_orientation)\n self.update_image(plane)", "title": "" }, { "docid": "93277d8c67a58b60e57c715ffe95ee20", "score": "0.49560016", "text": "def plane2PointNormal( plane ):\n (a,b,c,d) = plane\n return asarray((-d*a,-d*b,-d*c),'f'), asarray((a,b,c),'f')", "title": "" }, { "docid": "19ff8aa031d772653af9a07dff1beac6", "score": "0.49497247", "text": "def add_plane(self,\n position,\n color,\n plane=None,\n normal=(0.0, 0.0, 1.0),\n type='circle',\n width=4,\n height=4,\n opacity=1.0):\n if type == 'square':\n 
geometry = PlaneGeometry(width=width,\n height=height,\n widthSegments=10,\n heightSegments=10)\n else:\n geometry = CircleGeometry(radius=width / 2, segments=48)\n\n material = MeshStandardMaterial(color=color,\n roughness=0.3,\n metalness=0.0,\n side='DoubleSide',\n transparent=True,\n opacity=opacity)\n\n mesh = Mesh(geometry=geometry, material=material, position=position)\n\n if plane == 'xy' or plane == 'yx':\n normal = (0.0, 0.0, 1.0)\n elif plane == 'xz' or plane == 'zx':\n normal = (0.0, 1.0, 0.0)\n elif plane == 'yz' or plane == 'zy':\n normal = (1.0, 0.0, 0.0)\n\n # If the plane is not rotated skip the rotation step\n if normal[2] != 1.0 or normal[2] != -1.0:\n R = self.__plane_rotation_matrix(normal)\n mesh.setRotationFromMatrix(R)\n self.scene.add(mesh)", "title": "" }, { "docid": "cc473b5b5888979b794537a46f48a01c", "score": "0.49165383", "text": "def zplane(b, a, axis_lim=1, axes= None, circlecolor='black', figsize=(8,8), filename=None):\n\n # get a figure/plot \n if axes != None:\n ax = axes\n else:\n ax = plt.subplot(111)\n \n plt.rcParams[\"figure.figsize\"] = figsize\n \n # create the unit circle\n uc = patches.Circle((0,0), radius=1, fill=False,\n color=circlecolor, ls='dashed')\n ax.add_patch(uc)\n\n # The coefficients are less than 1, normalize the coeficients\n if np.max(b) > 1:\n kn = np.max(b)\n b = b/float(kn)\n else:\n kn = 1\n\n if np.max(a) > 1:\n kd = np.max(a)\n a = a/float(kd)\n else:\n kd = 1\n \n # Get the poles and zeros\n p = np.roots(a)\n z = np.roots(b)\n k = kn/float(kd)\n \n # Plot the zeros and set marker properties \n t1 = plt.plot(z.real, z.imag, 'go', ms=10)\n plt.setp( t1, markersize=10.0, markeredgewidth=1.0,\n markeredgecolor='k', markerfacecolor='g')\n\n # Plot the poles and set marker properties\n t2 = plt.plot(p.real, p.imag, 'rx', ms=10)\n plt.setp( t2, markersize=12.0, markeredgewidth=3.0,\n markeredgecolor='r', markerfacecolor='r')\n\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n # set the ticks\n r = axis_lim; r_axis = r + 0.5; plt.axis('scaled'); plt.axis([-r_axis, r_axis, -r_axis, r_axis])\n ticks = np.arange(-r, r + 0.5, 0.5); plt.xticks(ticks); plt.yticks(ticks)\n \n if filename is None:\n plt.show()\n else:\n plt.savefig(filename)\n \n\n return z, p, k", "title": "" }, { "docid": "5cac940537cc3e1b76bfbe66e3e6913a", "score": "0.48944932", "text": "def __init__(\n self,\n backend=None,\n num_points=150,\n *,\n plane=None,\n length_scale_units=u.km,\n ):\n # Initialize the backend, number of points and length scale\n self._backend = backend or orbit_plotter_backends.Matplotlib2D(\n ax=None, use_dark_theme=False\n )\n self._num_points = num_points\n self._length_scale_units = length_scale_units\n\n # Initialize the attractor\n self._attractor = None\n self._attractor_radius = np.inf * length_scale_units\n\n # Initialize the plane and frame used by the plotter\n self._plane = plane or Planes.EARTH_EQUATOR\n self._frame = None\n\n # Initialize the list containing all the plotted trajectories\n self._trajectories = [] # type: List[Trajectory]", "title": "" }, { "docid": "ade8680ddfa565aa7faa3992a0f93a24", "score": "0.4871801", "text": "def dodecahedron_planes(draw, r, tet_orig, scale = 300, shift = np.array([1000,1000,0])):\n phi = (1+np.sqrt(5)) / 2\n cind = -1\n for pm1 in [-1,1]:\n coeff1 = np.array([1, pm1 * phi, 0])\n for i1 in range(3):\n for pm2 in [-1,1]:\n cind += 1\n coeff = np.array([coeff1[ (i1+jj)%3] for jj 
in range(3)])\n penta = np.array([i for i in tet_orig if (np.dot(i, coeff ) + pm2*phi*phi == 0)])\n penta = np.dot(penta, r)\n hull = ConvexHull([i[:2] for i in penta]).vertices\n sqr1 = penta * scale + shift[:3]\n poly = [(sqr1[i][0],sqr1[i][1]) for i in hull]\n smat = sum(penta)\n face_angle = np.dot(smat/np.sqrt(sum(smat**2)), np.array([0,0.01,0.99]))\n forward_face = np.dot(sum(penta), np.array([0,0,1])) > -1e-3\n angles.append(face_angle)\n rgba = colorFromAngle2(face_angle,h=134,s=144,maxx=0.95)\n if forward_face: #Meaning the plane is facing forward.\n draw.polygon(poly, rgba)\n ## Uncomment if you want to draw the edges.\n #for i in range(len(poly)):\n # vv1 = poly[i]\n # vv2 = poly[(i+1)%5]\n # if forward_face:\n # draw.line((vv1[0],vv1[1],vv2[0],vv2[1]), fill = (0,255,0,255), width = 3)\n # else:\n # draw.line((vv1[0],vv1[1],vv2[0],vv2[1]), fill = (0,255,0,255), width = 3)", "title": "" }, { "docid": "55f26ede5cead737428c3998c6d07b5d", "score": "0.48582056", "text": "def fromJSON( self, filename ):\n # open the file for reading\n from pprint import pprint\n\n # open and read in the json file, with help from: http://stackoverflow.com/questions/2835559/parsing-values-from-a-json-file-in-python\n with open( filename ) as jsonfile: \n data = json.load(jsonfile)\n\n # parse the parameter values\n self.xmin = data[\"JuliaPlaneParameters\"][\"xmin\"]\n self.xmax = data[\"JuliaPlaneParameters\"][\"xmax\"]\n self.xlen = int( data[\"JuliaPlaneParameters\"][\"xlen\"] )\n self.ymin = data[\"JuliaPlaneParameters\"][\"ymin\"]\n self.ymax = data[\"JuliaPlaneParameters\"][\"ymax\"]\n self.ylen = int( data[\"JuliaPlaneParameters\"][\"ylen\"] )\n self.c = data[\"JuliaPlaneParameters\"][\"creal\"] + data[\"JuliaPlaneParameters\"][\"cimaginary\"]*1j\n self.xstep = (self.xmax - self.xmin)/(self.xlen - 1)\n self.ystep = (self.ymax - self.ymin)/(self.ylen - 1)\n self.set_f( self.c ) # set the tranformation function. 
Note that this automatically calls refresh()\n\n # uncomment this line to see the re-constituted plane\n# print( self.plane )\n\n # we're done, clean up the file\n jsonfile.close()", "title": "" }, { "docid": "5c56afa87723d55aa6774c9b143ebc05", "score": "0.4843919", "text": "def make_plane_from_point_and_normal(cls, point, normal):\n return Plane()", "title": "" }, { "docid": "5e176beb547117f1b9f40ad70bf9ec57", "score": "0.48368517", "text": "def sample_from_planes(self,\n plane_features: torch.Tensor,\n coordinates: torch.Tensor,\n interp_mode: str = 'bilinear',\n box_warp: float = None) -> torch.Tensor:\n N, n_planes, C, H, W = plane_features.shape\n _, M, _ = coordinates.shape\n plane_features = plane_features.view(N * n_planes, C, H, W)\n\n coordinates = (2 / box_warp) * coordinates\n # NOTE: do not support change projection_mode for specific renderer,\n # use self.projection_mode\n projected_coordinates = self.project_onto_planes(coordinates)\n projected_coordinates = projected_coordinates[:, None, ...]\n\n output_features = torch.nn.functional.grid_sample(\n plane_features,\n projected_coordinates.float(),\n mode=interp_mode,\n padding_mode='zeros',\n align_corners=False)\n output_features = output_features.permute(0, 3, 2, 1).reshape(\n N, n_planes, M, C)\n return output_features", "title": "" }, { "docid": "ddac1ed8573f0f722aa41a9748cfa2f1", "score": "0.48304066", "text": "def PolygonPlane(face, points):\n\n if len(face) < 3:\n return (0.0, 0.0, 1.0) # arbitrary, we really have no idea\n else:\n coords = [points.pos[i] for i in face]\n return Normal(coords)", "title": "" }, { "docid": "0a09aed63ebb851d0fb5d53c93552f31", "score": "0.48082134", "text": "def project_onto_planes(self, coordinates: torch.Tensor) -> torch.Tensor:\n N, _, _ = coordinates.shape\n xy_coord = coordinates[:, :, (0, 1)] # (bz, N_points, 3)\n xz_coord = coordinates[:, :, (0, 2)] # (bz, N_points, 3)\n if self.projection_mode.upper() == 'OFFICIAL':\n yz_coord = coordinates[:, :, (2, 0)] # actually zx_coord\n else:\n yz_coord = coordinates[:, :, (2, 1)]\n coord_proejcted = torch.cat([xy_coord, xz_coord, yz_coord], dim=0)\n # create a index list to release the following remapping:\n # [xy, xy, ..., xz, xz, ..., yz, yz, ...] 
-> [xy, xz, yz, ...]\n index = []\n for n in range(N):\n index += [n, N + n, N * 2 + n]\n return coord_proejcted[index, ...]", "title": "" }, { "docid": "7f8e545a6ee68923e48f380cf526e2dd", "score": "0.47924134", "text": "def plot_f_xy(self, f, mesh_domain, **kwargs):\n super().plot_surface(\n *mesh_domain, map_f_xy(f, mesh_domain),\n **kwargs)", "title": "" }, { "docid": "b1ce0ff4d46fa9cd4ce0d6de4912726e", "score": "0.47908765", "text": "def move(self, point):\n\t\tx,y,z = point\n\t\ta,b,c = self.normal.tuple()\n\t\td = a*x + b*y + c*z\n\t\treturn Plane(a, b, c, d)", "title": "" }, { "docid": "d4a3a8f9c76f883295b5a014de094791", "score": "0.47838867", "text": "def transformation_between_planes(newplane, oldplane):\n newaxis = np.array(newplane[:3])\n Rnew = rotation_matrix_from_axes(newaxis, Z_AXIS)[:3,:3]\n newpoint = np.dot(Rnew, np.array([0,0,newplane[3]]))\n oldaxis = np.array(oldplane[:3])\n Rold = rotation_matrix_from_axes(oldaxis, Z_AXIS)[:3,:3]\n oldpoint = np.dot(Rold, np.array([0,0,oldplane[3]]))\n T = rotation_matrix_from_axes(newaxis, oldaxis)\n T[:3,3] = -newpoint + np.dot(T[:3,:3], oldpoint)\n return T", "title": "" }, { "docid": "ab78af1367979184dbef31c4625dd135", "score": "0.4781972", "text": "def SelectPlane(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "title": "" }, { "docid": "4687a2711d5c93b044f22aae839632cb", "score": "0.47760838", "text": "def translatepointnewcoordinatesystem(point, plane ): \n vector = rg.Point3d.Subtract(point, plane.Origin) \n \n\n xcomp = componentofbindira(plane.XAxis, vector)\n ycomp = componentofbindira(plane.YAxis, vector) \n zcomp = componentofbindira(plane.ZAxis, vector) \n \n newPoint = rg.Point3d(xcomp, ycomp, zcomp) \n \n return newPoint", "title": "" }, { "docid": "060b75768ad33268bdb24e635aa3c05a", "score": "0.47714168", "text": "def intersect_with_plane(self, origin=(0, 0, 0), normal=(1, 0, 0)):\n plane = vtk.vtkPlane()\n plane.SetOrigin(origin)\n plane.SetNormal(normal)\n\n cutter = vtk.vtkPolyDataPlaneCutter()\n cutter.SetInputData(self.polydata())\n cutter.SetPlane(plane)\n cutter.InterpolateAttributesOn()\n cutter.ComputeNormalsOff()\n cutter.Update()\n\n msh = Mesh(cutter.GetOutput(), \"k\", 1).lighting(\"off\")\n msh.GetProperty().SetLineWidth(3)\n msh.name = \"PlaneIntersection\"\n\n msh.pipeline = OperationNode(\n \"intersect_with_plan\", parents=[self],\n comment=f\"#pts {msh.inputdata().GetNumberOfPoints()}\"\n )\n return msh", "title": "" }, { "docid": "f9dccfcd2291fa67d33bff88ca7280fe", "score": "0.4762412", "text": "def __implementation_distance_to_plane(self,\n plane=None,\n pt_1=None, pt_2=None, pt_3=None):\n if not plane is None:\n plane_a = plane.a\n plane_b = plane.b\n plane_c = plane.c\n normal = Vector(x=plane_a, y=plane_b, z=plane_c)\n # TODO - remove hardcode\n if abs(self.parallel_vector.product_with(normal)) > 0.001:\n return 0\n return self.origin.distance_to_plane(plane)\n elif not None in (pt_1, pt_2, pt_3):\n x = ((pt_2.y - pt_1.y) * (pt_3.z - pt_1.z) -\n (pt_3.y - pt_1.y) * (pt_2.z - pt_1.z))\n y = -((pt_2.x - pt_1.x) * (pt_3.z - pt_1.z) -\n (pt_3.x - pt_1.x) * (pt_2.z - pt_1.z))\n z = ((pt_2.x - pt_1.x) * (pt_3.y - pt_1.y) -\n (pt_3.x - pt_1.x) * (pt_2.y - pt_1.y))\n normal = Vector(x=x, y=y, z=z)\n # TODO - remove hardcode\n if self.parallel_vector.product_with(normal) > 0.001:\n return 0\n elements = [pt_1.x - self.origin.x,\n pt_1.y - self.origin.y,\n pt_1.z - self.origin.z,\n pt_2.x - self.origin.x,\n pt_2.y - self.origin.y,\n pt_2.z - self.origin.z,\n pt_3.x - 
self.origin.x,\n pt_3.y - self.origin.y,\n pt_3.z - self.origin.z]\n tetrahedron_volume = Matrix3(elements=elements).det() / 6\n vec12 = Vector(pt_1=pt_1, pt_2=pt_2)\n vec13 = Vector(pt_1=pt_1, pt_2=pt_3)\n cos_angle_1 = (vec12.product_with(vec13) /\n vec12.length() /\n vec13.length())\n sin_angle_1 = (1 - cos_angle_1**2)**0.5\n triangle_area = vec12.length() * vec13.length() * sin_angle_1 / 2\n return 3 * tetrahedron_volume / triangle_area\n else:\n print('error in line.__implementation_distance_to_plane:',\n 'incorrect arguments')\n return None", "title": "" }, { "docid": "c9722354f937865a87c5c440058ccae2", "score": "0.4755353", "text": "def toJSON( self, filename ):\n # open the file for writing\n with open( filename, 'w') as jsonfile:\n # output the parameters needed to recreate the plane\n data = { \"JuliaPlaneParameters\": { \"xmin\":self.xmin, \"xmax\":self.xmax, \"xlen\":self.xlen, \"ymin\":self.ymin, \"ymax\":self.ymax, \"ylen\":self.ylen, \"creal\":self.c.real, \"cimaginary\":self.c.imag }}\n\n # handle the output of the plane contents\n mat = self.plane.as_matrix()\n for row in range( 0, self.xlen ):\n print( mat[ row ] )\n l = mat[ row ].tolist()\n data[ \"JuliaPlaneContents\" + str( row )] = { self.plane.index[ row ]: l }\n\n # write the accummulated dictionary to the JSON file\n json.dump(data, jsonfile, indent=4, sort_keys=True, separators=(',', ':'))\n\n # we're done, clean up the file\n jsonfile.close()", "title": "" }, { "docid": "1e11b9bc07cc2238a565c59eae0c15b8", "score": "0.47398037", "text": "def compute_polygon_plane(self, polygon_id):\n return Plane()", "title": "" }, { "docid": "b38ed80ffe7a681340a70a2055065686", "score": "0.4739005", "text": "def GetPlanePositions(self):\n ...", "title": "" }, { "docid": "5306db6f63ae1597368fd7f4b219a7ca", "score": "0.47357008", "text": "def show_Gingl_plane():\n point1 = np.array([29, 0, 50])\n normal1 = np.array([3.7504, 326.8702, -1])\n \n # a plane is a*x+b*y+c*z+d=0\n # [a,b,c] is the normal. 
Thus, we have to calculate\n # d and we're set\n# d1 = -np.sum(point1*normal1)# dot product \n d1 = -63.2415 # TODO: wtf?\n \n # create x,y\n xx, yy = np.meshgrid(np.linspace(31, 24.5,100), np.linspace(-0.02,0.02, 100)) #ys should be from -.03 to .03\n \n # calculate corresponding z\n z1 = (-normal1[0]*xx - normal1[1]*yy - d1)*1./normal1[2] \n# \n# z1 = normal1[0]*xx \n# z1 = (-normal1[0]*xx - normal1[1]*yy - d1)*1./normal1[2]\n \n ##make mesh transparent\n theCM = cm.get_cmap()\n theCM._init()\n alphas = np.abs(np.linspace(-1.0, 1.0, theCM.N))\n theCM._lut[:-3,-1] = alphas \n \n # plot the surface\n plt3d = plt.figure().gca(projection='3d')\n plt3d.plot_surface(xx, yy, z1, cmap=theCM)\n \n \n plt.show()", "title": "" }, { "docid": "cb034a950c2cba36661f522ee1fc64cb", "score": "0.47291055", "text": "def generate_ordered_puzzle_plane(row, column):\n plane = []\n size = row * column\n empty_element_index = [0, 0]\n\n # Generating ordered numbers list\n numbers = []\n for n in range(0, size):\n numbers.append(n)\n\n # Getting a index of an empty_element\n start = 0\n begin = row\n\n # Creating a 2D plane from 1D\n while begin <= row * column:\n plane.append(numbers[start:begin])\n start += row\n begin += row\n\n return PuzzlePlane(plane, empty_element_index)", "title": "" }, { "docid": "70fd69f0ce22862de51ef1e045934b50", "score": "0.47160617", "text": "def __init__(self, f):\n _complex_fem.ComplexGridFunction_swiginit(self, _complex_fem.new_ComplexGridFunction(f))", "title": "" }, { "docid": "37b3557867496196901eccabfc976dfb", "score": "0.4709728", "text": "def SetPlanePositions(self, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "title": "" }, { "docid": "9fc46ba5c3e654824eb355a537565686", "score": "0.46980014", "text": "def _generate_uniform_planes(self):\n return np.random.randn(self.hash_size, self.input_dim)", "title": "" }, { "docid": "2be0a15718d9248ea7de27121ba363a6", "score": "0.46928856", "text": "def create_transformation(self) -> Transformation:\n raise NotImplementedError", "title": "" }, { "docid": "a8cf51bf6f5749c525405ee086fd1851", "score": "0.46720093", "text": "def draw_plane_cgo(obj, name,apex1,apex2,apex3,apex4,color=(0.5,0.5,0.5)):\r\n\r\n # Convert args to floating point numbers\r\n x1,y1,z1 = map(float,apex1)\r\n x2,y2,z2 = map(float,apex2)\r\n x3,y3,z3 = map(float,apex3)\r\n x4,y4,z4 = map(float,apex4)\r\n if type(color) == type(''):\r\n color = map(float,color.replace('(','').replace(')','').split(','))\r\n\r\n # Compute the normal vector for the triangle\r\n normal1 = compute_normal(x1, y1, z1, x2, y2, z2, x3, y3, z3)\r\n normal2 = compute_normal(x1, y1, z1, x3, y3, z3, x4, y4, z4)\r\n normal3 = compute_normal(x2, y2, z2, x3, y3, z3, x4, y4, z4)\r\n\r\n # Create the CGO objects\r\n # Uncomment ALPHA line below to draw semi-transparent hull\r\n obj.extend([\r\n BEGIN, TRIANGLE_STRIP,\r\n COLOR, color[0], color[1], color[2],\r\n# ALPHA, 0.5,\r\n NORMAL, normal1[0], normal1[1], normal1[2],\r\n VERTEX, x1, y1, z1,\r\n VERTEX, x2, y2, z2,\r\n VERTEX, x3, y3, z3,\r\n VERTEX, x4, y4, z4,\r\n\r\n END\r\n ])", "title": "" }, { "docid": "6257fc02c45fb8dc9a4630b2b565498c", "score": "0.46716306", "text": "def __init__(self,nx=(2,2,2),ox=(0.,0.,0.),size=((0.0,1.0,1.0),(0.0,1.0,1.0)),linecolor=black,linewidth=None,planecolor=white,alpha=0.5,lines=True,planes=True):\n Actor.__init__(self)\n self.linecolor = saneColor(linecolor)\n self.planecolor = saneColor(planecolor)\n self.linewidth = linewidth\n self.alpha = alpha\n self.trans = True\n 
self.lines = lines\n self.planes = planes\n self.nx = asarray(nx)\n ox = asarray(ox)\n sz = asarray(size)\n self.x0,self.x1 = ox-sz[0], ox+sz[1]", "title": "" }, { "docid": "d1864fac37b6ddeb331ce784ba14f9e5", "score": "0.46699846", "text": "def constrain_location_to_plane(self, location):\n return Vector()", "title": "" }, { "docid": "87fd6b2ba0318dd4b5839b3e13f0e874", "score": "0.46659094", "text": "def render_surface(\n x: np.ndarray,\n y: np.ndarray,\n z_fcn: [Callable[[np.ndarray], np.ndarray], nn.Module],\n x_label: str,\n y_label: str,\n z_label: str,\n data_format='numpy',\n fig: plt.Figure = None,\n cmap: mpl.cm.ScalarMappable = None,\n title: str = None,\n) -> plt.Figure:\n if cmap is None:\n cmap = mpl.rcParams['image.cmap']\n\n if fig is None:\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Create mesh grid matrices from x and y vectors\n xx, yy = np.meshgrid(x, y)\n\n # Check which version to use based on the output of the function\n if data_format == 'numpy':\n # Operate on ndarrays\n zz = np.array([z_fcn(np.stack((x, y), axis=0)) for x, y in zip(xx, yy)])\n\n elif data_format == 'torch':\n # Operate on Tensors\n xx_tensor = to.from_numpy(xx)\n yy_tensor = to.from_numpy(yy)\n\n if hasattr(z_fcn, '_fcn'):\n # Passed function was wrapped (e.g. by functools)\n check_fcn = z_fcn._fcn\n else:\n check_fcn = z_fcn\n\n if isinstance(check_fcn, nn.Module):\n # Adapt for batch-first behavior of NN-based policies\n zz = to.stack([z_fcn(to.stack((x, y), dim=1).view(-1, 1, 2).to(to.get_default_dtype()))\n for x, y in zip(xx_tensor, yy_tensor)])\n else:\n zz = to.stack([z_fcn(to.stack((x, y), dim=1).transpose(0, 1).to(to.get_default_dtype()))\n for x, y in zip(xx_tensor, yy_tensor)])\n zz = zz.squeeze().detach().numpy()\n\n else:\n raise pyrado.ValueErr(given=data_format, eq_constraint=\"'numpy' or 'torch'\")\n\n # Generate the plot\n ax.plot_surface(xx, yy, zz, cmap=cmap)\n\n # Add labels\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_zlabel(z_label)\n if title is not None:\n ax.set_title(title)\n return fig", "title": "" }, { "docid": "7fb049ec3cdd0dccd49aa67f91b5cbba", "score": "0.46588483", "text": "def from_axes(axes):\n\t\tif len(axes) != 2:\n\t\t\traise ValueError(\"too many axes\")\n\n\t\tif \"x\" in axes and \"y\" in axes:\n\t\t\treturn Plane.from_normal(Vector.from_axis(\"z\"),0)\n\t\tif \"x\" in axes and \"z\" in axes:\n\t\t\treturn Plane.from_normal(Vector.from_axis(\"y\"),0)\t\n\t\tif \"y\" in axes and \"z\" in axes:\n\t\t\treturn Plane.from_normal(Vector.from_axis(\"x\"),0)\n\n\t\traise ValueError(\"invalid axes\")", "title": "" }, { "docid": "a0d1f314966e0aaf4e7288bbbbff5423", "score": "0.46554577", "text": "def plane(self):\n return self._plane", "title": "" }, { "docid": "2e50899d4afa04ae80a8c66ff25de54d", "score": "0.46470854", "text": "def create_nurbs_plane(name='plane', width=1.0, length=1.0, patches_u=1, patches_v=1, **kwargs):\n\n raise NotImplementedError()", "title": "" }, { "docid": "e70465a765d3314c1c52b0fb42d52497", "score": "0.46440476", "text": "def __init__(self):\n self.__board = [[0] * 10 for x in range(10)]\n self.__plane_id = 0\n self.__dict_planes = {}", "title": "" }, { "docid": "2f6e1147065f1c045a716d9e91ce2aaa", "score": "0.4643647", "text": "def main():\n\n (a1,a2,a3,surface_normal,ns) = read_input()\n\n surface_normal = reduced_norm(surface_normal)\n\n #Here we find the atoms in the plane that passes through the\n #origin that is normal to the vector surface_normal\n atoms_in_plane = []\n \n #loop over possible lattice vector 
combinations.\n for i in range(-10,10):\n for j in range(-10,10):\n for m in range(-10,10):\n #find the multiples of the lattice vectors\n b1 = scalar_prod(i,a1)\n b2 = scalar_prod(j,a2)\n b3 = scalar_prod(m,a3)\n #add up the lattice vectors to determine the loctaion\n #of the atom\n atom = [b1[0]+b2[0]+b3[0],b1[1]+b2[1]+b3[1],b1[2]+b2[2]+b3[2]]\n #if the vector from the orgin to the point is in the\n #plane then it's dot product with the normal vector\n #will be zero and we want to add it to th list of\n #atoms in th plane.\n if (dot_prod(atom,surface_normal)==0):\n atoms_in_plane.append(atom)\n\n #After that we find out how many atoms were in that plane, which\n #is useful for the plotting later and then start building the\n #other planes, the plane variable tracks how many planes there are\n #and the distance between the current plane and the origin, the\n #different planes should always be equadistant. The shift array\n #keeps the location of an atom in each plane, which is then\n #treated as the new origin when finding the rest of the atoms in\n #that plane.\n atoms_per_plane = [len(atoms_in_plane)]\n planes = [0]\n shift = [[0,0,0]]\n \n #ns is the number of planes we want, this if there was only one\n #were done.\n if(ns > 1):\n #keep finding new planes and new atoms until we've found each\n #desired plane.\n \n while(len(planes) < ns):\n #logical, returns true once a new plane is found.\n next_plane = False\n\n #temporary variable useful for the loops\n t = 0\n #nn is used to limit the number of lattice vector\n #combinations used so that we don't have to generate an\n #arbitrarily large list. Instead if we need more lattice\n #points we'll increment nn.\n nn = 2\n\n #here we actually find the next plane in the sequenc. This\n #is done by going through the possible lattice vector\n #combinations and find the magnitude of their projection\n #onto the normal vector. 
If that value isn't in the list\n #of planes then we add it to the planes array, add the\n #lattice site to the shift array, then change next_plane\n #to True.\n while(next_plane == False):\n possible_sites = list(product(range(nn),repeat=3))\n \n while((next_plane == False) and (t < len(possible_sites))):\n v = possible_sites[t]\n #p2 is the lattice point in concideration\n p2 = [v[0]*a1[0]+v[1]*a2[0]+v[2]*a3[0],v[0]*a1[1]+v[1]*a2[1]+v[2]*a3[1],v[0]*a1[2]+v[1]*a2[2]+v[2]*a3[2]]\n #dtp is the magnitude of the projection of the\n #vector from the origin to p2 along the normal\n #vector.\n dtp = distance_between_planes([0,0,0],p2,surface_normal)\n #If dpt isn't zero or in planes then we found a new plane.\n if((dtp != 0) and (dtp not in planes)):\n next_plane = True\n planes.append(dtp)\n shift.append(p2)\n t += 1\n else:\n t += 1\n \n if(next_plane == False):\n #If we've been to all the points and haven't found\n #a new plane then we need to increase the\n #multiples of the lattice vectors we consider.\n nn +=1\n \n #For every plane we've found we now need to find the atoms in\n #that plane.\n for p in range(len(planes)):\n #We don't want to do the plane through the origin, we\n #alread found all the atoms in it.\n if (planes[p] != 0):\n atoms_in_nextplane = []\n #loop over possible lattice vector combinations.\n for i in range(-10,10):\n for j in range(-10,10):\n for k in range(-10,10):\n #p2 is the point that the normal vector\n #will pass through, ie the new origin for\n #this plane\n p2 = shift[p]\n #find the multiples of the lattice vectors\n b1 = scalar_prod(i,a1)\n b2 = scalar_prod(j,a2)\n b3 = scalar_prod(k,a3)\n #add up the lattice vectors to determine\n #the loctaion of the atom\n atom = [b1[0]+b2[0]+b3[0],b1[1]+b2[1]+b3[1],b1[2]+b2[2]+b3[2]]\n #now shift the atom by p2 to put it in the\n #appropriate coordinates for the plane\n #passing through p2\n shifted_atom = [atom[0]-p2[0],atom[1]-p2[1],atom[2]-p2[2]]\n #if the vector from the orgin to the point\n #is in the plane then it's dot product\n #with the normal vector will be zero and\n #we want to add it to th list of atoms in\n #th plane.\n if (dot_prod(shifted_atom,surface_normal)==0):\n atoms_in_nextplane.append(atom)\n\n #Add the atoms in the plane to the complete list of\n #atoms.\n for i in atoms_in_nextplane:\n atoms_in_plane.append(i)\n atoms_per_plane.append(len(atoms_in_nextplane))\n\n #Now we need to change our x,y coordinates so that we can go from\n #a 3D space to a 2D surface plot for atomic positions.\n if (surface_normal != [1,0,0]):\n new_y = cross_prod([1,0,0],surface_normal)\n else:\n new_y = cross_prod([0,0,1],surface_normal)\n \n new_x = cross_prod(new_y,surface_normal)\n\n #make the new x and y axis into unit vectors to get the\n #dimmensions right.\n new_z = find_unit_vector(surface_normal)\n new_y = find_unit_vector(new_y)\n new_x = find_unit_vector(new_x)\n\n atoms = []\n \n # we need to project our atomic positions onto the new 2D plane.\n for i in atoms_in_plane:\n # if (i!=[0,0,0]):\n # proj_x = scalar_prod(dot_prod(i,new_x)/mt.sqrt(dot_prod(i,i)),i)\n # proj_y = scalar_prod(dot_prod(i,new_y)/mt.sqrt(dot_prod(i,i)),i)\n # atoms.append([proj_x,proj_y])\n # else:\n # atoms.append(i)\n atoms.append([dot_prod(new_x,i),dot_prod(new_y,i)])\n\n #variables for the plot. 
which color changes so that each plane\n #will have a different color of atom so that we can tell which\n #atom are in the same plane.\n at_atom = 0\n which_color = 0\n \n #Now we plot each plane order\n for i in atoms_per_plane:\n for j in range(i):\n x = which_color/float(ns)\n y_mag = mt.sqrt(dot_prod(new_y,new_y))\n x_mag = mt.sqrt(dot_prod(new_x,new_x))\n plt.axis('equal')\n plt.ylim((-2.5*y_mag,2.5*y_mag))\n plt.xlim((-2.5*x_mag,2.5*x_mag))\n plt.plot(atoms[at_atom][0],atoms[at_atom][1], marker = 'o', color = cm.gist_ncar(x),ms = 40,ls = '')\n at_atom += 1\n which_color += 1\n\n plt.show()\n return()", "title": "" }, { "docid": "f02d0e202acb3a7fb05e2a0c21b2da2c", "score": "0.4642036", "text": "def colorplane(x, y, width, height, *a):\n if len(a) == 2:\n # Top and bottom colors.\n clr1, clr2, clr3, clr4 = a[0], a[0], a[1], a[1]\n elif len(a) == 4:\n # Top left, top right, bottom right, bottom left.\n clr1, clr2, clr3, clr4 = a[0], a[1], a[2], a[3]\n elif len(a) == 3:\n # Top left, top right, bottom.\n clr1, clr2, clr3, clr4 = a[0], a[1], a[2], a[2]\n elif len(a) == 0:\n # Black top, white bottom.\n clr1 = clr2 = (0,0,0,1)\n clr3 = clr4 = (1,1,1,1)\n glPushMatrix()\n glTranslatef(x, y, 0)\n glScalef(width, height, 1)\n glBegin(GL_QUADS)\n glColor4f(clr1[0], clr1[1], clr1[2], clr1[3] * _alpha); glVertex2f(-0.0, 1.0)\n glColor4f(clr2[0], clr2[1], clr2[2], clr2[3] * _alpha); glVertex2f( 1.0, 1.0)\n glColor4f(clr3[0], clr3[1], clr3[2], clr3[3] * _alpha); glVertex2f( 1.0, -0.0)\n glColor4f(clr4[0], clr4[1], clr4[2], clr4[3] * _alpha); glVertex2f(-0.0, -0.0)\n glEnd()\n glPopMatrix()", "title": "" }, { "docid": "d45debfd0f99c255ce0f787bf310604a", "score": "0.46357605", "text": "def generate_planes(segment_list, internal_points_list):\n i=1\n list = []\n\n while(i<len(segment_list)):\n \n list.append(TEXTURE(\"texture/tetto.jpg\")(OFFSET([0.1,0.1,0.1])(MKPOL([[segment_list[i][0],segment_list[i][1],internal_points_list[i-1],internal_points_list[i] ],[[1,2,3,4]],[1]]))))\n\n i=i+1\n\n list.append(TEXTURE(\"texture/tetto.jpg\")(OFFSET([0.1,0.1,0.1])(MKPOL([[segment_list[0][0],segment_list[0][1],internal_points_list[0],internal_points_list[len(internal_points_list)-1] ],[[1,2,3,4]],[1]]))))\n \n return STRUCT(list)", "title": "" }, { "docid": "52f88e3232cc515e99144806086a0b75", "score": "0.4634318", "text": "def __init__(self, horizontal_n, vertical_m, function, value, segment_endpoints=None, callback=None):\n #self.epsilon = epsilon\n n = self.n = horizontal_n\n m = self.m = vertical_m\n self.corner = np.array([n, m], dtype=np.int)\n self.f = function\n self.z = value\n if segment_endpoints is None:\n self.search_grid()\n else:\n self.end_points = np.array(segment_endpoints, dtype=np.int)\n # cache of (x,y) --> f(x,y)\n #self.location_values = {}\n # ((x1,y1), (x2,y2)) --> (x0,y0), interpolated contour location.\n self.interpolated_contour_pairs = {}\n # set of ((x1,y1), (x2,y2)) for low-point/high-point pairs on horizon.\n self.new_contour_pairs = set()\n self.callback = callback\n # [(closed, point_sequence), ...] 
for contours found\n self.contours = []\n self.triangle_triples = set()", "title": "" }, { "docid": "a693273c47ca6d9bf1dea75f33fee57c", "score": "0.46181926", "text": "def draw_debug_plane(cls, world_context_object, plane_coordinates, location, size, plane_color=[0.000000, 0.000000, 0.000000, 0.000000], duration=0.000000):\n return None", "title": "" }, { "docid": "a74f0fbe0f03eb74c71f0c72c9d1949a", "score": "0.46111906", "text": "def random_point(self, plane=True):\r\n vertices = self.vertices(plane=plane)\r\n u_min = min([p[0] for p in vertices])\r\n u_max = max([p[0] for p in vertices])\r\n v_min = min([p[1] for p in vertices])\r\n v_max = max([p[1] for p in vertices])\r\n if plane:\r\n return uniform(u_min, u_max), uniform(v_min, v_max)\r\n else:\r\n if self.ellipsoidal_shape() == 'cap':\r\n # Need to adjust extremes.\r\n PI = self.ellipsoid.pi()\r\n u_max = PI\r\n if v_min > 0:\r\n v_max = PI/2\r\n else:\r\n v_min = -PI/2 \r\n # Sample longitude and latitude within extremes, but reject if\r\n # they don't lie in the cell.\r\n # Rejection can happen for polar cells, because they are not \r\n # rectangular.\r\n while True:\r\n lam, phi = self.ellipsoid.random_point(u_min, u_max, \r\n v_min, v_max) \r\n if self.contains((lam, phi), plane=False):\r\n # Success\r\n return lam, phi", "title": "" }, { "docid": "eee0239cce1a0d162083ff340db4e08e", "score": "0.4584949", "text": "def plane_by_points(points):\n a, b, c = np.array(points)\n ab, ac = b - a, c - a\n n = vdot(ab, ac)\n return np.hstack([n, -np.dot(n, points[0])])", "title": "" }, { "docid": "0c49c291e7fdf0a83edfff8d8d0c55ed", "score": "0.4579398", "text": "def cp(xmin = -1.5, xmax = 1.5, xres = 2**10,\n ymin = -1.5, ymax = 1.5, yres = 2**10):\n X = linspace(xmin,xmax,xres)\n Y = linspace(ymin,ymax,yres)\n x, y = meshgrid(X,Y)\n z = x + 1j*y\n return(z)", "title": "" }, { "docid": "6675f2dc59321e0122b24e783f91e6ce", "score": "0.45785245", "text": "def plot_plane_curve(vertices, *args, **kwargs):\n x = vertices[:, 0]\n y = vertices[:, 1]\n fig = kwargs.pop(\"fig\", None)\n unique = (fig is None)\n if fig is None:\n plt.figure()\n plt.plot(x, y, *args, **kwargs)\n if unique:\n plt.show()", "title": "" }, { "docid": "2670928651685a82e5a92c304d46a178", "score": "0.4575455", "text": "def plot_points_2d(f, points, ax=None, **kwargs):\n kwargs.setdefault('s', 100)\n kwargs.setdefault('c', TUM_COLORS['accent_orange'])\n kwargs.setdefault('depthshade', False)\n\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n ax.scatter(\n points[:, 0], points[:, 1], f(points),\n **kwargs)\n\n return ax", "title": "" }, { "docid": "4339867d948c688d829d3853009ff2b0", "score": "0.45744097", "text": "def from_normal(normal, d=0):\n\t\treturn Plane(*normal.tuple(), d)", "title": "" }, { "docid": "825ddb365099e2d25762d1c20245c35e", "score": "0.45603502", "text": "def add_clipping_plane(plane, d_u, d_v, views=pythoncom.Empty):\r\n return _base._rsf.add_clipping_plane(plane, d_u, d_v, views)", "title": "" }, { "docid": "a01b0ae3e79ab497ee0a1acce29289a7", "score": "0.4556325", "text": "def icosahedron_planes(draw, r, scale = 300, shift = np.array([1000,1000,0])):\n cind = -1\n phi = (1 + np.sqrt(5)) / 2.0\n mat_orig = np.array([\n [1, phi, 0],\n [0, 1, phi],\n [phi, 0, 1]\n ])\n for ii in [1, -1]:\n for jj in [1, -1]:\n for kk in [1, -1]:\n cind += 1\n mat = np.copy(mat_orig)\n mat[0,0] = mat[0,0] * ii\n mat[1,0] = mat[1,0] * ii\n mat[2,0] = mat[2,0] * ii\n mat[0,1] = mat[0,1] * jj\n mat[1,1] = mat[1,1] * jj\n mat[2,1] = mat[2,1] 
* jj\n mat[0,2] = mat[0,2] * kk\n mat[1,2] = mat[1,2] * kk\n mat[2,2] = mat[2,2] * kk\n mat1 = np.dot(mat, r) * scale + shift[:3]\n smat = sum(mat1)\n forward_face = np.dot(smat, np.array([0,0,1])) > -1e-3\n face_angle = np.dot(smat/np.sqrt(sum(smat**2)), np.array([0,0.01,0.99]))\n if forward_face:\n poly = [(mat1[i][0],mat1[i][1]) for i in range(len(mat1))]\n rgba = colorFromAngle2(face_angle,h=153,s=120,maxx=0.25)\n draw.polygon(poly, rgba)\n #uncomment if you want to plot edges\n #for line in range(len(mat1)):\n # draw.line((mat1[line][0],mat1[line][1],mat1[(line+1)%3][0],mat1[(line+1)%3][1]), fill = (0,255,0,255), width = 5)\n #else:\n # for line in range(len(mat1)):\n # draw.line((mat1[line][0],mat1[line][1],mat1[(line+1)%3][0],mat1[(line+1)%3][1]), fill = (0,255,0,255), width = 3)\n for ii in range(3):\n for kk in [1, -1]:\n for ll in [1, -1]:\n cind += 1\n mat = np.copy(mat_orig)\n mat[0, ii] = kk * mat[0, ii]\n mat[1, ii] = kk * mat[1, ii]\n mat[2, ii] = kk * mat[2, ii]\n mat[0, (ii+2)%3] = ll * mat[0, (ii+2)%3]\n mat[1, (ii+2)%3] = ll * mat[1, (ii+2)%3]\n mat[2, (ii+2)%3] = ll * mat[2, (ii+2)%3]\n for jj in range(3):\n mat[ii,jj] = mat[(ii+1)%3,jj]\n mat[ii, (ii+1)%3] = -1 * mat[ii, (ii+1)%3]\n mat1 = np.dot(mat, r) * scale + shift[:3]\n smat = sum(mat1)\n forward_face = np.dot(smat, np.array([0,0,1])) > -1e-3\n face_angle = np.dot(smat/np.sqrt(sum(smat**2)), np.array([0,0.01,0.99]))\n if forward_face:\n poly = [(mat1[i][0],mat1[i][1]) for i in range(len(mat1))]\n rgba = colorFromAngle2(face_angle,h=153,s=120,maxx=0.25)\n draw.polygon(poly, rgba)\n # Uncomment if you want to plot edges.\n #for line in range(len(mat1)):\n # draw.line((mat1[line][0],mat1[line][1],mat1[(line+1)%3][0],mat1[(line+1)%3][1]), fill = (0,255,0,255), width = 5)\n #else:\n # for line in range(len(mat1)):\n # draw.line((mat1[line][0],mat1[line][1],mat1[(line+1)%3][0],mat1[(line+1)%3][1]), fill = (0,255,0,255), width = 3)", "title": "" }, { "docid": "8df54f2b0f42f1006802de03a171268e", "score": "0.45546117", "text": "def run_plane(model, slow, npts, dt, baz=0, wvtype='P',\n obs=False, dp=100., c=1.5, rhof=1027):\n\n # Pass variables to Fortran conf\n model2for(model)\n wave2for(dt, slow, baz)\n\n # Run the ``plane`` module depending on land or OBS case.\n if obs:\n\n # If OBS, then further pass OBS-related paramters to Fortran conf\n obs2for(dp, c, rhof)\n\n # Get the Fourier transform of seismograms for ``obs``case\n yx, yy, yz = pw_f.plane_obs(\n npts, model.nlay, np.array(wvtype, dtype='c'))\n\n else:\n\n # Get the Fourier transform of seismograms for ``land`` case\n yx, yy, yz = pw_f.plane_land(\n npts, model.nlay, np.array(wvtype, dtype='c'))\n\n # Transfer displacement seismograms to an ``obspy`` ``Stream`` object.\n trxyz = get_trxyz(yx, yy, yz, npts, dt, slow, baz, wvtype)\n\n return trxyz", "title": "" }, { "docid": "dac2fb45283b4ec32dc0f2a422403f12", "score": "0.4551788", "text": "def calculate_cross_plane(\n self, x_loc, x_resolution=200, y_resolution=200, x_bounds=None, y_bounds=None\n ):\n # Get the points of data in a dataframe\n df = get_plane_from_flow_data(self.flow_data, normal_vector=\"x\", x3_value=x_loc)\n\n # Compute and return the cutplane\n return CutPlane(df)", "title": "" }, { "docid": "d6b98ee5a2ed7a87933b27f09a834f8e", "score": "0.45500967", "text": "def generate_points(self, batch=None):\n num_x, num_y, _ = self.get_dims()\n logger = logging.getLogger(__name__)\n logger.info(\"Surface for domain contains %d points (%d x %d).\",\n num_x * num_y, num_x, num_y)\n\n if batch:\n x_start, 
x_end = batch.x_range\n y_start, y_end = batch.y_range\n logger.info(\"Surface for batch contains %d points (%d x %d).\",\n (x_end - x_start) * (y_end - y_start), (x_end - x_start), (y_end - y_start))\n else:\n x_start = 0\n x_end = num_x\n y_start = 0\n y_end = num_y\n\n if self.x_resolution:\n x1 = numpy.linspace(0.0, self.x_resolution * (num_x - 1), num_x)[x_start:x_end]\n else:\n x1 = self.x_coordinates[x_start:x_end]\n if self.y_resolution:\n y1 = numpy.linspace(0.0, self.y_resolution * (num_y - 1), num_y)[y_start:y_end]\n else:\n y1 = self.y_coordinates[y_start:y_end]\n x, y = numpy.meshgrid(x1, y1, indexing=\"ij\")\n z = numpy.zeros(x.shape)\n\n xyz_geo = numpy.stack((x, y, z), axis=2)\n xyz_model = numpy.zeros(xyz_geo.shape)\n az_rad = self.model_metadata.y_azimuth * math.pi / 180.0\n xyz_model[:, :, 0] = self.model_metadata.origin_x + xyz_geo[:, :, 0] * \\\n math.cos(az_rad) + xyz_geo[:, :, 1] * math.sin(az_rad)\n xyz_model[:, :, 1] = self.model_metadata.origin_y - xyz_geo[:, :, 0] * \\\n math.sin(az_rad) + xyz_geo[:, :, 1] * math.cos(az_rad)\n return xyz_model", "title": "" }, { "docid": "8ee3df9e38ce79489738c42a2ccc96dd", "score": "0.45434946", "text": "def generate_random_puzzle_plane(row, column):\n empty_element = 0\n plane = []\n size = row * column\n\n # Generating ordered numbers list\n numbers = []\n for n in range(0, size):\n numbers.append(n)\n\n random.shuffle(numbers)\n\n # Getting a index of an empty_element\n empty_element_index = numbers.index(empty_element)\n empty_element_index = [int(empty_element_index / row), int(empty_element_index % row)]\n\n start = 0\n begin = row\n\n # Creating a 2D plane from 1D\n while begin <= row * column:\n plane.append(numbers[start:begin])\n start += row\n begin += row\n\n return PuzzlePlane(plane, empty_element_index)", "title": "" }, { "docid": "a7ee252a17121167261a49fa3bae9531", "score": "0.454343", "text": "def __init__(self, origin, x_vector, y_vector, scaling=(1,1)):\n\n\t\t# unwrap tuples into numpy array\n\t\tself.origin = Point(*origin)\n\t\tself.x_vector = x_vector.copy()\n\t\tself.y_vector = y_vector.copy()\n\t\tself.scaling = scaling\n\n\t\tself.update()", "title": "" } ]
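Several of the negative passages above build planes from points (e.g. plane_by_points, which relies on an undefined helper vdot). A self-contained sketch of the same construction follows, assuming vdot computes a cross product; the function name plane_through_points is hypothetical. The result is the coefficient vector [a, b, c, d] of the plane a*x + b*y + c*z + d = 0.

```python
import numpy as np

def plane_through_points(p0, p1, p2):
    # Normal is the cross product of two edge vectors of the triangle p0-p1-p2
    # (np.cross stands in for the passage's vdot helper, assumed to be a cross product).
    n = np.cross(np.subtract(p1, p0), np.subtract(p2, p0))
    d = -np.dot(n, p0)  # plane equation: n . x + d = 0
    return np.hstack([n, d])

print(plane_through_points([0, 0, 0], [1, 0, 0], [0, 1, 0]))  # [0 0 1 0] -> the plane z = 0
```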
0432d19c960ac28feb72115c29bf9549
This generator outputs unique lines from a file line by line.
[ { "docid": "8af6f1e81a26b94d3e539cfb7442eae7", "score": "0.7510291", "text": "def read_file_line_by_line(file_name):\n with open(file_name) as f:\n unique_strings = []\n while True:\n line = f.readline()\n if line not in unique_strings:\n unique_strings.append(line)\n yield line\n elif not line:\n break", "title": "" } ]
[ { "docid": "8d23355987653c742f5f54eaca177c26", "score": "0.6710934", "text": "def gen_lines(files):\n for file in files:\n with open(file, 'r') as fin:\n for line in fin.readlines():\n yield line", "title": "" }, { "docid": "7ace98676562ef4bdfae958f481201d2", "score": "0.6681335", "text": "def genline(self):\n for line in self.lines:\n yield line", "title": "" }, { "docid": "ec05eea60aad1c509000cd2767cf23b0", "score": "0.6613316", "text": "def read_log_gen(f):\r\n\twhile True:\r\n\t\tline = f.readline()\r\n\t\tif not line:\r\n\t\t\tbreak\r\n\t\tyield line.rstrip('\\n').strip()", "title": "" }, { "docid": "b1ebdd35a78b0692794800d847ed5c21", "score": "0.64156103", "text": "def lines_from_file(path):\n with open(path) as f:\n for line in f:\n yield line.rstrip()", "title": "" }, { "docid": "d4b68b0cf60ad12dfcf173a0d7f417f8", "score": "0.63318044", "text": "def lines_from_file(filename):\n with open(filename) as file_obj:\n for line in file_obj:\n yield line", "title": "" }, { "docid": "09f13db297697095324f12c8c6b96c81", "score": "0.6304959", "text": "def _read_lines(cls, file_path):\n with open(file_path, 'r', encoding='utf-8-sig') as f:\n for line in f:\n # Strip whitespaces.\n line = line.strip()\n # Don't return blank lines and comments.\n if line != '' and line[0] != '#':\n yield line", "title": "" }, { "docid": "af2fa596cd5bad2c45a58a4f93135793", "score": "0.62737507", "text": "def file_id_iter(fname, mode='r'):\n with open(fname, mode) as f:\n for line in f:\n yield line.strip()", "title": "" }, { "docid": "256643a9e18818013f85695b045a7d4f", "score": "0.62097704", "text": "def read_line(path): \r\n for line in open(path):\r\n yield line.strip()", "title": "" }, { "docid": "6ca94e65b68f7c75e947b7f0b58b5925", "score": "0.6099867", "text": "def lines(self):\n\t\twith open(self.path, \"r\") as fh:\n\t\t\tself.reading = True\n\t\t\tfor line in fh:\n\t\t\t\tself.in_lines.append(line)\n\t\t\t\tyield line\n\n\t\t\tself.reading = False", "title": "" }, { "docid": "284eff33496ec4df607f9190dc12d47e", "score": "0.6099574", "text": "def block_generator(filename):\n buffer = []\n with open(filename) as f:\n for line in f:\n line = line.strip()\n if not line:\n yield buffer\n buffer = []\n else:\n buffer.append(line)", "title": "" }, { "docid": "763c3dc1e77e5a569e684836a826c7ed", "score": "0.6080848", "text": "def read_log(file):\n\n while True:\n line = file.readline()\n if not line:\n break\n yield line", "title": "" }, { "docid": "bb90468d32bdf7a82f68812259ea6350", "score": "0.60781175", "text": "def read_lines(file_path: str) -> Generator[str, None, None]:\n with open(file_path, 'r') as fp:\n for line in fp.readlines():\n yield line.strip()", "title": "" }, { "docid": "be996d5b69fd856fdc815a1261ded39b", "score": "0.59861994", "text": "def keys_generator(fname):\n with open(fname, \"r\") as f:\n for line in f:\n fields = line.replace(\"\\n\", \"\").split(\"\\t\")\n yield int(fields[0]), fields[1]", "title": "" }, { "docid": "d9430615b9c7045b96b89b6bee4bb6a3", "score": "0.598367", "text": "def lines(filename):\n\tfn_open = gzip.open if filename.endswith('.gz') else open\n\n\twith fn_open(filename) as fh:\n\t\tfor line in fh:\n\t\t\tif line.startswith('#'):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tyield parse(line)", "title": "" }, { "docid": "470d6d700b10a34a18798596e978c6e3", "score": "0.5962142", "text": "def _read_new_lines(self):\n file = open(self.file_path, \"r\")\n\n while True:\n line = file.readline()\n\n if not line:\n continue\n\n yield line", "title": "" }, { "docid": 
"b63ecc5f4510a4b4b71f743c0435419b", "score": "0.5960312", "text": "def read(filename, num_lines):\n first_line = None\n with open(filename, 'r') as f:\n if not first_line:\n first_line = f.readline()\n for chunk in grouper(num_lines, f, ''):\n yield chunk", "title": "" }, { "docid": "3e44d547f08bd917b316e887f1de01f7", "score": "0.5942329", "text": "def lines(filename):\n fn_open = gzip.open if filename.endswith('.gz') else open\n\n with fn_open(filename) as fh:\n for line in fh:\n if line.startswith('#'):\n continue\n else:\n yield parse(line) #see parse(line) below converts each line to a dict.", "title": "" }, { "docid": "b4dec7e32194d7dc83105c5182794e07", "score": "0.58823484", "text": "def _fetch(self, file):\n file.seek(0, 2)\n while self.running:\n line = file.readline()\n if not line:\n time.sleep(self.frequency)\n continue\n yield line.rstrip('\\n')", "title": "" }, { "docid": "080294c6929c362ae2270720f4cda199", "score": "0.5873971", "text": "def read_file(filename):\n f = open(filename, 'r', encoding='utf-8', errors='ignore')\n generator = (line.strip() for line in f if line != '---------------\\n')\n return generator", "title": "" }, { "docid": "35c9abf153a0c215704e12414c5f3dfa", "score": "0.58555937", "text": "def makeFASTQGenerator(filename):\n\twith open(filename, 'rU') as f:\n\t\tfor i in range(5):\n\t\t\tfifth = f.readline()\n\t\tif fifth == \"\\n\":\n\t\t\tf.seek(0)\n\t\t\tfor chunk in grouper(f.readlines(), 5):\n\t\t\t\tchunky = [chunk[0].strip(\"\\n\"),chunk[1].strip(\"\\n\"),chunk[3].strip(\"\\n\")]\n\t\t\t\tyield chunky\n\t\telse:\n\t\t\tf.seek(0)\n\t\t\tfor chunk in grouper(f.readlines(), 4):\n\t\t\t\tchunky = [chunk[0].strip(\"\\n\"),chunk[1].strip(\"\\n\"),chunk[3].strip(\"\\n\")]\n\t\t\t\tyield chunky", "title": "" }, { "docid": "762d66ee2f493db92998e00adeb33f88", "score": "0.5836806", "text": "def data_generator(filename):\n def data_gen():\n with open(filename) as file:\n for line in file:\n yield tuple(k.strip() for k in line.split(',')) \n\n return data_gen", "title": "" }, { "docid": "aaeb765a6e9e8e326d766fdb54d4c64b", "score": "0.5749873", "text": "def read_from_file(self):\n\n file_input = open('input.txt', 'r')\n list_of_lines = file_input.readlines()\n\n for i in self.__parse_line(list_of_lines):\n yield i", "title": "" }, { "docid": "c1b05697a591cc1359b626d4596ca354", "score": "0.57417935", "text": "def generateChunks(f, pa):\n # logger.info('Generator invoked with pattern %s for file: %s' % (pa, f))\n with open(f, 'r') as fi:\n reslist = []\n lineNumber = 1\n previousLineNumber = 1\n for line in fi:\n if pa in line:\n yield {'chunk': ''.join(reslist),\n 'lineNumber': previousLineNumber}\n reslist = []\n previousLineNumber = lineNumber\n reslist.append(line)\n lineNumber += 1\n yield {'chunk': ''.join(reslist), 'lineNumber': previousLineNumber}", "title": "" }, { "docid": "e061ccf77bc89019a33afe2c7c293773", "score": "0.57125324", "text": "def readLogLines(self, file_handler):\r\n while True:\r\n line = file_handler.readline(4096)\r\n if not line:\r\n break\r\n yield line", "title": "" }, { "docid": "a786f87c8f463accfaf31a5e77c71bd5", "score": "0.57091033", "text": "def _SourceLines(filename, line_num, context=3):\n import linecache\n for line_num in range(line_num - context, line_num + context + 1):\n yield line_num, linecache.getline(filename, line_num)", "title": "" }, { "docid": "db173e2cfc274bf8998834a8f33c1c8f", "score": "0.5704928", "text": "def read_map_uniq(fh, sep='\\t'):\n for line in fh:\n key, found, value = line.partition(sep)\n if found and 
sep not in value:\n yield key, value.rstrip()", "title": "" }, { "docid": "e3d138169707252807696b4d4aa9f2b8", "score": "0.5682923", "text": "def get_questions(file):\r\n with open(file, 'r') as fp:\r\n for lines in fp:\r\n newline = lines.strip()\r\n yield newline", "title": "" }, { "docid": "c46d20b3e152e9af3900d4b5a2c7cf69", "score": "0.56772584", "text": "def _reader(path: str) -> Iterator[str]:\n with open(path, \"r\") as source:\n for line in source:\n yield line.rstrip()", "title": "" }, { "docid": "6b8e0b25a5c9a8e1482ebb61d9fd4462", "score": "0.56771106", "text": "def line_file2set(in_file):\n with open(in_file, 'r') as f:\n return set(i.strip('\\n\\r') for i in f)", "title": "" }, { "docid": "c7c1e22ea8279fe1ed802b8f886d2abe", "score": "0.5663412", "text": "def read_lines(filename):\n with open(filename, 'r') as f:\n lines = f.readlines();\n lines = filter(lambda line: not line.startswith('#'), lines)\n lines = map(lambda line: line.strip(), lines)\n return set(lines)", "title": "" }, { "docid": "24ddc8e3ca66f5ae01624ced49cdcddd", "score": "0.5655174", "text": "def readlines(self):\n # avoid using file as iterator, since it implies buffering\n while True:\n l = self.input.readline()\n if not l:\n break\n \n # remove trailing newline\n if l[-1] == '\\n':\n l = l[0:-1]\n\n logger.debug(\"< \" + l)\n yield l", "title": "" }, { "docid": "24171db3b8778c2569a4a663167a9016", "score": "0.5654681", "text": "def entries(self) -> Iterator[HashEntry]:\n while True:\n line = self._file_obj.readline()\n if len(line) == 0:\n break\n yield HashEntry.from_str(line, self.line_split, self.hash_type)", "title": "" }, { "docid": "f0812f6abbf793e2dbfcb025f841ec94", "score": "0.56400084", "text": "def textfile_generator(textfile, linebreak=True, encoding=None):\n for t in textfile:\n if len(t) > 0:\n if encoding is None:\n yield t.strip() + ('\\n' if linebreak else '')\n else:\n yield t.decode(encoding).strip() + ('\\n' if linebreak else '')", "title": "" }, { "docid": "ac6e25d8882cbb20b20231213f6d0851", "score": "0.56201625", "text": "def readDuplications( infile ):\n\n duplications = []\n first = True\n for line in infile:\n if first:\n first = False\n continue\n\n duplication = Duplication()\n duplication.readFromString( line )\n \n duplications.append( duplication )\n\n return duplications", "title": "" }, { "docid": "57463040e3ce2ef3a73a7a9cb50b9b29", "score": "0.5611486", "text": "def edges_generator( file_name ) :\n\twith gzip.open( file_name ) as f :\n\t\treader = csv.reader( f )\n\t\t# Ignore the header\n\t\treader.next( )\n\t\tfor edges in reader :\n\t\t\tnodes = [ int( node ) for node in edges ]\n\t\t\tyield nodes", "title": "" }, { "docid": "c9e857d10209340df5a15a4e359362a5", "score": "0.561072", "text": "def bloom_filter_set():\n with open('Proper.txt', 'r') as f:\n for line in f:\n yield line.strip()", "title": "" }, { "docid": "66eee951d6fbca056e1488d79d2cf9eb", "score": "0.55970323", "text": "def random_line(afile):\r\n line = next(afile)\r\n for num, aline in enumerate(afile):\r\n if random.randrange(num + 2):\r\n continue\r\n line = aline\r\n return line", "title": "" }, { "docid": "f7423389f7c1f9e2c339a870efa0b634", "score": "0.559054", "text": "def _LogLines(self):\n if (not self._temp_log_file_path or\n not os.path.isfile(self._temp_log_file_path)):\n yield '(N/A)'\n return\n with open(self._temp_log_file_path) as f:\n for line in f:\n yield line", "title": "" }, { "docid": "63e6da5b95d6a20aee21ebf6e48d3db4", "score": "0.55895424", "text": "def yield_line_from_files(\n *, 
source_files: typing.Iterable[str]\n) -> typing.Iterator[str]:\n for source_file in source_files:\n print(f\"---- Loading lines from {source_file}...\")\n with gzip.open(source_file, mode=\"rt\", encoding=\"utf8\") as source:\n yield from source\n print(f\"---- All lines read from {source_file}\")", "title": "" }, { "docid": "6a7aaba14cc186962d50a45a3dbb9547", "score": "0.55712813", "text": "def gen_tokens(self):\n for line in open(self.path, 'rb'):\n for tok in line.strip().lower().split():\n yield self.pre + tok", "title": "" }, { "docid": "f56d22d9009ac88183d9d8c8e090f00a", "score": "0.55665076", "text": "def bloom_filter_set():\n with open('/Users/harshinder/Downloads/homework2-master/Proper.txt','r') as f:\n for line in f:\n yield line.strip()", "title": "" }, { "docid": "40cbc9bb4009a0be231d6960d89dc431", "score": "0.5555802", "text": "def dataFromFile(fname):\n\tfile_iter = open(fname, 'rU')\n \tfor line in file_iter:\n\t\tline = line.strip().rstrip(',')\t\t\t\t# Remove trailing comma\n\t\trecord = frozenset(line.split(','))\n\t\tyield record", "title": "" }, { "docid": "413facdaf28d8c5d8428a0ea7d97bbb9", "score": "0.5553472", "text": "async def chunk_stories_from_file(file: str, batch_size: int = 100) -> Tuple[List[str], List[int]]:\n line_count = 1\n lines = []\n story_nums = []\n async with AIOFile(file, mode=\"rb\") as f:\n async for line in LineReader(f):\n line = line.decode('utf-8', errors=\"ignore\")\n line = line.replace(\"<newline>\", \"\")\n lines.append(line)\n story_nums.append(line_count)\n line_count += 1\n if len(lines) == batch_size:\n yield lines, story_nums\n lines = []\n story_nums = []\n\n yield lines, story_nums", "title": "" }, { "docid": "423ed894ad94b27936fc1952c2358bd3", "score": "0.55305266", "text": "def read_map_output(file):\n for line in file:\n yield line.strip().split(\"\\t\")", "title": "" }, { "docid": "423ed894ad94b27936fc1952c2358bd3", "score": "0.55305266", "text": "def read_map_output(file):\n for line in file:\n yield line.strip().split(\"\\t\")", "title": "" }, { "docid": "48ad078cd580cf5162f8bec731177da2", "score": "0.55272305", "text": "def extract_urls(input_file_path):\n with open(input_file_path) as input_file:\n for line in input_file:\n yield line.strip()", "title": "" }, { "docid": "5ead3bb4acffccdaf5547fd61186e22d", "score": "0.55080724", "text": "def line_iterator(readable_file, size=None):\n # type: (IO[bytes], Optional[int]) -> Iterator[bytes]\n read = readable_file.read\n line = []\n byte = b\"1\"\n if size is None or size < 0:\n while byte:\n byte = read(1)\n line.append(byte)\n if byte in b\"\\n\":\n yield b\"\".join(line)\n del line[:]\n\n else:\n while byte and size:\n byte = read(1)\n size -= len(byte)\n line.append(byte)\n if byte in b\"\\n\" or not size:\n yield b\"\".join(line)\n del line[:]", "title": "" }, { "docid": "d03cf6238506ca2a1b30562b2993e1f2", "score": "0.55058074", "text": "def generate(self):\r\n self.structure = []\r\n with open(self.file, \"r\") as file:\r\n for line in file:\r\n row = [square for square in line if square != '\\n']\r\n self.structure.append(row)", "title": "" }, { "docid": "bc1d619280f9bb3aec44c1724579c05c", "score": "0.549715", "text": "def simple_corpus_iterator(corpus_file):\r\n l = corpus_file.readline()\r\n while l:\r\n line = l.strip()\r\n if line: # Nonempty line\r\n yield line\r\n else: # Empty line\r\n yield None \r\n l = corpus_file.readline()", "title": "" }, { "docid": "d35bf9ecc08a740779723e6022a3504f", "score": "0.5494441", "text": "def gen_readlog(filename):\n log = 
gzip.open(filename, mode='rt', encoding='utf-8') if filename.endswith(\".gz\") \\\n else open(filename, mode='r', encoding='utf-8')\n for line in log:\n yield line.strip()\n log.close()", "title": "" }, { "docid": "dea9c364c66a3fdc1c95b22b5f9555d6", "score": "0.54815996", "text": "def read_emails_from_file(file, n=10000):\n with open(file, 'rb') as file_in:\n while True:\n next_n_lines = list(islice(file_in, n))\n if not next_n_lines:\n break\n yield next_n_lines", "title": "" }, { "docid": "ba7f5be38657b46b10bb252b0b34c855", "score": "0.547566", "text": "def _getSourceLines(self):\n filename = self.frame[1]\n lineNumber = self.frame[2]\n for snipLineNumber in range(lineNumber - 1, lineNumber + 2):\n yield (snipLineNumber,\n linecache.getline(filename, snipLineNumber).rstrip())", "title": "" }, { "docid": "11526d3c0e01b9056e179bffe8940343", "score": "0.5464331", "text": "def fasta_iter(fasta_name):\n \"first open the file outside \"\n fh = open(fasta_name)\n\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n faiter = (x[1] for x in groupby(fh, lambda line: line[0] == \">\"))\n\n for header in faiter:\n # drop the \">\"\n headerStr = header.__next__()[1:].strip()\n\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in faiter.__next__())\n\n yield (headerStr, seq)\n\n # --------------\n def add_hash(x,y):\n return x+\"_\"+hashlib.sha1(y.encode()).hexdigest()[:5]", "title": "" }, { "docid": "ef535ae269e8d4034b1f08c67b4ffcaa", "score": "0.5463162", "text": "def read_map_output(file):\n for line in file:\n yield line.strip().split(\"\\t\", 1)", "title": "" }, { "docid": "485e0090e0dc1608e0cb589f3ceb1b2b", "score": "0.5452605", "text": "def FastaIterator(fh):\n def readTotitle(fh):\n \"\"\"returns a tuple ([lines before the next title line], next tile line)\n \"\"\"\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)\n\n\n if type(fh) in StringTypes:\n fh = file(fh)\n \n preLines,nextTitleLine =readTotitle(fh)\n\n while nextTitleLine != None:\n title = nextTitleLine[1:].rstrip()\n preLines,nextTitleLine=readTotitle(fh)\n yield (title,''.join(map(lambda x: x.rstrip(),preLines)))", "title": "" }, { "docid": "485e0090e0dc1608e0cb589f3ceb1b2b", "score": "0.5452605", "text": "def FastaIterator(fh):\n def readTotitle(fh):\n \"\"\"returns a tuple ([lines before the next title line], next tile line)\n \"\"\"\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)\n\n\n if type(fh) in StringTypes:\n fh = file(fh)\n \n preLines,nextTitleLine =readTotitle(fh)\n\n while nextTitleLine != None:\n title = nextTitleLine[1:].rstrip()\n preLines,nextTitleLine=readTotitle(fh)\n yield (title,''.join(map(lambda x: x.rstrip(),preLines)))", "title": "" }, { "docid": "2ee004162c51ae5d0cbe6d9395b86cdc", "score": "0.5443738", "text": "def _generate_examples(self, filename):\n\t\twith open(filename) as file_in:\n\t\t\tlines = np.array(file_in.readlines())\n\t\tlines = sp.char.rstrip(lines)\n\t\tfor line in lines:\n\t\t\tx, y = self._line_consumer(line)\n\t\t\tself._max_x_length = max(len(x), self._max_x_length)\n\t\t\tif self._max_y_length > -1:\n\t\t\t\tself._max_y_length = max(len(y), self._max_y_length)\n\t\t\tyield x, y", "title": "" }, { "docid": "0f47ebc69d586bd99d8fece41a35da49", "score": "0.5438642", "text": "def 
read_file(filename):\n\ttry:\n\t\tf = open(filename)\n\t\twhile True:\n\t\t\tline = f.readline()\n\t\t\tif not line:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tyield line\n\tfinally:\n\t\tf.close()", "title": "" }, { "docid": "6e2d243e41f201ff205821ab608c892b", "score": "0.54300267", "text": "def files_to_lines(files):\n for filename in files:\n with open(filename) as file:\n data = file.read()\n data = data.splitlines()\n for lines in data:\n yield lines.strip()", "title": "" }, { "docid": "7a9e4615ee37fe16eb36b990826bb2db", "score": "0.54276913", "text": "def simple_iterfasta(filehandle):\n \n # Skip to first header\n for probeline in filehandle:\n if probeline.startswith('>'):\n break\n else: # nobreak\n raise ValueError('No headers in this file.')\n \n header = probeline.strip('>\\n')\n buffer = list()\n \n # Iterate over lines\n for line in map(str.rstrip, filehandle):\n if line.startswith('>'): \n yield header, ''.join(buffer)\n buffer.clear()\n header = line[1:]\n \n else:\n buffer.append(line)\n \n yield header, ''.join(buffer)", "title": "" }, { "docid": "2dcd86890bd8c3b743bac85a7dd997ac", "score": "0.5410768", "text": "def get_lines(path: str) -> Iterator[str]:\r\n try:\r\n fp: IO = open(path, 'r')\r\n except FileNotFoundError:\r\n raise FileNotFoundError(f\"Cant open {path}\")\r\n else:\r\n with fp:\r\n for line in fp:\r\n line = line.strip('\\n')\r\n while line.endswith('\\\\'):\r\n line = line[:-1] + fp.readline().strip('\\n')\r\n\r\n # if \"#\" in line:\r\n # if line.startswith('#'):\r\n # del line\r\n # else:\r\n # line = line.split('#', 1)[0].strip('\\n')\r\n # yield line\r\n # else:\r\n # yield line\r\n\r\n if not line.startswith('#'):\r\n yield line.split('#')[0]", "title": "" }, { "docid": "d2e5a49911937d7b0a1af9ad3aa4fdda", "score": "0.53985715", "text": "def read_file(file_path):\n file_object = codecs.open(file_path, mode='r', encoding='utf-8')\n lines = file_object.readlines()\n random.shuffle(lines)\n file_object.close()\n return lines", "title": "" }, { "docid": "7f719bd7300efbb564dbaea1dc127e68", "score": "0.53823835", "text": "def gen_set(file_name,*start_end):\n if 'gz' in file_name:\n iflines = gzip.open(file_name,'rb').readlines()\n else:\n iflines = open(file_name,'r').xreadlines()\n file_set = set([])\n x = iter(start_end)\n start_end_pairs = [(item, x.next()) for item in x]\n for line in iflines:\n keys = []\n for s,e in start_end_pairs:\n keys.append(line[s-1:e].strip())\n file_set.add('|'.join(keys))\n return file_set", "title": "" }, { "docid": "ac93e547bd587e088dc2630aa63f76c4", "score": "0.53783214", "text": "def read_mapper_output(file, separator='\\t'):\n\n # Go through each line\n for line in file:\n yield line", "title": "" }, { "docid": "9e42182f031217498b70144e1569db6c", "score": "0.5373325", "text": "def lines(file_name: str, known_encoding='utf-8') -> Iterator[str]:\n\tif file_name.endswith('.gz') or file_name.endswith('.gzip'):\n\t\twith io.TextIOWrapper(gzip.open(file_name, 'r'), encoding=known_encoding) as f:\n\t\t\tfor line in f: yield line.rstrip('\\n\\r')\n\telse:\n\t\twith open(file_name, 'r') as f:\n\t\t\tfor line in f: yield line.rstrip('\\n\\r')", "title": "" }, { "docid": "d43f28f412538efd9e55a83a82b96bb4", "score": "0.5355514", "text": "def yield_lines(file_path, make_lower=False, threshold_len=0):\n for line in io.open(file_path, mode='r', encoding='utf-8'):\n line = line.strip()\n if make_lower:\n line = line.lower()\n if len(line) > threshold_len:\n yield line", "title": "" }, { "docid": "c4ba5da653275484ca5bca2d544d6cb7", "score": 
"0.5344083", "text": "def read_lines(data_file):\n with codecs.open(data_file,'r',encoding='utf-8') as f:\n for line in f.readlines():\n yield line", "title": "" }, { "docid": "e90d0e0bc620a97a00a68383e8fcebec", "score": "0.5343919", "text": "def generate_from_file(file_path: str):\n try:\n with open(file_path, 'r') as file:\n return [generate_from_template(line) for line in file]\n except FileNotFoundError:\n logging.error(f\"File '{file_path}' doesn\\'t exist\")", "title": "" }, { "docid": "1377be9536c998a6ffa8faca2d25aea5", "score": "0.53409183", "text": "def _creategenerator(self):\n\t\tf=self._fileprefix()\n\t\tif f != self.filePrefix:\n\t\t\tself.filePrefix=f\n\n\t\t\tself.fileGenerator=itertools.imap( \n\t\t\t\t\t\t\tlambda n : self.filePrefix+\"_\"+str(n), \n\t\t\t\t\t\t\titertools.count(1)\n\t\t\t\t\t\t)", "title": "" }, { "docid": "fcb557780b899561cb945bb55e45b067", "score": "0.5340206", "text": "def yield_records( input_lines ):\n buffer = \"\"\n for line in input_lines:\n if \"\" == line:\n yield buffer[1:]\n buffer=\"\"\n else:\n buffer += (\" \" + line)\n if buffer != \"\":\n yield buffer[1:]", "title": "" }, { "docid": "2f238706573e06bf4bf42e8cb631c7a5", "score": "0.5333908", "text": "def create_data(filename, k, n, wc_prop):\n print(\"Reading in file...\\n\")\n f = open(filename)\n seqs = []\n for line in f.readlines()[:k]:\n seq = []\n for character in line[:n]:\n if random.random() > wc_prop:\n seq.append(int(character))\n else:\n seq.append('*')\n seqs.append(seq)\n return seqs", "title": "" }, { "docid": "f16425205b06c8f50eafcfc3bc167888", "score": "0.53305376", "text": "def _striplines(m):\n while True:\n v = m.readline().decode('utf-8')\n if v:\n yield v.strip()\n else:\n break", "title": "" }, { "docid": "d596a49b3bd5747e7b2830911dae5e7e", "score": "0.5330089", "text": "def __iter__(self):\n while True:\n al = self.file.readline()\n cl = self.file.readline()\n ll = self.file.readline()\n if al == '' or cl == '' or ll == '':\n if al == '' and (cl != '' or ll != ''):\n print \"CORRUPT LOGFILE!\"\n break\n\n action = tuple(map(float, al.strip().split()[1:]))\n cams = cl.strip().split()[2:]\n las = map(float, ll.strip().split()[1:])\n rs = las[::3]\n ps = las[1::3]\n ts = las[2::3]\n yield LogEntry(action, cams, zip(rs, ps, ts))", "title": "" }, { "docid": "1191b3e18b79af93f1089ef5003d91aa", "score": "0.5326357", "text": "def grep_file(self, filename):\n\n try:\n with open(filename) as file:\n for linenum, line in enumerate(file, 1): # count line numbers from 1\n line = line.rstrip('\\n') # remove trailing newline\n for line in self.grep_line(line):\n yield GrepSuccess(filename, line, linenum)\n except IOError as e:\n yield GrepError(filename, e.strerror)", "title": "" }, { "docid": "3903fded9610f4243d28d125579545be", "score": "0.53260267", "text": "def source_token_lines(self):\n if os.path.exists(self.filename):\n with open_source_file(self.filename) as f:\n for line in f:\n yield [('txt', line.rstrip('\\n'))]\n else:\n for line in self._iter_source_tokens():\n yield [('txt', line)]", "title": "" }, { "docid": "2d3260055646f2742b3bac814954029d", "score": "0.5323318", "text": "def listGenerator(filename):\n filevar = []\n with open(filename) as f:\n filevar = f.read().split('\\n')\n return(filevar)", "title": "" }, { "docid": "df557dd76b8e201c4dc6e775c4796172", "score": "0.531289", "text": "def read_streamed_file(file, chunk_size=10):\n counter = 0\n ret_lines = []\n with open(file) as f:\n while True:\n counter += 1\n line = f.readline()\n if line:\n 
ret_lines.append((counter, line))\n if (counter/chunk_size).is_integer() or not line:\n yield ret_lines\n ret_lines = []\n if not line:\n break", "title": "" }, { "docid": "f178464a4b4758309fba185bbca644ec", "score": "0.5301929", "text": "def gen_files():\n with open(tempfile) as f:\n for line in f.read().splitlines():\n if line.split(\",\")[1] == \"True\":\n if line.split(\",\")[0].split(\"/\")[1] in IGNORE:\n continue\n yield (line.split(\",\")[0].split(\"/\"))", "title": "" }, { "docid": "865816046aabfb053d5acdbf90041477", "score": "0.529185", "text": "def dataFromFile(fname):\n # file_iter = codecs.open(fname, 'rU','gbk',errors='ignore') # 张apriori\n file_iter = open(fname, 'rU') # apriori\n for line in file_iter:\n line = line.strip().rstrip(',') # Remove trailing comma\n record = frozenset(line.split(','))\n yield record", "title": "" }, { "docid": "6b837c2945d056dce4c6887e6307d044", "score": "0.5291131", "text": "def read_mapper_output(file, separator='\\t'):\n\tfor line in file:\n\t\tyield line.rstrip().split(separator, 1)", "title": "" }, { "docid": "558671318c2be151962e2924d96779be", "score": "0.52895737", "text": "def _read_file_by_lines(filename):\n with open(filename, \"r\") as f:\n return f.read().splitlines()", "title": "" }, { "docid": "0757b9ca20d263cd6e3ea154e97acba6", "score": "0.5285663", "text": "def LineGenerator(self):\r\n\t\t# self.gennum+=1\r\n\t\tself.gennum = random.randint(1, 4)\r\n\t\tline = [0, 0, 0, 0]\r\n\t\ta = random.randint(1, 4)\r\n\t\tif self.gennum % 3 == 0:\r\n\t\t\tline[a - 1] = 2\r\n\t\t\tif self.Trained == True:\r\n\t\t\t\tself.bombnum += 1\r\n\t\telse:\r\n\t\t\tline[a - 1] = 1\r\n\t\t\tif self.Trained == True:\r\n\t\t\t\tself.coinnum += 1\r\n\t\treturn line", "title": "" }, { "docid": "84c3d81f06f4a4b50a709fa79e7e3dd0", "score": "0.5285531", "text": "def trigram_bow_generator(filepath):\n \n for post in LineSentence(filepath):\n yield trigram_dictionary.doc2bow(post)", "title": "" }, { "docid": "8ebce5cad0a0fe27d5b87106dede8f9e", "score": "0.5277852", "text": "def read_fasta(fasta_file):\n\n try :\n input = (gzip.open if fasta_file.endswith('.gz') else open)(fasta_file)\n except IOError:\n print \"[Error] Cannot find fasta file : %s !\" % fasta_file\n exit(-1)\n sanitize = re.compile(r'[^ACTGN]')\n sanitize_seq_id = re.compile(r'[^A-Za-z0-9]')\n\n chrom_seq = []\n chrom_id = None\n seen_ids = set()\n\n for line in input:\n if line[0] == '>':\n if chrom_id is not None:\n yield chrom_id, ''.join(chrom_seq)\n\n chrom_id = sanitize_seq_id.sub('_', line.split()[0][1:])\n\n if chrom_id in seen_ids:\n error('BS Seeker found identical sequence ids (id: %s) in the fasta file: %s. 
Please, make sure that all sequence ids are unique and contain only alphanumeric characters: A-Za-z0-9_' % (chrom_id, fasta_file))\n seen_ids.add(chrom_id)\n\n chrom_seq = []\n\n else:\n chrom_seq.append(sanitize.sub('N', line.strip().upper()))\n\n yield chrom_id, ''.join(chrom_seq)\n\n input.close()", "title": "" }, { "docid": "ffcfbd867a212394466b6982bf04de20", "score": "0.5275022", "text": "def gen_list(file_name,*start_end):\n if 'gz' in file_name:\n iflines = gzip.open(file_name,'rb').readlines()\n else:\n iflines = open(file_name,'r').xreadlines()\n x = iter(start_end)\n start_end_pairs = [(item, x.next()) for item in x]\n for line in iflines:\n keys = []\n for s,e in start_end_pairs:\n keys.append(line[s-1:e].strip())\n yield '|'.join(keys)", "title": "" }, { "docid": "35b30ee2299b06434b1f7415fcbb1c0e", "score": "0.5272629", "text": "def trigram_bow_generator(filepath):\n \n for review in LineSentence(filepath):\n yield trigram_dictionary.doc2bow(review)", "title": "" }, { "docid": "2bf835f0f5789dcc8e0433a94bf34c97", "score": "0.5248813", "text": "def process_file(self, path):\n with open(path) as file_handler:\n while True:\n yield next(csv.reader(file_handler, delimiter=\",\"))", "title": "" }, { "docid": "c3db5c251f04ee36e73d3fa8178818c4", "score": "0.524564", "text": "def read_mapper_output(file, separator):\n for line in file:\n yield line.rstrip().split(separator, 2)", "title": "" }, { "docid": "0268361e109c7361d608a3ceba2d9a29", "score": "0.52434385", "text": "def get_entries(log_file):\r\n with open(log_file, 'rt') as f:\r\n for i,line in enumerate(f):\r\n entry = decode_log_line(line)\r\n if entry:\r\n yield [log_file,i] + entry", "title": "" }, { "docid": "ca834d788c985c6f6bd39bc20040eef7", "score": "0.5236938", "text": "def rows(self):\n with self.input().open('r') as fobj:\n for line in fobj:\n yield line.strip('\\n').split('\\t')", "title": "" }, { "docid": "87a0a97f249e8797b5517911a91f6f43", "score": "0.5236725", "text": "def line_review(filename):\n \n with codecs.open(filename, encoding='utf_8') as f:\n for review in f:\n yield review.replace('\\\\n', '\\n')", "title": "" }, { "docid": "3a4d1859fac538ad0dc379589a2b8007", "score": "0.5229629", "text": "def process_fasta(infile):\r\n f = open(infile, \"r\")\r\n \r\n name, seq = None, []\r\n for line in f:\r\n if line.startswith(\">\"):\r\n # when we get to the next sequence, yield the one before it\r\n # seq is a list, so we joing everything into one string\r\n if name: \r\n yield (name, ''.join(seq))\r\n name, seq = line.strip(), [] # parsing starts here\r\n # sequence spanning multiple lines\r\n else:\r\n seq.append(line.strip())\r\n # handles the last sequence in the file\r\n if name: \r\n yield (name, ''.join(seq))\r\n \r\n f.close()", "title": "" }, { "docid": "a7d9509ba56ca55b2897284ffa8edd25", "score": "0.52281916", "text": "def iterate_lines(self):\n for i in self.agp_lines:\n yield i", "title": "" }, { "docid": "e2f3edb2747b43b2bc0e965bddc35312", "score": "0.52089405", "text": "def parse_flatfile(filehandle: IO) -> Iterator[Entry]:\n acs, pids = [], []\n for line in filehandle:\n if line.startswith(\"AC\"):\n tokens = line[5:].strip(\";\\n\").split(\"; \")\n acs += tokens\n elif line.startswith(\"DR EMBL\"):\n tokens = line[5:].strip(\";\\n\").split(\"; \")\n pid = tokens[2]\n if pid != \"-\":\n pids.append(pid)\n elif line == \"//\\n\":\n yield Entry(acs, pids)\n acs, pids = [], []", "title": "" }, { "docid": "eb7846dee551e9e8b55ddff5b784b863", "score": "0.5208255", "text": "def lines(self, filePath):\n 
return list(self.linerator(filePath))", "title": "" }, { "docid": "d0a4d76ef49ac546ff22f22c4588cefc", "score": "0.52014", "text": "def take(lines, n, header):\n while True:\n count = 0\n result = []\n while count < n:\n try:\n line = next(lines)\n except StopIteration:\n if result:\n yield result\n return\n else:\n if line != header:\n result.append(line)\n count += 1\n if count == n:\n yield result\n break", "title": "" }, { "docid": "d4846a4a03801b35fa36667daf8d4240", "score": "0.52007395", "text": "def _universal_newlines(fp):\n # if file was opened with universal newline support we don't need to convert\n if 'U' in getattr(fp, 'mode', ''):\n for line in fp:\n yield line\n else:\n for line in fp:\n line = line.replace(b'\\r\\n', b'\\n').replace(b'\\r', b'\\n')\n for piece in line.split(b'\\n'):\n yield piece", "title": "" }, { "docid": "30366ea8cf46964d044a5c139112a06c", "score": "0.51954883", "text": "def get_next_line(self):\n # patch to skip first few lines\n num_of_lines_to_ignore = 4\n for line in self.lines:\n if num_of_lines_to_ignore:\n num_of_lines_to_ignore -= 1\n continue\n if line == \"\\r\\n\" or line[0]==\"-\": # ignore empy lines and irrelevant lines\n continue\n items = self.parse_line(line)\n yield items", "title": "" }, { "docid": "cc9108a9b572257bdc1c42c3fd03a732", "score": "0.51861453", "text": "def read(file):\n if not file:\n for line in sys.stdin: yield line\n else:\n with open(file, 'r') as f: \n for line in f: yield line", "title": "" }, { "docid": "62a32db4af506b34055245bcd38c21c9", "score": "0.51813376", "text": "def unique_config_sections(self, config_file):\r\n # taken from https://github.com/allanzelener/YAD2K/blob/master/yad2k.py\r\n section_counters = defaultdict(int)\r\n output_stream = io.StringIO()\r\n with open(config_file) as fin:\r\n for line in fin:\r\n if line.startswith('['):\r\n section = line.strip().strip('[]')\r\n _section = section + '_' + str(section_counters[section])\r\n section_counters[section] += 1\r\n line = line.replace(section, _section)\r\n output_stream.write(line)\r\n output_stream.seek(0)\r\n return output_stream", "title": "" } ]
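Many of the negative passages above stream and deduplicate lines lazily. As a brief contrast (a general Python note, not drawn from any passage): when the data already fits in memory, an eager order-preserving dedup is a single expression.

```python
# Eager, order-preserving dedup of an in-memory sequence; dicts preserve
# insertion order in Python 3.7+. Complements the lazy generator approach
# used above when the input is a large file.
lines = ["a\n", "b\n", "a\n", "c\n"]
print(list(dict.fromkeys(lines)))  # ['a\n', 'b\n', 'c\n']
```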
217c6404ce15f75ab8975641f863146e
Discretization of scattering energies.
[ { "docid": "0d2a37ec01b25e7aa9ff57dfa9c9c6a7", "score": "0.53659004", "text": "def discrete_energies(E, dE, N=1024, WE=8.):\n # Discrete energies that contains the \"elastic\" points:\n # nu0=nu2 and nu0=nu3.\n NdE = np.ceil(N / 2 / (dE / 2 + WE) * dE / 2)\n Lq = N / 2 / NdE * dE / 2 if np.abs(dE) > 0 else WE\n qs = np.linspace(-Lq, Lq, N, endpoint=False) + E / 2\n\n return qs", "title": "" } ]
[ { "docid": "336d994e96b34208653edd65317db622", "score": "0.5948378", "text": "def _spatial_dispersal(self, iteration: int, n_iterations: int) -> None:\n\n coef = ((n_iterations - iteration) ** self.e) / (\n (n_iterations + c.EPSILON) ** self.e\n )\n\n self.sigma = coef * (self.init_sigma - self.final_sigma) + self.final_sigma", "title": "" }, { "docid": "5fe34fa70cd049621f2fe32df0315bb1", "score": "0.5892061", "text": "def calculate_energy_slope(self):\n \n# e = self.elev_c + self.depth + self.U**2 / (2. * g)\n# # e = self.depth + self.U**2 / (2. * g)\n# \n# self.domain.set_quantity('energy', e, location='centroids')\n \n# quant = self.domain.quantities['energy']\n# quant.compute_gradients()\n# \n# self.S = num.sqrt((quant.x_gradient/ self.dx)**2 + (quant.y_gradient/ self.dx)**2)\n# \n# \n quant = self.domain.quantities['elevation']\n quant.compute_gradients()\n \n self.S = num.sqrt((quant.x_gradient/ self.dx)**2 + (quant.y_gradient/ self.dx)**2)\n self.S[self.S > num.mean(self.S)/2] = num.mean(self.S)/2\n\n # self.S = num.sqrt((quant.x_gradient/ self.dx)**2 + (quant.y_gradient/ self.dx)**2)\n \n# self.S[self.S <= 0.000001] = 0.000001\n# self.S[self.S > 0.01] = 0.01\n \n# self.domain.set_quantity('ee', self.S, location='centroids')\n # self.domain.set_quantity('vel', self.U, location='centroids')\n \n return self.S", "title": "" }, { "docid": "368ba7157b0a8c9ea0bf84e48722dfa9", "score": "0.57776797", "text": "def sediment_flux(self):\n \n normal_vels = num.zeros((self.num_cells,3))\n \n normal_vels[self.ind,:] = ((self.normals[self.ind,0::2] *\n self.xmom_e[self.ind,:] +\n self.normals[self.ind, 1::2] *\n self.ymom_e[self.ind,:]) /\n self.depth_e[self.ind,:])\n\n \n edge_flux = (self.depth_e * self.edgelengths * normal_vels * self.dt)\n \n neighbour_conc = self.conc[self.neighbours]\n \n sed_flux = edge_flux * self.conc[:,num.newaxis]\n \n sed_flux[edge_flux < 0] = (\n edge_flux[edge_flux < 0] * neighbour_conc[edge_flux < 0])\n \n \n for k in self.bdry_indices:\n for i in range(3):\n\n n = self.neighbours[k,i]\n \n if n < 0:\n \n sed_flux[k,i] = edge_flux[k,i] * self.inflow_concentration\n \n \n sed_vol_change = num.sum(-sed_flux, axis=1)\n \n sed_vol_in_cell = self.conc * self.depth * self.areas\n new_sed_vol_in_cell = num.maximum(sed_vol_in_cell + sed_vol_change, 0)\n \n self.conc[:] = 0.\n self.conc[self.ind] = (new_sed_vol_in_cell[self.ind] /\n (self.depth[self.ind] * self.areas[self.ind]))\n\n self.conc[self.conc > 0.2] = 0.2\n \n self.domain.quantities['concentration'].\\\n set_values(self.conc, location = 'centroids')", "title": "" }, { "docid": "5e42f5790082b25372043f5ecc5fa399", "score": "0.57428986", "text": "def dispersive_spread():\n\n def sod(f):\n return -1/2 * f**2\n\n def gamma(f):\n return 1\n\n def kerr(t, x, u):\n return abs(u)**2 * u\n\n t = numpy.linspace(0, 20, 2**9)\n x = numpy.linspace(-20, +20, 2**10)\n\n u0 = 0.5 * sech(x)\n result = gnlse(t, x, u0, sod, gamma, kerr)\n\n plt.figure(figsize=(4, 6))\n\n plt.pcolormesh(\n x, t, abs(result.u),\n cmap=\"magma\")\n plt.xlabel(r\"Coordinate $x$, a.u.\")\n plt.ylabel(r\"Time $t$, a.u.\")\n plt.tight_layout()\n\n plt.show()", "title": "" }, { "docid": "ff93b96f10c7f4b258091be6de11b3ec", "score": "0.5726917", "text": "def __calc_sse(self):\r\n sse = 0\r\n for i in range(self.n_data):\r\n for j in range(self.n_cluster):\r\n sse += self.cluster[i][j] * np.sum((self.data[i] - self.mu[j])**2)\r\n return sse", "title": "" }, { "docid": "37977d5dac1fe5f235047af3848966c9", "score": "0.56725", "text": "def __calc_sse(self):\r\n 
sse = 0\r\n for i in range(self.n_cluster):\r\n sse += np.sum((self.data[self.idx==i] - self.centroid[i])**2)\r\n return sse", "title": "" }, { "docid": "7b0eb556f49a4d8d62238d2fdafa2f46", "score": "0.5662908", "text": "def derivative_of_scale_space(cutouts,sigmas):\n # part E\n kernel_x = np.array([[1,0,-1]]) # as specified in the asignment\n kernel_y = kernel_x.T\n \n dx_for_all_sigmas={} #this dictionary will contain all the images transformed by kernel_x \n dy_for_all_sigmas ={} #this dictionary will contain all the images transformed by kernel_y\n \n #apply botht the filters\n for i,(image,sigma) in enumerate(zip(cutouts,sigmas)):\n \n #apply the filter \n filter_x = cv2.filter2D(image,-1,kernel_x)\n filter_y = cv2.filter2D(image,-1,kernel_y)\n \n dx_for_all_sigmas[sigma] = filter_x\n dy_for_all_sigmas[sigma] = filter_y\n \n cv2.imshow(\"kernel_x on image \"+str(i),filter_x)\n cv2.imshow(\"kernel_y on image \"+str(i),filter_y)\n \n cv2.waitKey(0)\n cv2.destroyAllWindows()\n \n return dx_for_all_sigmas,dy_for_all_sigmas", "title": "" }, { "docid": "18d0e6f0cfb6b165769418eb6731244d", "score": "0.5648625", "text": "def _e_step(self, X):\n n_samples = len(X)\n res = np.zeros((n_samples, self.n_components))\n\n for idx, data_point in enumerate(X):\n denominator = np.sum([self._weights[j] * self.density_func(data_point, self._means[j], self._vars[j]) for j in range(self.n_components)])\n for j in range (self.n_components):\n numerator = self._weights[j] * self.density_func(data_point, self._means[j], self._vars[j])\n res[idx][j] = numerator / denominator\n\n epslon = 1e-10\n for i in range(res.shape[0]):\n if (res[i,0] == 0):\n res[i,0] += epslon\n res[i,1] -= epslon\n if (res[i,1] == 0):\n res[i,0] -= epslon\n res[i,1] += epslon\n\n return res", "title": "" }, { "docid": "d5a0cc566b1d16bb5bf800e68913e87d", "score": "0.56152195", "text": "def energy(f) :\n\n\toutput = 0.\n\t\n\toutput += 1/( 2* DeltaX ) * np.sum( np.multiply( xStaggered, np.square( f[1:,0] - f[:-1,0]) + np.square( f[1:,1] - f[:-1,1]) ) )\n\t\n\toutput += DeltaX * np.sum( np.divide( np.square( np.square(f[1:,0]) - np.square(f[1:,1])) , np.multiply( x[1:], np.square(f[1:,0]) + np.square(f[1:,1])) ) ) \n\t\n\treturn output", "title": "" }, { "docid": "a00d995ca976abdc8f335d60c083cc92", "score": "0.5599425", "text": "def ecdf_representation(D, n):\n m = np.mean(D)\n X = []\n for d in xrange(D.shape[1] + 1):\n func = ECDF(([D[:, d] + np.random.randn(np.shape(D[:, d])) * 0.01]))\n ll = func(np.linspace(0, 1, n))\n X = [X, ll]\n X = [X, m]\n plt.plot(X)\n plt.show()\n return X", "title": "" }, { "docid": "634e12ae636290e3b19db0e73ecdb18a", "score": "0.55871993", "text": "def gradEnergy(f) :\n\t\n\th = np.divide( np.square( f[:,0] ) - np.square( f[:,1] ), np.square( f[:,0] ) + np.square( f[:,1] ) )\n\t\n\tgrad = np.zeros((N,2))\n\t\n\tgrad[1:-1,0] -= np.multiply( xStaggered[1:], f[2:,0] - f[1:-1,0] ) / DeltaX + np.multiply( xStaggered[:-1], f[:-2,0] - f[1:-1,0] ) / DeltaX\n\t\n\tgrad[1:-1,1] -= np.multiply( xStaggered[1:], f[2:,1] - f[1:-1,1] ) / DeltaX + np.multiply( xStaggered[:-1], f[:-2,1] - f[1:-1,1] ) / DeltaX\n\n\tgrad[1:-1,0] +=\tnp.divide(4 * DeltaX * np.multiply( f[1:-1,0], h[1:-1] ) - 2 * DeltaX * np.multiply( f[1:-1,0], np.square(h[1:-1]) ), x[1:-1]) \n\t\n\tgrad[1:-1,1] +=\tnp.divide(4 * DeltaX * np.multiply( f[1:-1,1], - h[1:-1] ) - 2 * DeltaX * np.multiply( f[1:-1,1], np.square(h[1:-1]) ), x[1:-1]) \n\t\n\t# On ajoute la composante en 0. 
On suppose que f[0,0] = f[0,1]\n\tgrad[0,0] = xStaggered[0] * ( 2 * f[0,0] - f[1,0] - f[1,1] ) / DeltaX\n\t\n\treturn grad", "title": "" }, { "docid": "11340756135fbb12dc3f95b7d9ae61d9", "score": "0.55824184", "text": "def get_electron_density(c_states, e_fermi, c_mass_array, npoints,degen, smearing, beta_eV, band_contribution = False, avg_eff_mass = False):\n # The 1D DOS is (including the factor of 2 for the spin):\n # g(E) = sqrt(2 * effmass)/(pi*hbar) * 1/sqrt(E-E0)\n # where effmass is the band effective mass, E0 is the band edge.\n #\n # I rewrite it as g(E) = D * sqrt(meff/m0) / sqrt(E-E0)\n # where (meff/m0) is simply the effective mass in units of the electron free mass,\n # and D=sqrt(2) / pi / sqrt(HBAR2OVERM0) and will be in units of 1/ang/sqrt(eV)\n D = n.sqrt(2.) / n.pi / n.sqrt(HBAR2OVERM0)\n\n el_density = n.zeros(npoints) \n \n contrib = n.zeros(len(degen)) \n \n avg_mass = n.zeros((1,3))\n \n # All the conduction band minima have to be taken into account with the appropriate degeneracy\n for j in range(len(degen)): #number of minima\n deg = degen[j]\n if j > 0 and avg_eff_mass == True:\n avg_mass = n.append(avg_mass,[[0.,0.,0.]],axis=0) # so that bands are separated by a line of zeros\n for state_energy, state in c_states[j]:\n energy_range = 20. # eV, to be very safe\n \n #if state_energy > e_fermi:\n # continue\n square_norm = sum((state)**2)\n # I average the inverse of the effective mass\n # Both state and c_mass_array should have the same length\n # NOT SURE: square_norm or sqrt(square_norm) ? AUGU: I'm pretty sure it's square_norm and I changed it\n averaged_eff_mass = 1./(sum(state**2 / c_mass_array[j]) / square_norm)\n if avg_eff_mass == True:\n avg_mass = n.append(avg_mass,[[state_energy,state_energy-e_fermi,averaged_eff_mass]],axis=0)\n \n if not smearing and state_energy < e_fermi:\n # At T=0, integrating from E0 to Ef the DOS gives\n # D * sqrt(meff) * int_E0^Ef 1/(sqrt(E-E0)) dE =\n # D * sqrt(meff) * 2 * sqrt(Ef-E0) [if Ef>E0, else zero]\n el_density += deg * D * n.sqrt(averaged_eff_mass) * 2. * n.sqrt(e_fermi - state_energy) * (\n state**2 / square_norm)\n contrib[j] += n.sum(deg * D * n.sqrt(averaged_eff_mass) * 2. * n.sqrt(e_fermi - state_energy) * (\n state**2 / square_norm))\n \n elif smearing and state_energy-e_fermi < energy_range: # more than enough margin\n # Need to numerically integrate the density of state times the occupation given by MV_smearing\n # to compute the integral, one uses the trick explained there to avoid singularities: http://math.stackexchange.com/questions/1351734/numerical-integration-of-divergent-function\n n_int = 10000. 
# number of intervals\n # change of variable E = state_energy + t**2\n dt = n.sqrt(energy_range)/n_int \n t = n.arange(0,n.sqrt(energy_range),dt) \n #plt.figure(1)\n #plt.plot(energy,deg * D * sqrt(averaged_eff_mass) * 1./n.sqrt(energy-state_energy),\"b\")\n #plt.plot(energy,MV_smearing(energy,beta_eV,e_fermi),\"r\")\n #plt.plot(energy,g_times_f,\"k\")\n #plt.title(\"%s\"%e_fermi)\n #plt.show()\n temp_dens = 2*deg * D * n.sqrt(averaged_eff_mass) * n.trapz(MV_smearing(state_energy+t**2,beta_eV,e_fermi),dx=dt) * (state**2 / square_norm)\n el_density += temp_dens\n contrib[j] += n.sum(temp_dens)\n \n # Up to now, el_density is in 1/ang; we want it in 1/cm\n if band_contribution == False:\n return el_density * 1.e8\n elif band_contribution == True and avg_eff_mass == False:\n return el_density * 1.e8, contrib * 1.e8\n else:\n return el_density * 1.e8, contrib * 1.e8, avg_mass", "title": "" }, { "docid": "d7e79ef7f472ef5229bcf7422b86fa4d", "score": "0.55435175", "text": "def create_clumpy_system(num_concepts=200,n_dim=2,n_epicentres =1,linearsep = 1,plot=False,seed = 456):\n \n #Settings\n np.random.seed(seed)\n epicentre_range=1 \n \n #Check number of epicentres allows 40 items per epicentre\n min_concepts_per_epicentres = 40\n max_epicentres = num_concepts // min_concepts_per_epicentres\n assert (n_epicentres <= max_epicentres),\"Max number of epicentres is %d for %d concpets\" %(max_epicentres,num_concepts)\n \n #Get sigma\n #sigma = 1/linearsep\n #sigma = 1/math.exp(linearsep)\n sigma = 1/linearsep**3\n \n # Create X (from create_n_systems)\n X_cov = np.zeros((n_dim, n_dim), dtype = np.float32)\n np.fill_diagonal(X_cov, sigma)\n \n means = np.random.uniform(\n -epicentre_range, epicentre_range, size = (n_epicentres, n_dim)\n )\n\n X = []\n for i in range(num_concepts):\n mean = i % n_epicentres\n value = np.random.multivariate_normal(\n mean=means[mean], cov=X_cov, size=1\n )\n X.append(value)\n X = np.array(X,dtype = np.float32)\n X = np.squeeze(X)\n \n if n_dim == 2 and plot == True:\n fig = plt.figure()\n plt.scatter(\n X[:,0], X[:,1]\n ) \n plt.title(\"Synthetic data\")\n plt.show()\n \n X = np.float32(X)\n return X", "title": "" }, { "docid": "8e36b33eb8457ecfd0736eaf7a6d81b2", "score": "0.55198014", "text": "def energy(img):\n raise NotImplementedError", "title": "" }, { "docid": "c39620ea042ec12d4efccdc900809a10", "score": "0.55122936", "text": "def exc(self,n,der=0):\n return self.e_x(n,der=der)+self.e_corr(n,der=der)", "title": "" }, { "docid": "c39620ea042ec12d4efccdc900809a10", "score": "0.55122936", "text": "def exc(self,n,der=0):\n return self.e_x(n,der=der)+self.e_corr(n,der=der)", "title": "" }, { "docid": "eeca89241e66882039db4d79a290c56a", "score": "0.5507301", "text": "def get_hole_density(v_states, e_fermi, v_mass_array, npoints, degen, smearing, beta_eV, band_contribution = False, avg_eff_mass = False):\n D = n.sqrt(2.) / n.pi / n.sqrt(HBAR2OVERM0)\n \n h_density = n.zeros(npoints) \n \n avg_mass = n.zeros((1,3))\n \n contrib = n.zeros(len(degen))\n for j in range(len(degen)):\n deg = degen[j]\n if j > 0 and avg_eff_mass == True:\n avg_mass = n.append(avg_mass,[[0.,0.,0.]],axis=0) # so that bands are separated by a line of zeros\n for state_energy, state in v_states[j]:\n energy_range = 20. # eV to be extra safe\n \n # Note that here the sign is opposite w.r.t. 
the conduction case\n #if state_energy < e_fermi:\n # continue\n square_norm = sum((state)**2)\n averaged_eff_mass = 1./(sum(state**2 / v_mass_array[j]) / square_norm)\n if avg_eff_mass == True:\n avg_mass = n.append(avg_mass,[[state_energy,state_energy-e_fermi,averaged_eff_mass]],axis=0)\n \n if not smearing and state_energy > e_fermi:\n h_density += deg * D * n.sqrt(averaged_eff_mass) * 2. * n.sqrt(state_energy - e_fermi) * (\n state**2 / square_norm)\n contrib[j] += n.sum(deg * D * n.sqrt(averaged_eff_mass) * 2. * n.sqrt(state_energy - e_fermi) * (\n state**2 / square_norm)) \n \n \n elif smearing and e_fermi-state_energy < energy_range:\n \n n_int = 10000. # number of intervals\n # change of variable E = state_energy + t**2\n dt = n.sqrt(energy_range)/n_int \n t = n.arange(0,n.sqrt(energy_range),dt) \n temp_dens = 2*deg * D * n.sqrt(averaged_eff_mass) * n.trapz(MV_smearing(2*e_fermi-state_energy+t**2,beta_eV,e_fermi),dx=dt) * (state**2 / square_norm)\n h_density += temp_dens\n contrib[j] += n.sum(temp_dens)\n \n # to keep a trace of old work\n \"\"\"\n n_int = 100000. # 500 intervals\n delta_E = energy_range/n_int \n energy = n.arange(state_energy-energy_range,state_energy,delta_E)\n energy -= 1.e-8 # to avoid dividing by zero\n g_times_f = deg * D * sqrt(averaged_eff_mass) * 1./n.sqrt(state_energy-energy) * MV_smearing(2*e_fermi-energy,beta_eV,e_fermi)\n #plt.figure(2)\n #plt.plot(energy,deg * D * sqrt(averaged_eff_mass) * 1./n.sqrt(state_energy-energy),\"b\")\n #plt.plot(energy,MV_smearing(2*e_fermi-energy,beta_eV,e_fermi),\"r\")\n #plt.plot(energy,g_times_f,\"k\")\n #plt.title(\"%s\"%e_fermi)\n #plt.show()\n h_density += n.trapz(g_times_f,dx=delta_E) * (state**2 / square_norm)\n contrib[j] += sum(n.trapz(g_times_f,dx=delta_E) * (state**2 / square_norm))\n \"\"\" \n if band_contribution == False:\n return h_density * 1.e8\n elif band_contribution == True and avg_eff_mass == False:\n return h_density * 1.e8, contrib * 1.e8\n else:\n return h_density * 1.e8, contrib * 1.e8, avg_mass", "title": "" }, { "docid": "0d11cc5570f55f3bad4e16547a84585f", "score": "0.55023724", "text": "def ecdf(data):\r\n\r\n # Number of data points: n\r\n n = len(data)\r\n\r\n # x-data for the ECDF: x\r\n x = np.sort(data)\r\n\r\n # y-data for the ECDF: y\r\n y = np.arange(1, len(x)+1) / n\r\n\r\n return x, y", "title": "" }, { "docid": "84d93d981d6cd013de6968fe30628c3f", "score": "0.5473807", "text": "def main():\n\n # Create a dummy spectrum\n spec = np.zeros(100)\n wav = np.arange(len(spec))\n\n # Superimpose an emission line on the spectrum\n g = models.Gaussian1D(amplitude=3, mean=30.0, stddev=10.0)\n gauss_1d_emission = g(wav)\n\n spec = spec + gauss_1d_emission\n\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(wav, spec)\n plt.show()\n \"\"\"\n\n # Now assign some width to the dummy galaxy\n # in pixels\n # say 1-sigma if it is shaped like a Gaussian\n # will deal with Sersic and other complicated profiles later\n galaxy_width = 10\n num_sigma = 2 # e.g., going from -2 to +2 sigma of galaxy width\n\n # Assume that each time the spectrum shifts \n # by some amount epsilon. I'm leaving this param\n # free for now.\n # in units of wavelength\n # this is saying that if a given pixel has the center of\n # the emission line at lambda then the adjacent pixel along\n # the dispersion direction will have the line center at\n # lambda + epsilon\n epsilon = 1.0\n # BE CAREFUL!! 
This currently means epsilon steps of wavelength\n # so that if the wavelenght is sampled at 1 A then epsilon is\n # 30 A but if the wavelength array is sampled at 10 A then epsilon\n # means 300 A.\n\n print(\"Width of galaxy provided:\", galaxy_width)\n print(\"Will consider between +- these many sigmas:\", num_sigma)\n print(\"Epsilon:\", epsilon)\n\n exten = int(epsilon * num_sigma * galaxy_width)\n print(\"Extension:\", exten)\n total_pix = num_sigma * galaxy_width\n print(\"Total pixels to consider:\", total_pix)\n\n final_spec = np.zeros(shape=(total_pix, (len(spec) + exten)))\n print(\"Shape of final spectra array:\", final_spec.shape)\n print(\"\\n\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for pix in range(total_pix):\n\n # Create shifted spectrum\n i = int(epsilon*(1+pix)) # starting index for shift\n shifted_spec = np.zeros(len(spec) + i)\n shifted_spec[i:] = spec\n\n print(\"\\nAt pixel:\", pix+1)\n print(\"Shifting to index:\", i)\n print(\"len of shifted spec:\", len(shifted_spec))\n\n # Now fold it into the final spectrum\n final_spec[pix, :len(shifted_spec)] = shifted_spec\n\n ax.plot(shifted_spec)\n\n final_spec_comb = np.sum(final_spec, axis=0)\n print(\"Shape of final LSF broadened spectrum:\", final_spec_comb.shape)\n\n ax.plot(final_spec_comb, color='k')\n plt.show()\n\n # -----------\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n ax1.imshow(final_spec)\n plt.show()\n\n return None", "title": "" }, { "docid": "e9a7d39f1f79bb0d7d90532104f5a8df", "score": "0.54614717", "text": "def spectralEmissionDensity(self):\n return self.sed", "title": "" }, { "docid": "ad29da81997d4ea6f22fb296cbb4f715", "score": "0.5445146", "text": "def erosion(self):\n \n self.Ke = 0.2e-6 / self.tau_crit**(0.5)\n shear_stress = self.rho_w * self.u_star**2\n \n# self.domain.set_quantity('shear_stress', shear_stress, location='centroids')\n \n \n edot = self.Ke * (shear_stress[self.ind] - self.tau_crit)\n edot[edot<0.0] = 0.0\n \n return edot", "title": "" }, { "docid": "8a8f63223cb6f4c076dc3aa6faebd4df", "score": "0.5443789", "text": "def __energy(self, a, b, c, d):\n return (a + b - c) / (2. 
* d)", "title": "" }, { "docid": "6b7eb9050f7193e88db86f1e325e7ed1", "score": "0.54365504", "text": "def calc_energy(img):\n img = img.astype('float32')\n\n R = img[:,:,0]\n G = img[:,:,1]\n B = img[:,:,2]\n\n filter_du = np.array([\n [1.0, 2.0, 1.0],\n [0.0, 0.0, 0.0],\n [-1.0, -2.0, -1.0],\n ])\n \n filter_dv = np.array([\n [1.0, 0.0, -1.0],\n [2.0, 0.0, -2.0],\n [1.0, 0.0, -1.0],\n ])\n\n # Sobel filter\n Gx = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])\n Gy = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])\n convolved_r = np.absolute(convolve(R, filter_dv, mode='constant', cval=0)) + np.absolute(convolve(R, filter_du, mode='constant', cval=0))\n convolved_g = np.absolute(convolve(G, filter_dv, mode='constant', cval=0)) + np.absolute(convolve(G, filter_du, mode='constant', cval=0))\n convolved_b = np.absolute(convolve(B, filter_dv, mode='constant', cval=0)) + np.absolute(convolve(B, filter_du, mode='constant', cval=0))\n\n energy_map = convolved_r + convolved_g + convolved_b\n\n return energy_map", "title": "" }, { "docid": "380ee9ca3c71ab499948c539daff84a9", "score": "0.5423679", "text": "def energy(self, data):\n return -be.dot(data, self.params.loc)", "title": "" }, { "docid": "380ee9ca3c71ab499948c539daff84a9", "score": "0.5423679", "text": "def energy(self, data):\n return -be.dot(data, self.params.loc)", "title": "" }, { "docid": "e092de3303767b748c8417ca5f2d978e", "score": "0.53795356", "text": "def energy_grad(self, x):\n return self.M*x - self.y", "title": "" }, { "docid": "1b603cb20b69739a2d07596769a64d53", "score": "0.5379365", "text": "def generateDensities(self):\n densities = [] # storing them in a list\n for i in range(len(self.road)+1): \n densities.append(i/len(self.road))\n return densities", "title": "" }, { "docid": "2fea24198f907b64540abc4de65b5c1b", "score": "0.5378305", "text": "def KDE(data_array, bandwidth=None):\n data = data_array\n d_range = data.max()-data.min()\n n = len(data)\n x_axis = np.linspace(data.min()-0.4*d_range,data.max()+0.4*d_range,100)\n\n sigma = np.std(data)\n if bandwidth is None:\n bandwidth = ((4*(sigma**5))/(3*n))**(0.2) # std dev of kernel distributions\n \n kernel_list = []\n for point in data:\n # normal distribution at each point\n kernel = stats.norm(point,bandwidth).pdf(x_axis)\n kernel_list.append(kernel)\n \n #Scale for plotting\n kernel = kernel / kernel.max()\n kernel = kernel *0.25\n plt.plot(x_axis,kernel,color = 'grey',alpha=0.5)\n #plt.ylim(0,0.6)\n\n sum_of_kernels = np.sum(kernel_list,axis=0)*(1/n) # total kernels\n \n plt.plot(x_axis,sum_of_kernels,color='indianred')\n sns.rugplot(data,c='indianred')\n plt.title('sum of the basis functions')", "title": "" }, { "docid": "1f7da8a7947feb5ec731fb7dfb42ebd3", "score": "0.53734314", "text": "def ecdf(data):\r\n\r\n # Number of data points: n\r\n n = len(data)\r\n\r\n # x-data for the ECDF: x\r\n x = np.sort(data)\r\n\r\n # y-data for the ECDF: y\r\n y = np.arange(1, n+1) / n\r\n\r\n return x, y", "title": "" }, { "docid": "ba649f537af85785059609138888b0cc", "score": "0.53654814", "text": "def _compute_se(self, data, centroids, assigned, n_samples):\r\n se = 0\r\n for idx in range(n_samples):\r\n se += np.sqrt((data[idx, :]-centroids[assigned[idx], :])**2).sum()\r\n return se/n_samples", "title": "" }, { "docid": "2750e15839593227a497b1c19c7b61a2", "score": "0.53645927", "text": "def discretize(self, delta_t: float) -> None:", "title": "" }, { "docid": "f9e225ff8a376e42f812254e6ef2c8d0", "score": "0.53623915", "text": "def ecdf(data):\n # Number of data points: n\n n = 
len(data)\n    # x-data for the ECDF: x\n    x = np.sort(data)\n    # y-data for the ECDF: y\n    y = np.arange(1, n + 1) / n\n    return x, y", "title": "" }, { "docid": "04620f2c03234d57938efe0bd8bcf893", "score": "0.53555024", "text": "def second_eccentricity(self):\n        return self.e1", "title": "" }, { "docid": "3ac782dc9fdf8aa36c91cdc654d1d019", "score": "0.5352694", "text": "def calculate_energy(self):\n        raise NotImplementedError", "title": "" }, { "docid": "960a254622eef1e11eddbadbbfe0704a", "score": "0.53485847", "text": "def det_hessian(self, img, box_size):\n        #dxy = convolve(img, box_2nd_order('xy',box_size), mode='constant')\n        #dxx = convolve(img, box_2nd_order('xx',box_size), mode='constant')\n        #dyy = convolve(img, box_2nd_order('yy',box_size), mode='constant')\n        dxx = self.box_xx(img, box_size)\n        dxy = self.box_xy(img, box_size)\n        dyy = self.box_yy(img, box_size)\n        return dxx*dyy - (HESSIAN_WEIGHTS[box_size]*dxy)**2", "title": "" }, { "docid": "46e90810b92bcb66f29cd928af9c0d50", "score": "0.5347214", "text": "def divergence(arr: np.ndarray, out: np.ndarray) -> None:\n            # inner radial boundary condition\n            for i in range(1, dim_r + 1):  # iterate radial points\n                out[i - 1] = (arr[0, i + 1] - arr[0, i - 1]) * scale_r\n                out[i - 1] += arr[0, i] / rs[i - 1]", "title": "" }, { "docid": "0bd0fff88ff42c813cf0dca1d0a4bec8", "score": "0.53469586", "text": "def ecdf(data):\n    plt.plot(np.sort(data), np.linspace(0,1,len(data)))", "title": "" }, { "docid": "0c273fb870eef966fc8be27c1d4abdb4", "score": "0.53371334", "text": "def emissivity(self, density: u.cm**(-3), **kwargs) -> u.erg * u.cm**(-3) / u.s:\n        density = np.atleast_1d(density)\n        g = self.contribution_function(density, **kwargs)\n        return g * (density**2)[np.newaxis, :, np.newaxis]", "title": "" }, { "docid": "36c6f9698fb1cd83cda646057f153fd5", "score": "0.53329754", "text": "def electron_line_average_density(self):\n        elad = 1.45e+19\n\n        return elad", "title": "" }, { "docid": "8e995538d93d32413f8a1638a2093adc", "score": "0.53268284", "text": "def calc_sse(self,experimento):\n        pass", "title": "" }, { "docid": "1a088eefb743672096664d650b19c35f", "score": "0.53135514", "text": "def divergence(f):\n\tnum_dims = len(f)\n\tprint(f)\n\treturn np.ufunc.reduce(np.add, [np.gradient(f[i], axis=i) for i in range(num_dims)])", "title": "" }, { "docid": "97b947491374e59e642db2be97bf5109", "score": "0.53130823", "text": "def ecdf(data):\n    # Number of data points: n\n    n = len(data)\n\n    # x-data for the ECDF: x\n    x = np.sort(data)\n\n    # y-data for the ECDF: y\n    y = np.arange(1, n + 1) / n\n\n    return x, y", "title": "" }, { "docid": "47af79ff39fb7e13328f6fac70898746", "score": "0.5308772", "text": "def ecdf(data):\n    # Number of data points: n\n    n = len(data)\n\n    # x-data for the ECDF: x\n    x = np.sort(data)\n\n    # y-data for the ECDF: y\n    y = np.arange(1, n+1) / n\n\n    return x, y", "title": "" }, { "docid": "4881e6ac96182b4cbcc2bd770a1d49b5", "score": "0.5300936", "text": "def sd(alpha=0.0002):\n    print \"STEEPEST DESCENT: 
start\"\n # gradient\n g = [diff(obj, i) for i in m]\n # Initialize xs\n xs = [[0.0, 0.0]]\n xs[0] = x_start\n # Get gradient at start location (df/dx or grad(f))\n iter_s = 0\n while np.linalg.norm(xs[-1] - x_result) > target_precision:\n # print \"STEEPEST DESCENT: distance:\", np.linalg.norm(xs[-1] - x_result)\n gs = dfdx(xs[iter_s], g)\n # Compute search direction and magnitude (dx)\n # with dx = - grad but no line searching\n xs.append(xs[iter_s] - np.dot(alpha, gs))\n # print xs[-1]\n iter_s += 1\n if iter_s > 10000:\n break\n print \"STEEPEST DESCENT: result distance:\", np.linalg.norm(xs[-1] - x_result)\n xs = np.array(xs)\n plt.plot(xs[:, 0], xs[:, 1], 'g-o')", "title": "" }, { "docid": "0143a2f4548ffdc607e38241b0b1edd5", "score": "0.5298317", "text": "def _compute_energy(self):\n raise NotImplementedError", "title": "" }, { "docid": "efa1001944739b8f1b7356f6824798c0", "score": "0.52960914", "text": "def propagator(self):\r\n\t\th, m = self.hbar, self.m\r\n\t\tdt,dx = self.dt,self.dx\r\n\t\tnx,nt = self.nx, self.nt\r\n\t\tw = self.w\r\n\t\t\r\n\t\tS = np.zeros(shape = (nx,nx), dtype = self.dtype )\r\n\t\tdS = np.zeros(shape = (nx,nx), dtype = self.dtype )\r\n\r\n\t\tx = self.x\r\n\r\n\t\tc0 = (m * dt /(2* h))\r\n\t\tc1 = c0/(dt*dt)\r\n\t\tc2 = c0*w*w*.25\r\n\r\n\t\tl = len(x)\r\n\r\n\t\tfor ii in range(l):\r\n\t\t\tfor jj in range(ii,l):\r\n\t\t\t\ts = c1*(x[jj]-x[ii])**2 + c2*(x[jj]+x[ii])**2\r\n\t\t\t\tS[ii,jj] = s\r\n\t\t\t\tS[jj,ii] = s\r\n\r\n\t\tfor ii in range(l):\r\n\t\t\tfor jj in range(l):\r\n\t\t\t\tds = (S[ii,ii] - S[ii,jj])\r\n\t\t\t\tdS[ii,jj] = ds\r\n\r\n\t\treturn np.exp(dS)", "title": "" }, { "docid": "52cb260a13b3756b2db709ab3968ccc4", "score": "0.5291767", "text": "def gkern(size=5, sigma=1.0):\n ax = np.arange(-size // 2 + 1.0, size // 2 + 1.0)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-(xx**2 + yy**2) / (2.0 * sigma**2))\n return kernel / np.sum(kernel)", "title": "" }, { "docid": "b56cb682c40ec19c81282ec2fc83cfaa", "score": "0.5291205", "text": "def ec_disc(ainvs):\n a1, a2, a3, a4, a6 = ainvs\n b2 = a1*a1 + 4*a2\n b4 = a3*a1 + 2*a4\n b6 = a3*a3 + 4*a6\n c4 = b2*b2 - 24*b4\n c6 = -b2*b2*b2 + 36*b2*b4 - 216*b6\n return (c4*c4*c4 - c6*c6) / 1728", "title": "" }, { "docid": "1c85dae29d30fe5ce67a22950816bde7", "score": "0.5290065", "text": "def K(self, x1, x2=0):\n diffs = (x1 - x2).reshape(self.D, 1) # Vector of differences\n #diffs = np.array([0] * self.D).reshape(self.D, 1)\n return e**(-.5 * (diffs.transpose() @ self.covMI @ diffs)[0][0]) / self.denom\n #return density", "title": "" }, { "docid": "fd0da4be2f26428b2ed9c8672f259544", "score": "0.5288685", "text": "def electronDensity(self, separation):\n return self.electronDensityFunction(separation)", "title": "" }, { "docid": "c3895f1ba9e89b0ab674f05f915cf232", "score": "0.5278172", "text": "def ecdf(data):\n data.dropna(inplace=True)\n\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y", "title": "" }, { "docid": "434076a188c42a3d7fba7c559c97e3e3", "score": "0.5267953", "text": "def energy(self, data):\n return be.dot(data, self.params.loc)", "title": "" }, { "docid": "30ead6dd4621f1ed2bd8b9f7c6a87a8c", "score": "0.52505195", "text": "def FE_1D(self, xsamples, wsamples, nbins=100):\n # Take the projection.\n xproj = self.xproj(xsamples)\n # Procedure for calculating the free energy\n counts, bins = np.histogram(xproj, bins=nbins, weights=wsamples)\n p = counts / np.sum(counts) # the 
approximation of probability, px(x)\n centers = bins[:-1] + 0.5*(bins[1]-bins[0])\n FE = -np.log(p)\n E0 = np.min(FE)\n FE -= E0\n self.cutaway(zero=E0)\n plt.scatter(centers,FE, c='g')\n plt.ylabel(r'$\\Delta F$')\n plt.xlabel(r'$x_{1}$')", "title": "" }, { "docid": "4157735a17f42821be441227ac549f3b", "score": "0.52500194", "text": "def dpsfdxc(i0,i1,xc,yc,sigma):\n a=1/(np.sqrt(2)*sigma)\n return -a*0.25*2/np.sqrt(np.pi)*(np.exp(-(a*(i1+0.5-xc))**2)-np.exp(-(a*(i1-0.5-xc))**2))*(erf(a*(i0+0.5-yc))-erf(a*(i0-0.5-yc)))", "title": "" }, { "docid": "66a89674908fc712594c1572d2ba838c", "score": "0.52474517", "text": "def _gauss_kernel(self, kernel_size, nsig):\n\n x = np.linspace(-nsig, nsig, kernel_size + 1)\n kern1d = np.diff(st.norm.cdf(x))\n kern2d = np.outer(kern1d, kern1d)\n return kern2d / kern2d.sum()", "title": "" }, { "docid": "053840603f16657c58a56e2fbab6b87f", "score": "0.52409095", "text": "def sse_calculation(self):\n sse = 0\n for row in self.data:\n sse += (row[0]-self.centroid[0])**2 + (row[1]-self.centroid[1])**2 + (row[2]-self.centroid[2])**2\n return sse", "title": "" }, { "docid": "21d1749a7bd937f9ad5487847741f6b3", "score": "0.5237992", "text": "def se(self):\n \n sd = self.stdev()\n n = self.n()\n \n return sd / np.sqrt(n)", "title": "" }, { "docid": "e7ab95afbc7faf0e56a7322f81dffc49", "score": "0.52239615", "text": "def __init_coefs(self, g):\n df = {}\n df[0] = g[5]\n df[1] = g[6]\n df[2] = g[9]\n df[3] = g[10]\n\n # Derivatives in the East direction and the North direction\n df[4] = (-g[7] + 4 * g[6] - 3 * g[5]) / 2\n df[5] = (3 * g[6] - 4 * g[5] + g[4]) / 2\n df[6] = (-g[11] + 4 * g[10] - 3 * g[9]) / 2\n df[7] = (3 * g[10] - 4 * g[9] + g[8]) / 2\n df[8] = (-g[13] + 4 * g[9] - 3 * g[5]) / 2\n df[9] = (-g[14] + 4 * g[10] - 3 * g[6]) / 2\n df[10] = (3 * g[9] - 4 * g[5] + g[1]) / 2\n df[11] = (3 * g[10] - 4 * g[6] + g[2]) / 2\n\n # Equations for the cross derivative\n df[12] = ((g[0] + g[10]) - (g[2] + g[8])) / 4\n df[13] = ((g[1] + g[11]) - (g[3] + g[9])) / 4\n df[14] = ((g[4] + g[14]) - (g[6] + g[12])) / 4\n df[15] = ((g[5] + g[15]) - (g[7] + g[13])) / 4\n\n self.__a[0] = df[0]\n self.__a[1] = df[4]\n self.__a[2] = -3 * df[0] + 3 * df[1] - 2 * df[4] - df[5]\n self.__a[3] = 2 * df[0] - 2 * df[1] + df[4] + df[5]\n self.__a[4] = df[8]\n self.__a[5] = df[12]\n self.__a[6] = -3 * df[8] + 3 * df[9] - 2 * df[12] - df[13]\n self.__a[7] = 2 * df[8] - 2 * df[9] + df[12] + df[13]\n self.__a[8] = -3 * df[0] + 3 * df[2] - 2 * df[8] - df[10]\n self.__a[9] = -3 * df[4] + 3 * df[6] - 2 * df[12] - df[14]\n self.__a[10] = (9 * df[0] - 9 * df[1] - 9 * df[2] + 9 * df[3] + 6 * df[4] + 3\n * df[5] - 6 * df[6] - 3 * df[7] + 6 * df[8] - 6 * df[9] + 3\n * df[10] - 3 * df[11] + 4 * df[12] + 2 * df[13] + 2 * df[14]\n + df[15])\n self.__a[11] = (-6 * df[0] + 6 * df[1] + 6 * df[2] - 6 * df[3] - 3 * df[4] - 3\n * df[5] + 3 * df[6] + 3 * df[7] - 4 * df[8] + 4 * df[9] - 2\n * df[10] + 2 * df[11] - 2 * df[12] - 2 * df[13] - df[14]\n - df[15])\n self.__a[12] = 2 * df[0] - 2 * df[2] + df[8] + df[10]\n self.__a[13] = 2 * df[4] - 2 * df[6] + df[12] + df[14]\n self.__a[14] = (-6 * df[0] + 6 * df[1] + 6 * df[2] - 6 * df[3] - 4 * df[4] - 2\n * df[5] + 4 * df[6] + 2 * df[7] - 3 * df[8] + 3 * df[9] - 3\n * df[10] + 3 * df[11] - 2 * df[12] - df[13] - 2 * df[14]\n - df[15])\n self.__a[15] = (4 * df[0] - 4 * df[1] - 4 * df[2] + 4 * df[3] + 2 * df[4] + 2\n * df[5] - 2 * df[6] - 2 * df[7] + 2 * df[8] - 2 * df[9] + 2\n * df[10] - 2 * df[11] + df[12] + df[13] + df[14] + df[15])", "title": "" }, { "docid": 
"ca3065d3d91e7ea7d44cafab0eae7207", "score": "0.52211946", "text": "def s(self):\n xe = self._xe\n nobs = xe.shape[0]\n bw = self.bandwidth\n kernel = self._kernel\n kernel = KERNEL_LOOKUP[kernel]\n weights = kernel(bw, nobs - 1)\n out = _cov_kernel(xe, weights)\n\n return (out + out.T) / 2", "title": "" }, { "docid": "35d4cc491b2e48738c2b48db33d27387", "score": "0.522105", "text": "def Energy():\n pot = 0\n kin = 0\n for j in range(ParticleAmount):\n pot1 = 0\n kin += (0.5) * (sum([x * x for x in Particles[j, 1]]))\n for k in range(ParticleAmount):\n if k != j:\n r = DistancePoints(Particles[j, 0], Particles[k, 0])\n pot1 += 4 * ((1 / r) ** 12 - (1 / r) ** 6)\n pot += pot1 / 2 # *0.5 to prevent double counting, can rewrite the range too to save time\n Epot.append(pot)\n Ekin.append(kin)\n Etot.append(pot + kin)", "title": "" }, { "docid": "59ab17228bf4ee1b2a2cf26ccdf82ae5", "score": "0.52159405", "text": "def CalGen(self):\n # Energy dissipation mW/cm3-nm at each position and wavelength\n # (JAP Vol 86 p.487 Eq 22)\n if self.AbsRate is None:\n self.CalAbs()\n Q = self.AbsRate * self.AM15\n self.Gx = Q * 1e-12 / (h * c) * self.WL\n\n Gx_x = [np.sum(self.Gx[self.x_ind[i-1]:self.x_ind[i]])\n for i in range(1, len(self.layers))]\n self.Jsc = np.array(Gx_x) * self.WLstep * self.posstep * q * 1e-4\n\n return None", "title": "" }, { "docid": "fb262161e2b6720b53dda0580da6603e", "score": "0.5213868", "text": "def lasso_g(datax,datay,w,alpha):\n return mse_g(datax,datay,w) + alpha * np.sign(w).reshape((-1,1))", "title": "" }, { "docid": "dcb3e7c2bb7c19c7c709ee63678686ba", "score": "0.5213698", "text": "def get_energy(self):\n w = 0\n for i in range(3):\n for j in range(3):\n w = w + self.epsilon[i,j] * self.sigma[i,j]\n self.w = w", "title": "" }, { "docid": "dcb3e7c2bb7c19c7c709ee63678686ba", "score": "0.5213698", "text": "def get_energy(self):\n w = 0\n for i in range(3):\n for j in range(3):\n w = w + self.epsilon[i,j] * self.sigma[i,j]\n self.w = w", "title": "" }, { "docid": "6519c6a46cf4e1e9dcff86d413f8089a", "score": "0.520436", "text": "def ggprimeInScatter(g, l):\n return np.sum(skernel[l, g, :] * evalVecLegFlux(cell, l))", "title": "" }, { "docid": "890684aeb4dec4d6d600488d444854b4", "score": "0.5201224", "text": "def Recompute_densities(self, Over_IDs, box_exp_multiplier):\n\t\tSlicedParts, SlicedIDs, SliceRanges = OF.Get_particle_box_slices(self.ParticlePos)\n\t\tNf_Distances = []\n\t\tNf_Segids = []\n\t\tNf_Tsolutions = []\n\t\tNf_ParticleBoxes = []\n\t\tNf_Masked_IDs = []\n\t\tfor number in Over_IDs:\n\t\t\tDist_new, SegID_new, Tsols_new, Pbox_new, MaskIDs_new = self.Mask_and_compute_distances_again(self.Filament_3DPos[self.Small_filaments][number],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbox_exp_multiplier*6, SlicedParts, SlicedIDs, SliceRanges)\n\t\t\tNf_Distances.append(Dist_new)\n\t\t\tNf_Segids.append(SegID_new)\n\t\t\tNf_Tsolutions.append(Tsols_new)\n\t\t\tNf_ParticleBoxes.append(Pbox_new)\n\t\t\tNf_Masked_IDs.append(MaskIDs_new)\n\t\tNf_Distances = np.array(Nf_Distances)\n\t\tNf_Segids = np.array(Nf_Segids)\n\t\tNf_Tsolutions = np.array(Nf_Tsolutions)\n\t\tNf_ParticleBoxes = np.array(Nf_ParticleBoxes)\n\t\tNf_Masked_IDs = np.array(Nf_Masked_IDs)\n\n\t\tIncluded_fils, Filtered_fils, OverThreshold = self.Filter_filament_density_threshold(self.Filament_3DPos[self.Small_filaments][Over_IDs], Nf_Distances,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.FilamentLength[self.Small_filaments][Over_IDs])\n\t\tReturn_included = Over_IDs[Included_fils] if 
Included_fils.any() else np.array([])\n\t\tReturn_excluded = Over_IDs[Filtered_fils] if Filtered_fils.any() else np.array([])\n\t\tReturn_overIDs = Over_IDs[OverThreshold] if OverThreshold.any() else np.array([])\n\t\treturn_dist = Nf_Distances[Included_fils] if Included_fils.any() else Nf_Distances\n\t\treturn_masks = Nf_Masked_IDs[Included_fils] if Included_fils.any() else Nf_Masked_IDs\n\t\treturn_segids = Nf_Segids[Included_fils] if Included_fils.any() else Nf_Segids\n\t\treturn Return_included, Return_excluded, Return_overIDs, return_dist, return_masks, return_segids", "title": "" }, { "docid": "a4aa423a2c7b7f9a592905d8f14245f1", "score": "0.51976824", "text": "def energies(self):\n return self._pha[self._filter_idx]", "title": "" }, { "docid": "84630f7b4385c7233eb8f8a0633001f7", "score": "0.5187491", "text": "def meas(self):\n meas = DubinsCarMeas(self.x, self.beacon_x, self.beacon_y)\n meas += self.meas_noise_coef @ np.random.standard_normal(self.meas_dim)\n return meas", "title": "" }, { "docid": "f69a962661317491bb66b9e06eb07ae4", "score": "0.5186381", "text": "def divergence(u):\n return reduce(np.add,[np.gradient(u[i])[i] for i in range(len(u))])", "title": "" }, { "docid": "30c03db0655c2dd521b33d248044b811", "score": "0.51840013", "text": "def elastic_spectrum(self,periods,acc,dt,damping):\n SD = []\n SV = []\n SA = []\n stiffness = []\n omega = []\n \n m = 1.0\n n = len(acc)\n for pp in range(len(periods)):\n # - Define aid variables \n if periods[pp] == 0.0:\n p = 5e-2\n else: \n p = periods[pp]\n \n w = 2*pi/p\n omega.append(w)\n \n k = m*(w**2)\n stiffness.append(k)\n c = 2*damping*(k*m)**0.5\n \n # - Initial conditions for Central differences\n u = []\n u0 = 0.0\n a0 = -1*acc[0]\n u1 = u0 + 0.5*a0*dt**2\n u.append(u0)\n u.append(u1)\n \n # - Run central differences\n for i in range(1,n-1):\n u_next = self.disp_next(u[i],u[i-1],acc[i],k,c,m,dt)\n u.append(u_next)\n \n \n sd = max(max(u),abs(min(u)))\n SD.append(sd)\n \n SD = np.array(SD)\n omega = np.array(omega)\n stiffness = np.array(stiffness)\n SV = omega*SD\n SA = SD*omega**2\n F_elastic = stiffness*SD\n return SA,SV,SD,F_elastic", "title": "" }, { "docid": "4b9f0ece68460b9bcfa435619e9270c3", "score": "0.51769805", "text": "def bond_energy_np(self, x):\n d2 = (x[:, 0]-x[:, 2])**2 + (x[:, 1]-x[:, 3])**2\n d4 = d2**2\n d = np.sqrt(d2) # Here is the thorn\n return 1/4*self.a*d4 - 1/2*self.b*d2 + self.c*d", "title": "" }, { "docid": "3fa481a61899674c89ee566d82f8c81a", "score": "0.5175087", "text": "def ecdf(data):\n x = np.sort(data)\n y = np.arange(1, 1+len(x)) /len(x)\n return x, y", "title": "" }, { "docid": "493b564c5ea04ca341ab608cfcbe063d", "score": "0.51748246", "text": "def test_discrete5():\n geo = psci.geometry.Rectangular(space_origin=(0.0, -1.0), space_extent=(2.0, 1.0))\n pdes = psci.pde.NavierStokes(0.01, 1.0)\n pdes, geo = psci.discretize(pdes, geo, space_nsteps=(4, 3))\n bc = geo.get_bc_index()\n sd = geo.get_space_domain()\n\n sd_t, bc_t = geo_discrete([(0.0, -1.0), (2.0, 1.0)], (4, 3))\n assert np.allclose(bc, bc_t)\n assert np.allclose(sd, sd_t)", "title": "" }, { "docid": "8287b48312fc13ece1193c6e5a88ba11", "score": "0.51728", "text": "def S(x):\n \n # return D/2 * (1-diam_narrow*(1+np.cos(2*np.pi*(x-x0)/L)))\n # L = 2*diam_steno_vessel\n return diam_steno_vessel/2 - diam_narrow/2*(1+np.cos(2*np.pi*(x)/length_steno))", "title": "" }, { "docid": "9df29893afcbdf33c220643090ad8ca2", "score": "0.5167013", "text": "def oldvals():\n #plt.figure(0)\n 
#plt.scatter([ant1_px,ant2_px,ant3_px,ant4_px],[ant1_py,ant2_py,ant3_py,ant4_py])\n #plt.scatter(0.0,0.0)\n #plt.scatter(SPICE_x_guess,SPICE_y_guess)\n #plt.show()\n\n #print('try sample f:')\n #init_chi2 = f(ant1_pxb,ant1_pyb,ant1_pzb,ant1_pxt,ant1_pyt,ant1_pzt, ant2_px, ant2_py, ant2_pzb, ant2_pzt,ant3_px,ant3_py,ant3_pzb,ant3_pzt,ant4_px,ant4_py,ant4_pzb,ant4_pzt,cal_pz,SPICE_x_guess,SPICE_y_guess,c0p,c1p,c8p,c9p,c16p,c17p,c24p,c25p)\n #print('Inital chi2 is:', init_chi2)", "title": "" }, { "docid": "bf3e630a0bdb88bf646fc8b5f9a88688", "score": "0.5164112", "text": "def evaluate_density(self, X):\n pass", "title": "" }, { "docid": "d87294af9811a381b012f8c5adaafb2c", "score": "0.51547587", "text": "def kernel_density(x, kde):\r\n return kde.score_samples(np.array([[x]]))[0]", "title": "" }, { "docid": "a2c40ac41dcc9dedd1dd3e6a55cfa10e", "score": "0.5148907", "text": "def Eccentricity(self, *args):\n return _gp.gp_Hypr2d_Eccentricity(self, *args)", "title": "" }, { "docid": "87d70ddf2e62b8fa9e9d86ae54f4f0d2", "score": "0.5140569", "text": "def _demo_sweepD():\n abf = pyabf.ABF(PATH_DATA+\"/17o05026_vc_stim.abf\")\n epochTable = EpochTable(abf, 0) # channel 0\n sweepWaveform = epochTable.epochWaveformsBySweep[0] # sweep 0\n sweepD = sweepWaveform.getDigitalWaveform(4) # digital output 4\n print(\"SweepD\", sweepD)\n plt.plot(sweepD)\n plt.show()", "title": "" }, { "docid": "5347d112ed9e6b4b51bb08e9f1553e2b", "score": "0.5140415", "text": "def delta_put(self):\r\n return (-norm.cdf(-self.d1))", "title": "" }, { "docid": "1a96741ab555b8daf9b20c245f0715d7", "score": "0.5138724", "text": "def discretize(sample, grid):\n # TODO: Implement this\n pass", "title": "" }, { "docid": "338b207987d5331e08bf473f0f018275", "score": "0.5133485", "text": "def compute_edges_dxdy(I):\n I = I.astype(np.float32)/255.\n \"\"\" Derivative Gaussian Filter \"\"\"\n # truncate calculation: t = (((w - 1)/2)-0.5)/s\n # using 5x5 filter here\n w = 5\n sigma = 10\n t = (((w - 1)/2)-0.5)/sigma\n dx = ndi.gaussian_filter(I,sigma,order=[1,0],truncate=t) # x Derivative\n dy = ndi.gaussian_filter(I,sigma,order=[0,1],truncate=t) # y Derivative\n\n # # \"\"\" original filter \"\"\"\n # dx = signal.convolve2d(I, np.array([[-1, 0, 1]]), mode='same', boundary='symm')\n # dy = signal.convolve2d(I, np.array([[-1, 0, 1]]).T, mode='same', boundary='symm')\n # dx = signal.convolve2d(I, np.array([[-1, 0, 1]]), mode='same')\n # dy = signal.convolve2d(I, np.array([[-1, 0, 1]]).T, mode='same')\n mag = np.sqrt(dx**2 + dy**2)\n mag = mag / np.max(mag)\n theta = np.arctan2(dy,dx)\n theta[theta < 0] += math.pi\n theta = theta*180/math.pi\n\n \"\"\" Non-maximum Suppression \"\"\"\n threshold = 0\n # NMS = np.copy(mag)\n for y in range(1, mag.shape[0]-1):\n for x in range(1, mag.shape[1]-1):\n if mag[y][x] > threshold:\n angle = theta[y][x]\n if (0 <= angle < 45):\n w = abs(dy[y][x])/abs(dx[y][x])\n p = w * mag[y-1][x-1] + (1-w) * mag[y][x-1]\n r = w * mag[y+1][x+1] + (1-w) * mag[y][x+1]\n\n elif (45 <= angle <= 90):\n w = abs(dx[y][x])/abs(dy[y][x])\n p = w * mag[y-1][x-1] + (1-w) * mag[y-1][x]\n r = w * mag[y+1][x+1] + (1-w) * mag[y+1][x]\n\n elif (90 < angle < 135):\n w = abs(dx[y][x])/abs(dy[y][x])\n p = w * mag[y-1][x+1] + (1-w) * mag[y-1][x]\n r = w * mag[y+1][x-1] + (1-w) * mag[y+1][x]\n\n elif (135 <= angle <= 180):\n w = abs(dy[y][x])/abs(dx[y][x])\n p = w * mag[y-1][x+1] + (1-w) * mag[y][x+1]\n r = w * mag[y+1][x-1] + (1-w) * mag[y][x-1]\n if mag[y][x] >= p and mag[y][x] >= r:\n # NMS[y][x] = mag[y][x]\n continue\n else:\n mag[y][x] = 
0\n # NMS[y][x] = 0\n # # # mag = NMS\n mag = mag * 255.\n mag = np.clip(mag, 0, 255)\n mag = mag.astype(np.uint8)\n return mag", "title": "" }, { "docid": "a02baba939fff0c72335b727a4479e73", "score": "0.5133", "text": "def cdf(self):\n _checkisfit(self)\n kern = self.kernel\n if kern.domain is None: # TODO: test for grid point at domain bound\n a, b = -np.inf, np.inf\n else:\n a, b = kern.domain\n\n def func(x, s):\n return np.squeeze(kern.density(s, x))\n\n support = self.support\n support = np.r_[a, support]\n gridsize = len(support)\n endog = self.endog\n probs = [\n integrate.quad(func, support[i - 1], support[i], args=endog)[0]\n for i in range(1, gridsize)\n ]\n return np.cumsum(probs)", "title": "" }, { "docid": "e60b0fe9e0c03eb3b76a75c398b0ac94", "score": "0.51325965", "text": "def curvatures_fd(self, x: np.ndarray, eps=1e-6) -> np.ndarray:\n x = copy.deepcopy(x)\n f = self.compute_functional(x)\n curvs = np.zeros(x.shape)\n for i in range(x.size):\n x[i] += eps # x + eps\n fp = self.compute_functional(x)\n x[i] -= 2 * eps # x - eps\n fm = self.compute_functional(x)\n x[i] += eps # reset to original values\n curvs[i] += (fm - 2 * f + fp) / (eps**2)\n return curvs", "title": "" }, { "docid": "326c603a4c0a27d3451e1dcf7c7911e3", "score": "0.5127085", "text": "def conedf(df, mu, N):\n return 1. / (df + 2. * np.sqrt(df) + float(mu) / N)", "title": "" }, { "docid": "b54becf604e2ea6527fff11079f7ac2f", "score": "0.5122607", "text": "def cech_differential(self, start_coordinates, n_dim, deg):\n\n target_coordinates = []\n deg_sign = (-1)**deg\n # go through all dictionaries in input\n for i, coordinates in enumerate(start_coordinates):\n cech_image = {}\n # look at each nerve simplex on each dictionary\n for nerve_spx_index in iter(coordinates):\n points_IN = np.copy(self.points_IN[n_dim][nerve_spx_index])\n # Iterate over simplices in boundary of nerve simplex\n nerv_boundary_indx = np.nonzero(self.nerve_differentials[\n n_dim][:, nerve_spx_index])[0]\n nerv_boundary_coeff = self.nerve_differentials[n_dim][\n :, nerve_spx_index][nerv_boundary_indx]\n # go through all faces for each simplex in nerve\n for nerve_face_index, nerve_coeff in zip(\n nerv_boundary_indx, nerv_boundary_coeff):\n if deg == 0:\n # inclusions for points\n # Preallocate space\n if nerve_face_index not in cech_image:\n cech_image[nerve_face_index] = np.zeros(\n self.subcomplexes[n_dim - 1][\n nerve_face_index][0])\n # end if\n for point_idx, point_coeff in enumerate(coordinates[\n nerve_spx_index]):\n if point_coeff != 0:\n face_point_idx = np.argmax(self.points_IN[\n n_dim-1][nerve_face_index] == points_IN[\n point_idx])\n cech_image[nerve_face_index][\n face_point_idx\n ] += nerve_coeff * point_coeff * deg_sign\n cech_image[nerve_face_index][\n face_point_idx] %= self.p\n # end if\n # end for\n else:\n # inclusions for edges, 2-simplices and higher\n # simplices. 
Preallocate space as well.\n if nerve_face_index not in cech_image:\n cech_image[nerve_face_index] = np.zeros(len(\n self.subcomplexes[n_dim - 1][nerve_face_index][\n deg]))\n # end if\n # Iterate over nontrivial local simplices in domain\n spx_indices = np.nonzero(coordinates[\n nerve_spx_index])[0]\n spx_coefficients = coordinates[nerve_spx_index][\n spx_indices]\n for spx_index, spx_coeff in zip(\n spx_indices, spx_coefficients):\n # Obtain IN for vertices of simplex\n vertices_spx = points_IN[self.subcomplexes[n_dim][\n nerve_spx_index][deg][spx_index]]\n # Iterate over simplices in range to see which\n # one has vertices_spx as vertices.\n for im_indx, im_spx in enumerate(\n self.subcomplexes[n_dim-1][\n nerve_face_index][deg]):\n vertices_face = self.points_IN[n_dim-1][\n nerve_face_index][im_spx.astype(int)]\n # When the vertices coincide, break the loop\n if len(np.intersect1d(\n vertices_spx,\n vertices_face)) == deg + 1:\n cech_image[nerve_face_index][im_indx] += \\\n spx_coeff * nerve_coeff * deg_sign\n cech_image[nerve_face_index][im_indx] %= \\\n self.p\n break\n # end if\n # end for\n # end for\n # end else\n # end for\n # end for\n target_coordinates.append(cech_image)\n # end for\n\n return target_coordinates", "title": "" }, { "docid": "7701d77ce5dfaa2901565acee37938d8", "score": "0.51191807", "text": "def e(self):\n e = 0\n for x in range(self.n):\n for i in range(self.n):\n for y in range(self.n):\n for j in range(self.n):\n e += - 0.5 * self.w[x][i][y][j] * self.s[x][i] * self.s[y][j]\n e += - self.b[x][i] * self.s[x][i]\n return e", "title": "" }, { "docid": "e35dd859faecae1748b60ea82360451f", "score": "0.51150864", "text": "def evolve(self):", "title": "" }, { "docid": "02c1e8c82a496669dd55c4ba7e450061", "score": "0.5114417", "text": "def disc_dist():\n fig = Figure(figsize=(0.5, 0.5), dpi=100)\n sub = fig.add_subplot(111)\n x_pts = [0, 0.99, 1, 1.01, 1.99, 2, 2.01, 3, 3.99, 4, 4.01, 5, 6]\n y_pts = [0, 0, 1, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0]\n sub.plot(x_pts, y_pts)\n return fig", "title": "" }, { "docid": "5d741685cf46e767762950f2291aff0f", "score": "0.5110884", "text": "def test_discrete2():\n geo = psci.geometry.Rectangular(space_origin=(0.0, 0.0), space_extent=(1.0, 1.0))\n pdes = psci.pde.NavierStokes(0.01, 1.0)\n pdes, geo = psci.discretize(pdes, geo, space_nsteps=(20, 20))\n bc = geo.get_bc_index()\n sd = geo.get_space_domain()\n\n sd_t, bc_t = geo_discrete([(0.0, 0.0), (1.0, 1.0)], (20, 20))\n assert np.allclose(bc, bc_t)\n assert np.allclose(sd, sd_t)", "title": "" }, { "docid": "c89caa2bc1611ec7253ad343b5536c5f", "score": "0.5105004", "text": "def CoreSersic2D(x, y, amplitude=1, r_eff=1, r_break=1, n=1, x_0=0, y_0=0,\n alpha=10, gamma=0.1, ellip=0, theta=0):\n bn = gammaincinv(2. 
* n, 0.5)\n a, b = r_eff, (1 - ellip) * r_eff\n cos_theta, sin_theta = np.cos(theta), np.sin(theta)\n x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta\n x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta\n r = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)\n\n r[np.where(r == 0)] = np.nan\n\n I = amplitude * (2 ** -(gamma / alpha)) * np.exp(\n bn * (2 ** (1 / alpha) * (r_break / r_eff)) ** (1 / n)\n )\n\n return I * (1 + (r_break / r) ** (alpha)) ** (gamma / alpha) * np.exp(\n - bn * ((r ** alpha + r_break ** alpha) / (r_eff) ** alpha) ** (1 / (alpha * n))\n )", "title": "" }, { "docid": "53d462b951a0da3fc7642890f81aae7f", "score": "0.5104522", "text": "def gradientDivergence(c, xR, xL, yU, yD, vx, vy):#yU,yD,xR,xL, vx, vy):# y, x, vx, vy, r, norm):\n\n\n\t#print \"using gradDiv in observer\"\n\t#print \"VY: %s VX: %s\\nY: %s X: %s\" %(vy, vx, y, x)\n\n\t#print \"find gradient and Divergence\"\n\n\t#yU = self.concentration(y+2*r, x, r)\t* norm\n\t#yD = self.concentration(y-2*r, x, r)\t* norm\n\t#xU = self.concentration(y, x+2*r, r) * norm\n\t#xD = self.concentration(y, x-2*r, r) * norm\n\t#c = self.concentration(y,x,r) * norm\n\n\t#print \"yU: %s yD: %s\\nxU: %s xD: %s\\nc: %s \"%(yU, yD, xU, xD, c)\n\t#print \"vx: %s vy: %s\"%(vx,vy)\n\n\t#print \"concentrations: %s, %s, %s, %s\" %(yU, yD, xR, xL)\n\t###print vx, vy\n\t#print r\n\n\n\n\n\n\n\tDU_dy0 = (vy>=0)*(yU-c)/(r)+(vy<0)*(c-yD)/(r)\n \tDU_dx0 = (vx>=0)*(xR-c)/(r)+(vx<0)*(c-xL)/(r)\n\n\tD2U0 = (yU+yD-2*c)/r**2+(xR+xL-2*c)/r**2\n\treturn DU_dx0, DU_dy0, D2U0", "title": "" }, { "docid": "59ee0ee61d4d63cdfeecace08bbe9b3c", "score": "0.5094887", "text": "def compute_energy(self):\n E=df.assemble(self.E)\n return E", "title": "" }, { "docid": "024fb963828b3e7b9c7c47c22720be2a", "score": "0.50926286", "text": "def getenergies(self):\n\n\t\tself.data[\"eint\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tself.data[\"edeg\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tself.data[\"c_s\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\t[self.data[\"eint\"][i], self.data[\"edeg\"][i], self.data[\"c_s\"][i]] = self.gethelmeos_energies(self.data[\"rho\"][i], self.data[\"T\"][i])\n\t\tself.data[\"eth\"] = self.data[\"eint\"] - self.data[\"edeg\"]\n\t\tself.data[\"erot\"] = (2./3.)*self.data[\"R\"]**2*self.omega**2\t\t# specific rotational energy I*omega^2/m = 2/3*r^2*omega^2\n\n\t\tself.data[\"Edeg\"] = 4.*np.pi*scipyinteg.cumtrapz(self.data[\"edeg\"]*self.data[\"R\"]**2*self.data[\"rho\"], x=self.data[\"R\"], initial=0.)\n\t\tself.data[\"Eth\"] = 4.*np.pi*scipyinteg.cumtrapz(self.data[\"eth\"]*self.data[\"R\"]**2*self.data[\"rho\"], x=self.data[\"R\"], initial=0.)\n\t\t# Binding energy E_pot = \\int_0^m -GM_enc/r dm (http://farside.ph.utexas.edu/teaching/301/lectures/node153.html)\n\t\tself.data[\"Epot\"] = -4.*np.pi*self.grav*scipyinteg.cumtrapz(self.data[\"M\"]*self.data[\"R\"]*self.data[\"rho\"], x=self.data[\"R\"], initial=0.)\n\t\tself.data[\"Erot\"] = 0.5*self.getmomentofinertia(self.data[\"R\"], self.data[\"rho\"])*self.omega**2\n\t\tself.data[\"eb\"] = self.data[\"B\"]**2/8./np.pi/self.data[\"rho\"]\t# B^2/8pi is the magnetic energy density\n\t\tself.data[\"EB\"] = 0.5*scipyinteg.cumtrapz(self.data[\"B\"]**2*self.data[\"R\"]**2, x=self.data[\"R\"], initial=0.)", "title": "" }, { "docid": "1b8055a169f5b35ea13f23b721ead370", "score": "0.50918263", "text": "def elemental_creep_strain_energy_density(self, **kwargs):\n self._check_elemental_location(**kwargs)\n return 
self._get_result_data_function_of_operator(\n \"ENL_CRWK\",\n self,\n self._data_sources,\n location=\"Elemental\",\n b_elem_average=True,\n **kwargs\n )", "title": "" }, { "docid": "fdc63bf56a5f8fa3deb3c7764aab1ab3", "score": "0.5089217", "text": "def rse(cls):\n rss = sum((cls.orig_endog.values - cls.predict().values) ** 2)[0]\n n = cls.orig_endog.shape[0]\n p = len(cls.results.x) - 1 # excludes constant coefficient\n return np.sqrt(rss / (n - p - 1))", "title": "" }, { "docid": "34c63c15d438ce099ab6dbf7cd863a5c", "score": "0.5088638", "text": "def calc_perigee(semi_major_axis, eccentricity):\n return float((1 - eccentricity) * semi_major_axis)", "title": "" }, { "docid": "10571d1fad69fbf5d065c8d89cfcc83c", "score": "0.5085874", "text": "def calulate_SSE(df, fkm_cluster_centres, km_clusters_centres):\r\n df[\"fkm_SSE\"] = None\r\n df[\"km_SSE\"] = None\r\n for index, subject in df.iterrows():\r\n subject_dim = np.array(subject[[\"component_1\",\"component_2\"]])\r\n df.iloc[index, -2] = min([(np.sum(np.square(subject_dim - cluster))) for cluster in fkm_cluster_centres])\r\n df.iloc[index, -1] = min([np.sum(np.square(subject_dim - cluster)) for cluster in km_clusters_centres])\r\n return df", "title": "" } ]
4f6e8955cbc1ecda2a3c874da38e334e
Convert the dict of probabilities into a single numpy array. Used to pass the probabilities to the C numpy extension.
[ { "docid": "7357f9b100e6c8991a9109526506d64c", "score": "0.63100165", "text": "def _prob_dict_to_np(self, nframes, naive_bayes=False):\n\n # Get a list of classes for results\n clses = self._classnames\n k = clses[0]\n\n # Allocate and fill each array\n sprobs = np.zeros((len(clses), np.shape(self._marg[k])[0], 2), dtype=np.float64)\n if not naive_bayes:\n jprobs = np.zeros((len(clses), np.shape(self._cond[k])[0],\n np.shape(self._cond[k])[1], 4), dtype=np.float64)\n likelihood = np.zeros((len(clses), nframes), dtype=np.float64)\n res = np.zeros((len(clses), nframes), dtype=np.float64)\n\n for i, key in enumerate(clses):\n sprobs[i] = self._marg[key]\n if not naive_bayes:\n jprobs[i] = self._cond[key]\n\n if not naive_bayes:\n return sprobs, jprobs, likelihood, res\n else:\n return sprobs, likelihood, res", "title": "" } ]
[ { "docid": "00e96822a67220e93768623e53908805", "score": "0.68825066", "text": "def dict_to_array(dicti):\n    vals = [i for i in dicti.keys()]\n    counts = [i for _, i in dicti.items()]\n    return np.repeat(vals, counts)", "title": "" }, { "docid": "f8438398831ab06fdd8fa15720323be4", "score": "0.68129086", "text": "def C_dict2array(C):\n    return np.hstack([np.asarray(C[k]).ravel() for k in C_keys])", "title": "" }, { "docid": "a0ec9debc8b93fc39c365abebf000cc2", "score": "0.6796614", "text": "def dict_to_array(self, d):\n        n_fit_p = len(self.fit_parameters)\n        n_wc = len(self.fit_wc_names)\n        arr = np.zeros(n_fit_p + n_nui_p + n_wc)\n        arr[:n_fit_p] = [d['fit_parameters'][p] for p in self.fit_parameters]\n        arr[n_fit_p:] = [d['fit_wc'][c] for c in self.fit_wc_names]\n        return arr", "title": "" }, { "docid": "107b70e035320e15407a260d80cdfba4", "score": "0.67675424", "text": "def dict_to_array(self, d):\n        n_fit_p = len(self.fit_parameters)\n        n_nui_p = len(self.nuisance_parameters)\n        n_wc = len(self.fit_wc_names)\n        arr = np.zeros(n_fit_p + n_nui_p + n_wc)\n        arr[:n_fit_p] = [d['fit_parameters'][p] for p in self.fit_parameters]\n        arr[n_fit_p:n_fit_p+n_nui_p] = [d['nuisance_parameters'][p] for p in self.nuisance_parameters]\n        arr[n_fit_p+n_nui_p:] = [d['fit_wc'][c] for c in self.fit_wc_names]\n        return arr", "title": "" }, { "docid": "8da945df3033cabbf07ff48881b1f23f", "score": "0.65217113", "text": "def to_numpy(self):\n        return np.array([(x, *np.atleast_1d(y)) for x, y in sorted(self.data.items())])", "title": "" }, { "docid": "5090c97ac430d73a28b3f1f10541e492", "score": "0.6356121", "text": "def kpoints_dict_to_array(kpoints_dict: dict, dim: int = 2):\n    assert isinstance(kpoints_dict, dict)\n    assert dim == 2 or dim == 3\n\n    kpoints_list = []\n    for key in _KP_NAMES:\n        if key in kpoints_dict:\n            kpoints_list.append(kpoints_dict[key])\n        else:\n            kpoints_list.append(np.full(shape=(dim,), fill_value=-1))\n\n    kpoints_array = np.asarray(kpoints_list, dtype=float)\n    return kpoints_array", "title": "" }, { "docid": "9f638712c1c1a63f94f36d5c67401697", "score": "0.63134253", "text": "def array_dict_map(dictionary, keys):\n    out = np.zeros((len(keys), 50))\n    for i,key in enumerate(keys):\n        out[i] = dictionary[key]\n    return out", "title": "" }, { "docid": "7d8ce5d62be38c032d27f72ef8d9bf80", "score": "0.6284463", "text": "def _get_matrix_values(self, seq_dict):\n        return np.array([seq_dict[i] for i in seq_dict.keys()])", "title": "" }, { "docid": "55360068b005e24661034de4f4d701e3", "score": "0.60913867", "text": "def fmt1darray(cdict):\n    # Make sure this represents a 1d array\n    if any(len(inds) != 1 for inds in cdict):\n        raise ValueError(\n            'The keys of the input dictionary must be length-1 tuples')\n\n    jmax = max((inds[-1] for inds in cdict), default=0)\n    carr = np.zeros(jmax+1)\n    for ((j,), c) in cdict.items():\n        carr[j] = c\n    return carr", "title": "" }, { "docid": "1ddd8441a414241d3fef869ec20999c0", "score": "0.6070513", "text": "def arrayize(d):\r\n    return np.array(d)", "title": "" }, { "docid": "f1a4d3f6f363128cc1fe4e7624b20a13", "score": "0.59904647", "text": "def _get_ampdet_data_as_array(data: Dict[Any, AmpDetData], column: str) -> ArrayLike:\n    return np.vstack([getattr(d, column) for d in data.values()])", "title": "" }, { "docid": "a837cee92a4931c363365d451e8165a9", "score": "0.5963877", 
"text": "def dictToNpMatrix(d):\n return np.asmatrix([d[k] for k in sorted(d)])", "title": "" }, { "docid": "8a5622c1c2ca190a6ad5f712c3215731", "score": "0.58722794", "text": "def pvary_levels_to_array(popt, pcmap={}):\n for pname, pconds in pcmap.items():\n popt[pname] = np.array([popt[c] for c in pconds])\n return popt", "title": "" }, { "docid": "804e09f28bdc2df914577d67a5ccc46b", "score": "0.58197963", "text": "def array(values):\n return numpy.array(values, dtype=dtype)", "title": "" }, { "docid": "1ef915815b4ad41329552defa962c064", "score": "0.5807538", "text": "def to_array_from_dict_set(self, N):\n\t\tA = np.zeros((N,N))\n\n\t\tfor key in self.dict_col2row_set:\n\t\t\tfor row_index in self.dict_col2row_set[key]:\n\t\t\t\tA[row_index, key] = 1\n\t\treturn A", "title": "" }, { "docid": "a257d736617c4803d5c8cf2bbd7d7730", "score": "0.5805067", "text": "def dict_to_array(data, default_value, node_num):\n array = np.ones((node_num,))*default_value\n for i in xrange(node_num):\n if data.has_key(i+1):\n array[i] = data[i+1]\n return array", "title": "" }, { "docid": "31227a2f533eb1b16387ec9b9a31c633", "score": "0.5781736", "text": "def convert_numpy(data):\n return np.array(data)", "title": "" }, { "docid": "31b18e49df0999f9ab063becda624160", "score": "0.57281286", "text": "def extract_from_json_as_np_array(key, json_data):\n data_as_array = []\n for p in json_data:\n data_as_array.append(p[key])\n\n return np.array(data_as_array)", "title": "" }, { "docid": "7b16d61a5eb614f48677abaa5bd28a22", "score": "0.57124", "text": "def _get_key_ndarray(self, key):\n return self.bstr_to_ndarray(self._client.get(key))", "title": "" }, { "docid": "17406d8409d3b805aa6b255dd71a5d8b", "score": "0.5694752", "text": "def predict(self, input_array_dict):\n print(input_array_dict)\n return ({\"output_array1\": np.array([100, 200]).astype(np.float32),\n \"output_array2\": np.array([\"foo\".encode(),\"bar\".encode()]).astype(object), # you can get and pass strings encoded as bytes also\n })", "title": "" }, { "docid": "bd9f98c9762ac87b95c654a07ba5682b", "score": "0.5677484", "text": "def _get_key_ndarray(self, key):\n return bstr_to_ndarray(self._client.get(key))", "title": "" }, { "docid": "de8ee5824d937e15c4b24a32a5844fe1", "score": "0.5666336", "text": "def get_key_ndarray(self, key):\n return self.bstr_to_ndarray(self._client.get(key))", "title": "" }, { "docid": "4e62d908b68ea7a40e35f443783490e2", "score": "0.5632855", "text": "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "title": "" }, { "docid": "4e62d908b68ea7a40e35f443783490e2", "score": "0.5632855", "text": "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "title": "" }, { "docid": "2769ee83b6ab0e2a8a3d7ff6b691968e", "score": "0.562029", "text": "def to_ndarray(self):", "title": "" }, { "docid": "4aaf99a2f79fc57382ede1f74fa80983", "score": "0.56192243", "text": "def to_array(self, **kwargs):", "title": "" }, { "docid": "425756974a3d5af9c1401d45796fa9bd", "score": "0.56039935", "text": "def cupy_ndarray(nb_arr):\n import cupy\n return cupy.ndarray(nb_arr.shape, dtype=cupy.uint8,\n strides=nb_arr.strides,\n memptr=cupy_cuda_MemoryPointer(nb_arr))", "title": "" }, { "docid": "8099c53dceb67574de74a1c0516adf81", "score": "0.5597376", 
"text": "def to_numpy(self) -> None:\n for k, v in self.items():\n if isinstance(v, torch.Tensor):\n self.__dict__[k] = v.detach().cpu().numpy()\n elif isinstance(v, Batch):\n v.to_numpy()", "title": "" }, { "docid": "64bc1be5622b95b0baaee0397ea2bcc9", "score": "0.5572673", "text": "def policy_to_array(policy, shape=(4,4)):\n array_policy = np.zeros(shape)\n for key, action in policy.iteritems():\n best_a = 0\n for a in xrange(nAction):\n if policy[key][a] > policy[key][best_a]:\n best_a = a\n\n array_policy[key[0]][key[1]] = best_a\n\n return array_policy", "title": "" }, { "docid": "138941e6079a918134f3d88f3d59bba7", "score": "0.55547583", "text": "def _unwrap_aclu_epoch_values_dict_to_array(mean_epochs_all_frs):\n aclus = list(mean_epochs_all_frs.keys())\n values = np.array(list(mean_epochs_all_frs.values())) # \n return aclus, values # values.shape # (108, 36)", "title": "" }, { "docid": "e3c8ffa8efaeef77a60f0775ed8034ac", "score": "0.5538326", "text": "def get_predictions(estimator, states):\n predict_input_fn = lambda: numpy_predict_fn(states)\n prediction = estimator.predict(input_fn=predict_input_fn)\n list_predictions = [p['logits'] for p in prediction]\n np_array_prediction_values = np.array(list_predictions)\n return np_array_prediction_values", "title": "" }, { "docid": "d0196b6763262f969d75f8e6657171ca", "score": "0.5500729", "text": "def dict_to_array(dictionary, order):\n\tres = []\n\tfor key, data in dictionary.items():\n\t\tentry = [key]\n\t\tentry.extend([data[item] for item in order])\n\t\tres.append(entry)\n\treturn res", "title": "" }, { "docid": "781bb9368a4d998befdcbf5da5076257", "score": "0.54993504", "text": "def gen_arr(embeddings, seq_id_to_label):\n # keys = embeddings.files\n output, labels, ids = [], [], []\n for key in embeddings:\n # print(embeddings[key])\n d = embeddings[key]\n labels.append(seq_id_to_label[key])\n output.append(d)\n ids.append(key)\n return np.array(output), labels, ids", "title": "" }, { "docid": "ad4fb7d2a2454d5433056ad9795752d5", "score": "0.54954565", "text": "def to_np(var):\n return var.detach().cpu().numpy()", "title": "" }, { "docid": "46f841666cb4073d1a922916043e594f", "score": "0.5491247", "text": "def return_np_array(args) -> np.array:\n\n return np.array(args)", "title": "" }, { "docid": "5dc4fefa18ca1a5af40bda83ff181e43", "score": "0.5470624", "text": "def counter_to_array(counter):\n keys = counter.keys()\n vals = counter.values()\n tot = np.sum(vals)\n arr = np.zeros(tot, dtype=int)\n i = 0\n for key, val in zip(keys, vals):\n arr[i:i + val] = key\n i += val\n return arr", "title": "" }, { "docid": "a992ba82d2b8673d9baab0065e26f463", "score": "0.54561305", "text": "def _arr(x, keys):\n arr = []\n for key in keys:\n val = x[key]\n try:\n arr.extend(val)\n except Exception:\n arr.append(val)\n arr = np.asarray(arr)\n return arr", "title": "" }, { "docid": "ce57e140e00a25e53bf9720eae0412ae", "score": "0.5448542", "text": "def string_to_array(nts, lookup_dict=_DEFAULT_NTDICT_):\n labels = [lookup_dict[ch] for ch in nts if (ch in lookup_dict.keys())]\n return np.array(labels, dtype=np.int32)", "title": "" }, { "docid": "a71acc1db5a1118bf55b876f8a92db78", "score": "0.5445526", "text": "def to_numpy(self, dtype=None, copy=False, na_value=None):\n out_data = self.__to_dict_impl(orient=\"list\", include_index=False)\n return np.array(list(out_data.values()), dtype=dtype).T", "title": "" }, { "docid": "ee2a8ca4b64d95078f60def91da0ebc5", "score": "0.5423703", "text": "def as_array(counts, k):\n return np.array([counts[''.join(s)] for 
s in\n itertools.product('ACGT', repeat=k)])", "title": "" }, { "docid": "033e728c6b3b057e87ce923896a77908", "score": "0.5392687", "text": "def toarray(arr):\n try:\n return arr.todense()\n except AttributeError:\n return asarray(arr)", "title": "" }, { "docid": "777189737b6f8829422e5e706772819a", "score": "0.5391778", "text": "def to_numpy(self):\n return type(self)(ten2ar(self.mu), ten2ar(self.log_sigma))", "title": "" }, { "docid": "56d36c1d03255d311b1eb5a785e7c18b", "score": "0.53792375", "text": "def to_numpy(po: pd.DataFrame) -> np.ndarray:\n try:\n return po.to_numpy()\n except AttributeError:\n return po.values", "title": "" }, { "docid": "7b3eebf5f26d1646d187fab82911959e", "score": "0.5375303", "text": "def map_to_arrays(m):\n length = len(m)\n x = np.empty(length)\n y = np.empty(length)\n for k, i in zip(sorted(m.keys()), range(length)):\n x[i] = k\n y[i] = m[k]\n return x, y", "title": "" }, { "docid": "52884181156754d2b3cd9d277e34ced5", "score": "0.5363327", "text": "def json_to_ndarray(payload: NdArray) -> np.ndarray:\n arr = np.array(payload.array)\n if payload.shape:\n arr = arr.reshape(*payload.shape)\n if payload.dtype:\n arr = arr.astype(payload.dtype)\n return arr", "title": "" }, { "docid": "d786b4723a421af307b0e9c62babe4df", "score": "0.5354856", "text": "def to_numpy(in_array: Union[np.ndarray, torch.Tensor]) -> np.ndarray:\n if isinstance(in_array, torch.Tensor):\n in_array = in_array.cpu().numpy()\n\n np_array = cast(np.ndarray, in_array)\n\n return np_array", "title": "" }, { "docid": "50d1f7ab787e9bd5ef0689261c8b0fa8", "score": "0.53488183", "text": "def posterior_predictive_to_xarray(self):\n data = self.posterior_predictive\n dims = {var_name: self.sample_dims + self.dims.get(var_name, []) for var_name in data}\n return dict_to_dataset(\n data, library=pymc, coords=self.coords, dims=dims, default_dims=self.sample_dims\n )", "title": "" }, { "docid": "c3f30a929605830f535fccf2f047b3d7", "score": "0.534839", "text": "def get_predictions(predictions):\r\n pred_y, pred_scores = [], []\r\n val = next(predictions, None)\r\n while val is not None:\r\n pred_y.append(val['classes'])\r\n pred_scores.append(val['probabilities'])\r\n val = next(predictions, None)\r\n return np.array(pred_y), np.array(pred_scores)", "title": "" }, { "docid": "ce161f83f56f98e9fe7171e339aaf8bb", "score": "0.53476906", "text": "def numpy_ndarray(nb_arr):\n return nb_arr.copy_to_host()", "title": "" }, { "docid": "ae2bf98b5c8a696cdd12334870479786", "score": "0.53096044", "text": "def tensor_to_np(tensor):\n return tensor.permute(1, 2, 0).numpy()", "title": "" }, { "docid": "5bba23b67a678aa539093b6e198d325f", "score": "0.53067946", "text": "def klasses_binary(self):\n (_, data, _, _) = self.data\n return np.asarray(data)", "title": "" }, { "docid": "6bd29d79f29cf8b016268b86055551a8", "score": "0.5296831", "text": "def value_to_array(V, shape=(4,4)):\n array_V = np.zeros(shape)\n for key, value in V.iteritems():\n array_V[key[0]][key[1]]=value\n\n return array_V", "title": "" }, { "docid": "e1b5e53caed2de5edf0060e191aecd8e", "score": "0.5292031", "text": "def toarray(arr):\n try:\n return arr.todense()\n except AttributeError:\n return _asarray(arr)", "title": "" }, { "docid": "aaa70ef8790b88ebf0988866fb84a8a7", "score": "0.52824736", "text": "def __array__(self, dtype=None):\n return self.to_numpy()", "title": "" }, { "docid": "f95fa954b56fddbe146b4c9bf37cecb8", "score": "0.52746457", "text": "def numpy_array(self):\n if self.arrC is not None:\n return self.arr.get()+self.arrC.get()*1.0j\n return 
self.arr.get()", "title": "" }, { "docid": "60a442b612375938598410c75754c596", "score": "0.5266783", "text": "def _convert_evals_to_numpy(raw_evals, key):\n # get rid of dictionaries\n evals = [val[key] if isinstance(val, dict) else val for val in raw_evals]\n # get rid of pandas objects\n evals = [np.array(val) if isinstance(val, pd.Series) else val for val in evals]\n\n # find out the correct output shape\n try:\n array = next(x for x in evals if hasattr(x, \"shape\") or isinstance(x, dict))\n out_shape = array.shape\n except StopIteration:\n out_shape = \"scalar\"\n\n # convert to correct output shape\n if out_shape == \"scalar\":\n evals = [np.atleast_1d(val) for val in evals]\n else:\n for i in range(len(evals)):\n if isinstance(evals[i], float) and np.isnan(evals[i]):\n evals[i] = np.full(out_shape, np.nan)\n\n return evals", "title": "" }, { "docid": "37e6033b3c4a70927d429a223e7834fc", "score": "0.52635384", "text": "def numpy_ndarray(pd_ser, nan_to_null=False):\n return pd_ser.to_numpy()", "title": "" }, { "docid": "ba27b1258fc170f7438283a31f29ad1c", "score": "0.5261366", "text": "def to_numpy(self, **kwargs):\n return [p.to_numpy(**kwargs) for p in self.providers]", "title": "" }, { "docid": "b7ada9dd259b32c956026059d4bb5771", "score": "0.5259715", "text": "def to_numpy(x: t.Union[np.ndarray, torch.tensor]) -> np.ndarray:\n return (x.cpu().numpy()).astype(np.float32) if torch.is_tensor(x) else x", "title": "" }, { "docid": "c0486a3899af2ae129ef2068356ef7b5", "score": "0.5244956", "text": "def to_ndarray(item):\n \n return type(item), sp.array(item, sp.float64, ndmin=1)", "title": "" }, { "docid": "9886b95dc17b9a732542cc8b3c6b9e27", "score": "0.52355564", "text": "def make_into_weight_arrays(result: dict):\n\n data = np.zeros((len(result), 3))\n\n for idx in range(len(result)):\n data[idx][0] = result[idx]['weights']['wto']\n data[idx][1] = result[idx]['weights']['wf']\n data[idx][2] = result[idx]['weights']['wpl']\n\n return data", "title": "" }, { "docid": "9290171ced841fd1b75ae75aadef71c6", "score": "0.5218645", "text": "def _probabilities(\n self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]\n ) -> Union[np.ndarray, SparseArray]:\n raise NotImplementedError", "title": "" }, { "docid": "9e678744c27d3e3d03665e094efacdaa", "score": "0.52183974", "text": "def to_array(x):\n if hasattr(x, 'todense'):\n return np.array(x.todense())\n if hasattr(x, 'cpu'):\n return x.data.cpu().numpy()\n return x", "title": "" }, { "docid": "3f4b0db567f108d7d4d1cd2c4619ef7b", "score": "0.52145636", "text": "def vector_to_numpy_array(vector: Vector) -> np.array:\n if isinstance(vector, Vector):\n return np.array(vector.values)\n else:\n raise ValueError(\"Not a vector!\")", "title": "" }, { "docid": "7852ebd07cc3dd944016ddac9933a708", "score": "0.5213827", "text": "def priors_to_xarray(self):\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.observations is not None:\n prior_predictive_vars = list(set(self.observations).intersection(self.prior))\n prior_vars = [key for key in self.prior.keys() if key not in prior_predictive_vars]\n else:\n prior_vars = list(self.prior.keys())\n prior_predictive_vars = None\n\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {k: np.expand_dims(self.prior[k], 0) for k in var_names},\n library=pymc,\n coords=self.coords,\n dims=self.dims,\n )\n )\n return 
priors_dict", "title": "" }, { "docid": "537b15d7b798783b43b77b15d3b0ac3f", "score": "0.52123576", "text": "def var_to_np(x):\n if RUN_ON_GPU:\n x = x.cpu()\n return x.data.numpy()", "title": "" }, { "docid": "537b15d7b798783b43b77b15d3b0ac3f", "score": "0.52123576", "text": "def var_to_np(x):\n if RUN_ON_GPU:\n x = x.cpu()\n return x.data.numpy()", "title": "" }, { "docid": "1afbe15ace8ffde47d807132f9ae5c72", "score": "0.52104324", "text": "def _probs_and_log_probs_np(probs):\n to_return = ()\n p = np.array(probs)\n to_return += (1 - p, p)\n to_return += (np.log1p(-p), np.log(p))\n return to_return", "title": "" }, { "docid": "84c2a327113f2adf8b285845e3471e02", "score": "0.520424", "text": "def to_numpy(tensor):\n return np.copy(tensor)", "title": "" }, { "docid": "c01b3a31e22bbc02ca98cf8a3dc9e64a", "score": "0.5183275", "text": "def to_arrays(self):\n cdef int size = self._map.size()\n cdef np.ndarray[D_t, ndim=1] keys = np.empty(size, dtype=Dt)\n cdef np.ndarray[V_t, ndim=1] values = np.empty(size, dtype=np.float64)\n self._to_arrays(keys, values)\n return keys, values", "title": "" }, { "docid": "61a6334b90eab994fbdc5f28ffd7cd79", "score": "0.51803404", "text": "def to_array(tensor):\n tensor_ = tensor.squeeze()\n\n unnormalize_transform = Compose([Normalize(mean=[0, 0, 0],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.225]),\n Normalize(mean=[-0.485, -0.456, -0.406],\n std=[1, 1, 1])])\n arr_ = unnormalize_transform(tensor_)\n arr = arr_.permute(1, 2, 0).detach().numpy()\n\n return arr", "title": "" }, { "docid": "c83f5c62ddc35610b33dbcadb668c0af", "score": "0.51670134", "text": "def _ndarray_values(self):\n # type: () -> np.ndarray\n return np.array(self)", "title": "" }, { "docid": "8cd941a74bc49857f73c82913771ee11", "score": "0.5164753", "text": "def to_numpy(tensor: th.Tensor, flatten=False) -> np.ndarray:\n if flatten:\n tensor.squeeze_(-1)\n return tensor.detach().cpu().numpy()", "title": "" }, { "docid": "867a1134d7e2ce93057f7adb3325515f", "score": "0.51640046", "text": "def sample(prediction):\n p = np.zeros(shape=[1, VOCABULARY_SIZE], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "title": "" }, { "docid": "5563c4378d8ee1ebcad7196e523055cd", "score": "0.5161914", "text": "def to_numpy(img: ndarray) -> ndarray:\n\n h = img.shape[0]\n w = img.shape[1]\n ret_img = np.zeros((h, w, 3))\n\n for y in range(0, h):\n for x in range(0, w):\n ret_img[y, x] = np.array(decimalToGCM(img[y, x]))\n\n # return the thresholded image\n return ret_img", "title": "" }, { "docid": "41a719542ac33e7be54168a09cf2d173", "score": "0.5161229", "text": "def predictions_to_xarray(self):\n data = self.predictions\n dims = {var_name: self.sample_dims + self.dims.get(var_name, []) for var_name in data}\n return dict_to_dataset(\n data, library=pymc, coords=self.coords, dims=dims, default_dims=self.sample_dims\n )", "title": "" }, { "docid": "f8f5a9492a8bb60d709f8b0082e42727", "score": "0.5141708", "text": "def wcxf2arrays(d):\n C = {}\n for k, v in d.items():\n name = k.split('_')[0]\n s = C_keys_shape[name]\n if s == 1:\n C[k] = v\n else:\n ind = k.split('_')[-1]\n if name not in C:\n C[name] = np.zeros(s, dtype=complex)\n C[name][tuple([int(i) - 1 for i in ind])] = v\n return C", "title": "" }, { "docid": "7c85b5f2ea6eacadab17f3555c331eb9", "score": "0.5125818", "text": "def _C_as_ndarray(self, dataobj):\n shape = tuple(dataobj._obj.size[i] for i in range(self.ndim))\n ctype_1d = dtype_to_ctype(self.dtype) * int(reduce(mul, shape))\n buf = cast(dataobj._obj.data, 
POINTER(ctype_1d)).contents\n return np.frombuffer(buf, dtype=self.dtype).reshape(shape)", "title": "" }, { "docid": "a2fe5ab7702ebad44650d76a21b8e8a3", "score": "0.5121655", "text": "def _as_array(nested):\n\n def __as_array(x):\n if np.isscalar(x):\n return np.array(x)\n return x\n\n return nest.map_structure(__as_array, nested)", "title": "" }, { "docid": "5e0e888316c25ddd9538046d2581a2cf", "score": "0.51154757", "text": "def tensor_to_numpy(t):\n arr = t.detach().cpu().numpy()\n return arr", "title": "" }, { "docid": "8f3ba374904db4e15ca38d5decd6c67f", "score": "0.5104118", "text": "def numpy(self) -> np.ndarray:\n return self.C", "title": "" }, { "docid": "a27ec4d3b54a77c798f97596c29235f6", "score": "0.5103691", "text": "def to_numpy(self):\n\n return self.to_pandas().to_numpy()", "title": "" }, { "docid": "08acadf66c0ea6c60ccb6fcc10d374f5", "score": "0.5094192", "text": "def convert_dict_to_matrix(gdf):\n\n samples = sp.concatenate(gdf.values())\n rval = sp.zeros((len(samples), 2))\n rval[:, 0] = samples\n idx = 0\n ids = sp.zeros(0)\n for key in sorted(gdf.keys()):\n ids = sp.concatenate((ids, sp.ones((len(gdf[key]))) * idx))\n idx += 1\n rval[:, 1] = ids\n rval = sortrows(rval)[:, [1, 0]]\n return rval", "title": "" }, { "docid": "214f7628207547174a0ac38fcc5d87ec", "score": "0.5089382", "text": "def to_ndarray(self, **kwargs):\n return np.asarray(self.copy())", "title": "" }, { "docid": "7838d19b3acfd930b3bac9eae4d81db7", "score": "0.50893474", "text": "def klasses_non_binary(self):\n (_, _, data, _) = self.data\n return np.asarray(data)", "title": "" }, { "docid": "4270959670f9eba130a89b35a9b67610", "score": "0.5086882", "text": "def binary_entropy_np(probs):\n probs0, probs1, log_probs0, log_probs1 = _probs_and_log_probs_np(probs)\n return -1. 
* (\n np.where(probs0 == 0, 0, np.multiply(log_probs0, probs0)) +\n np.where(probs1 == 0, 0, np.multiply(log_probs1, probs1)))", "title": "" }, { "docid": "2932d72269afe9364a3c86ab2f38e907", "score": "0.50840074", "text": "def gen_an_aug(self, results: Dict) -> np.ndarray:\n\n all_kps = results['keypoint'].astype(np.float32)\n kp_shape = all_kps.shape\n\n if 'keypoint_score' in results:\n all_kpscores = results['keypoint_score']\n else:\n all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32)\n\n img_h, img_w = results['img_shape']\n\n # scale img_h, img_w and kps\n img_h = int(img_h * self.scaling + 0.5)\n img_w = int(img_w * self.scaling + 0.5)\n all_kps[..., :2] *= self.scaling\n\n num_frame = kp_shape[1]\n num_c = 0\n if self.with_kp:\n num_c += all_kps.shape[2]\n if self.with_limb:\n num_c += len(self.skeletons)\n\n ret = np.zeros([num_frame, num_c, img_h, img_w], dtype=np.float32)\n\n for i in range(num_frame):\n # M, V, C\n kps = all_kps[:, i]\n # M, C\n kpscores = all_kpscores[:, i] if self.use_score else \\\n np.ones_like(all_kpscores[:, i])\n\n self.generate_heatmap(ret[i], kps, kpscores)\n return ret", "title": "" }, { "docid": "7e61f3dce3570c3384a9f532298e46d4", "score": "0.50658816", "text": "def as_c_array(self):\n pyarr = self.as_array()\n return (ctypes.c_double * len(pyarr))(*pyarr)", "title": "" }, { "docid": "5fddf6570d7994f93fbd5f30ba36d25f", "score": "0.5060462", "text": "def _sample_from_policy(cls, policy: torch.FloatTensor) -> np.array:\n softmax = F.softmax(policy, dim=1)\n return torch.multinomial(softmax,\n num_samples=1).data", "title": "" }, { "docid": "46ade162c3025b53ab686e5748e4f850", "score": "0.5043946", "text": "def _im2np(im):\n w = int(im['width'])\n h = int(im['height'])\n d = 1\n bp = int(im['bitpix'])\n dtype = _bp2np(bp)\n dlen = h * w * abs(bp) // 8\n if js9Globals['retrieveAs'] == 'array':\n s = im['data'][0:h*w]\n if d > 1:\n arr = numpy.array(s, dtype=dtype).reshape((d, h, w))\n else:\n arr = numpy.array(s, dtype=dtype).reshape((h, w))\n elif js9Globals['retrieveAs'] == 'base64':\n s = base64.decodebytes(im['data'].encode())[0:dlen]\n if d > 1:\n arr = numpy.frombuffer(s, dtype=dtype).reshape((d, h, w))\n else:\n arr = numpy.frombuffer(s, dtype=dtype).reshape((h, w))\n else:\n raise ValueError('unknown retrieveAs type for GetImageData()')\n return arr", "title": "" }, { "docid": "7ae4bfdb3a6c874522e7453140770328", "score": "0.50430226", "text": "def dtype_torch_to_numpy(dtype: torch.dtype) ->np.dtype:\n return torch.empty([], dtype=dtype).numpy().dtype", "title": "" }, { "docid": "057c0577eec40043ecb37ea0b4d2bfb5", "score": "0.5043009", "text": "def load_numpy(file_path: str) -> Union[numpy.ndarray, Dict[str, numpy.ndarray]]:\n file_path = clean_path(file_path)\n array = 
numpy.load(file_path)\n\n if not isinstance(array, numpy.ndarray):\n tmp_arrray = array\n array = OrderedDict()\n for key, val in tmp_arrray.items():\n array[key] = val\n\n return array", "title": "" }, { "docid": "08291d48524b3b1bcce0aa624011eaa4", "score": "0.5041989", "text": "def observed_data_to_xarray(self):\n if self.predictions:\n return None\n return dict_to_dataset(\n self.observations,\n library=pymc,\n coords=self.coords,\n dims=self.dims,\n default_dims=[],\n )", "title": "" }, { "docid": "bd3a0384641c43949f0e9bd322a5e956", "score": "0.5037962", "text": "def to_numpy(X):\n if isinstance(X, np.ndarray):\n return X\n\n if is_pandas_ndframe(X):\n return X.values\n\n if X.is_cuda:\n X = X.cpu()\n\n if isinstance(X, Variable):\n data = X.data\n else:\n data = X\n return data.numpy()", "title": "" }, { "docid": "ad124aca9a9a8ef3bdff46e149d02257", "score": "0.5036325", "text": "def sitk2np(array_sitk):\n array_np = []\n for i in range(len(array_sitk)):\n array_np.append(sitk.GetArrayFromImage(array_sitk[i]))\n\n return array_np", "title": "" } ]
67185509529d539e7c34a5b449ea8449
Stores the given value via the specified ``overlay`` and ``key``.
[ { "docid": "e3998d155d60a5f22ecf7f83b4401d29", "score": "0.80998534", "text": "def setData(self, overlay, key, value):\n ovlDict = self.__overlayData.get(overlay, None)\n\n if ovlDict is not None:\n ovlDict[key] = value\n else:\n self.__overlayData[overlay] = {key : value}", "title": "" } ]
[ { "docid": "b6bf62ad11dfb2ed2bf62b7d4870f25d", "score": "0.60467625", "text": "def put(key, value):", "title": "" }, { "docid": "65cad391471ae0eb47c07344470a771c", "score": "0.59677273", "text": "def put(self, key, value):", "title": "" }, { "docid": "e578787b9ae54aa9ef9eb8d68402a409", "score": "0.58847845", "text": "def store(self, key, value):\r\n self.client.set(key, value, time=self.timeout)", "title": "" }, { "docid": "3c44674ca45fcd767700f331f78aaa88", "score": "0.5868344", "text": "def apply_overlay(source, overlay):\n for k, v in overlay.items():\n if type(v) == dict:\n source.setdefault(k, {})\n source[k] = apply_overlay(source[k], overlay[k])\n else:\n source[k] = v\n return source", "title": "" }, { "docid": "f59fc7311c48e46e3ac5e6d2bea3acc4", "score": "0.5831373", "text": "def store(self, key, value):\r\n raise NotImplementedError", "title": "" }, { "docid": "6b2d144f569e8ee5ad5bb3e8e80b83b0", "score": "0.582248", "text": "def save(self, key, value):\n key = os.path.join(self.root_key, key)\n self.etcd_wrapper.write(key, value)", "title": "" }, { "docid": "09ad9b2df0c8f1c8617346e15a227104", "score": "0.57936543", "text": "def store_search_value(\n self, search_id: Hashable, key: Hashable, value: Any\n ) -> None:", "title": "" }, { "docid": "0f9abe68bd78360db6099d7fe31236f3", "score": "0.57772046", "text": "def put(key):", "title": "" }, { "docid": "5f2e95ce130c73c3648c6fc44cb6e892", "score": "0.56612575", "text": "def store_data(self, key, value):\n\t\tself.data[key] = value\n\t\treturn value", "title": "" }, { "docid": "e6ba599e193444c8df0b43e29aa8d7f4", "score": "0.5542449", "text": "def __setitem__(self,key,value):\n self.iniparent.Sval(self.catg,key,value,autosave = True)", "title": "" }, { "docid": "e236b268c2037d42f67adcee89bc37a5", "score": "0.5540054", "text": "def put(self, key, value):\n f = open(self.path(key), 'w')\n f.write(value.encode('utf8'))\n f.close()", "title": "" }, { "docid": "212d1eda2f61ceb95bae17653fe517d5", "score": "0.5435588", "text": "def __set_asset(self, key, data):\r\n LOG.debug(\"%s:%s\", key, data)\r\n self.asset[key] = data", "title": "" }, { "docid": "e173be5ec9dfc771afe951bf42005876", "score": "0.5435003", "text": "def store(self, key: object, value: object):\n self._user_data.update({key: value})", "title": "" }, { "docid": "8e3193835a881db7d8d311d461011ded", "score": "0.54061186", "text": "def put(self, key, persistable):\n pass", "title": "" }, { "docid": "0d71c0c8af92b411d90262a962b9af18", "score": "0.5397544", "text": "def __setitem__(self, key, value):\n if value is not None and self._persist:\n self._persist_value(value)\n\n self._entries[key] = value", "title": "" }, { "docid": "6188782b8b0b425bc7fcea4871b769dc", "score": "0.5389499", "text": "def __setitem__(self, key, value):\n state = self.state\n state[key] = value\n self._save(state)", "title": "" }, { "docid": "42d060834764026929f5c2b7e90cc3a2", "score": "0.5372119", "text": "def getData(self, overlay, key, *args):\n if len(args) not in (0, 1):\n raise RuntimeError('Invalid arguments: {}'.format(args))\n\n ovlDict = self.__overlayData.get(overlay, {})\n\n if len(args) == 1:\n return ovlDict.get(key, args[0])\n else:\n return ovlDict[key]", "title": "" }, { "docid": "5eb5585523efb94ea96d10834e94eeed", "score": "0.5348603", "text": "def set(self, key, value):\n self.storage[key] = value", "title": "" }, { "docid": "607ce10f94178057ca53b8bd3215d197", "score": "0.53193974", "text": "def saveValue(self, id, value):\n self._state[id] = value", "title": "" }, { "docid": 
"e0b23c977ca851a10fa6a6cfe4b2ea74", "score": "0.53123623", "text": "def Put(self, key, value):\n pass", "title": "" }, { "docid": "2c0ad63ceff273fcfb381a2fa0e5741c", "score": "0.5309686", "text": "def set(cls, db, key, value, timestamp=datetime.datetime.now()):\n cls.create_or_update(db, {'timestamp' : timestamp, 'key' : key, 'value' : str(value)})", "title": "" }, { "docid": "ae17f8744f51fe4129b5c6b820f8c212", "score": "0.52994484", "text": "def save(self, key, value):\r\n if self.db is None:\r\n self.load_db()\r\n\r\n if isinstance(key, unicode) is True:\r\n log.debug(u'Key Name: {}'.format(key))\r\n log.debug(u'Key type: {}'.format(type(key)))\r\n key = str(key)\r\n db = shelve.open(self.filename)\r\n db[key] = value\r\n db.close()", "title": "" }, { "docid": "f08095b402c8044217dd85f81773c415", "score": "0.5298401", "text": "def __setitem__(self, key, value):\n k1, k2, k3 = key\n self.A[k1][k2, k3] = value", "title": "" }, { "docid": "3901bf78e0947ec06faf9d31aff77844", "score": "0.5294998", "text": "def __setitem__(self, key, value):\n encoded_key = self.key_type.encode(key)\n encoded_val = self.value_type.encode(value)\n self.session.client.hset(self.key, encoded_key, encoded_val)", "title": "" }, { "docid": "4e2951c61d8cc76864e033e5b2731f92", "score": "0.52916306", "text": "def write(self, key, value):\n self.database[key] = value\n self.commit()", "title": "" }, { "docid": "7aa168b6f6d7bd94fd3eff3b8f1ccedc", "score": "0.52891684", "text": "def __setitem__(self, key, val):\n if isinstance(val, core_model.Resource):\n self._persist_resource(key, val)\n else:\n self._persist_link(key, val)\n\n super(EntityDictionary, self).__setitem__(key, val)", "title": "" }, { "docid": "d1b8a476530df17b943fb6c4419d01f4", "score": "0.5287988", "text": "def put(self, key: int, value: int) -> None:\n x = self.arr[key % self.mod]\n found = False\n for i in range(len(x)):\n if x[i][0] == key:\n x[i] = (key, value)\n found = True\n break\n if not found:\n x.append((key, value))", "title": "" }, { "docid": "af51782d649d497a4db82a617a5bd726", "score": "0.52805144", "text": "def set_layer_data(self, id, key, value, data_id=None):\r\n if data_id is None:\r\n data_id = 'DEFAULT_DATA_ID'\r\n if data_id not in self._layer_data_values[id]:\r\n self._layer_data_values[id][data_id] = {}\r\n if isinstance(value, OutputBaseVal):\r\n self._layer_data_values[id][data_id][key] = value.get()\r\n else:\r\n self._layer_data_values[id][data_id][key] = value", "title": "" }, { "docid": "3d72d5a9586beafe594290bf3b92d162", "score": "0.5275841", "text": "def __setitem__(self, key: Tuple[int, int], value: float):\n\n self.value[key[0]][key[1]] = value", "title": "" }, { "docid": "2ecb87e3126404ce1f7eb9cd3cbed919", "score": "0.52752364", "text": "def stat_put(self, key: str, value: float) -> None:\n self.stat_dict[key] = value", "title": "" }, { "docid": "e2d777a7e18a10e4b509b59ac5f7436e", "score": "0.52732927", "text": "def set(aMap, key, value):\n bucket = get_bucket(aMap, key)\n i, k, v = get_slot(aMap, key)\n\n if i >= 0:\n # the key exists, replace it\n bucket[i] = (key, value)\n else:\n # the key does not, append to create it\n bucket.append((key, value))", "title": "" }, { "docid": "b3d1b94febc9e55646ebdf194062a6c1", "score": "0.5266557", "text": "def store(self, key, value, expires=None):\n raise NotImplementedError # pragma: no cover", "title": "" }, { "docid": "0c3a6394c8671b716c80ea752577fa09", "score": "0.5259576", "text": "def put(self, key, value):\n hashkey = sum([ord(x) for x in str(key)]) % self.size\n \n if 
self.map[hashkey] == None:\n self.map[hashkey] = [[key, value]]\n return\n else:\n for pair in self.map[hashkey]:\n if pair[0] == key:\n pair[1] = value\n return\n self.map[hashkey].append([key, value])", "title": "" }, { "docid": "e835a6d7e13ad5f28a3400751d0c06a5", "score": "0.52532655", "text": "def set(self, key, value):\n if self.prefix:\n key = \"{0}{1}\".format(self.prefix, key)\n\n if self.encoder:\n value = self.encoder.dumps(value)\n\n if hasattr(self.storage, 'set'):\n self.storage.set(key, value)\n else:\n self.storage[key] = value", "title": "" }, { "docid": "c51c01a815172e77030136bcec656daf", "score": "0.5251638", "text": "def set(self,key, value):\n self.data[key] = value\n self.save()", "title": "" }, { "docid": "c5fed4b4754e7cbcae195478f85a49ef", "score": "0.52427745", "text": "def __setitem__(self, key, value):\n self._val[key] = value", "title": "" }, { "docid": "dceeb2292c37a64b72d0a2fa25b7bfd3", "score": "0.52420604", "text": "def put(self, key, value):\n hKey=self.mHash(key)\n if self.l[hKey]==None:\n self.l[hKey]=[[key,value]]\n else:\n temp=self.l[hKey]\n for x in temp:\n if x[0]==key:\n x[1]=value\n return\n temp.append([key,value])", "title": "" }, { "docid": "bab1079af45d0d64273d648872d29879", "score": "0.52414733", "text": "def __setitem__(self, key, value):\n if not isinstance(key, Var):\n raise Exception('Keys must be Var objects!')\n self.assignment[key] = value", "title": "" }, { "docid": "403e6fab6b9138a92905ae6f6fb63e82", "score": "0.5237282", "text": "def __setitem__(self, key, value):\n self.data[key] = value", "title": "" }, { "docid": "dfdd67b127c4d41f52ea67f841223038", "score": "0.5230874", "text": "def set(key, value, userid=None, appid=None, if_match=None):", "title": "" }, { "docid": "9a325033438293d7d40671ed0a9c20a4", "score": "0.52288103", "text": "def store(bank, key, data):\n _init_client()\n etcd_key = \"{}/{}/{}\".format(path_prefix, bank, key)\n etcd_tstamp_key = \"{}/{}/{}\".format(path_prefix, bank, key + _tstamp_suffix)\n try:\n value = salt.payload.dumps(data)\n client.write(etcd_key, base64.b64encode(value))\n client.write(etcd_tstamp_key, int(time.time()))\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(\n \"There was an error writing the key, {}: {}\".format(etcd_key, exc)\n )", "title": "" }, { "docid": "7b254bbd5ffc742663395a9b76cb7358", "score": "0.5226111", "text": "def put(self, key, value, address):\n key_hash = dht_hash(key)\n self.logger.debug(\"Put: %s %s\", key, key_hash)\n\n if (contains(self.predecessor_id,self.identification,key_hash)):\n if (key not in self.keystore): \n self.keystore[key] = value \n \n self.send(address, {\"method\": \"ACK\"})\n else:\n self.send(address, {\"method\": \"NACK\"})\n\n elif (contains(self.identification,self.successor_id,key_hash)):\n self.send(self.successor_addr, {\"method\": \"PUT\", \"args\": {\"key\": key, \"value\": value, \"from\": address}})\n\n else:\n self.send(self.finger_table.find(key_hash), {\"method\": \"PUT\", \"args\": {\"key\": key, \"value\": value, \"from\": address}})", "title": "" }, { "docid": "54ff21d2db3398d588306077c1416340", "score": "0.5212679", "text": "def upsert(key, value):\n \n pass", "title": "" }, { "docid": "dafb6e049abaae9e84714f15e9404da3", "score": "0.5193869", "text": "def __setitem__(self, key, value):\r\n\r\n\t\tkey = dumps(key)\r\n\t\tvalue = dumps(value)\r\n\t\tself.data[key] = value", "title": "" }, { "docid": "dec36a46e7a6ce67fc250dee1e707583", "score": "0.5188595", "text": "def __setitem__(self,key,val):\n path 
= self.key2path(key)\n quote = urllib.quote(key,'')\n if(not os.path.exists(os.path.join(path,quote))):\n os.makedirs(path)\n open(os.path.join(path,quote),'wb').write(pickle.dumps(val))", "title": "" }, { "docid": "0c54bc317dacfa89d984bd5722e26d96", "score": "0.51679844", "text": "def put(self, key, value):\n bucket = self.get_bucket(key)\n for current in bucket:\n if current[0] == key:\n current[1] = value\n return\n bucket.append([key, value])", "title": "" }, { "docid": "c77bac25b89d218c92d93a4eb3b6007a", "score": "0.5166643", "text": "def store_value(self, key, data, expire_seconds):\n self._store_value(key, data, expire_seconds)", "title": "" }, { "docid": "dea5630cae86e21202d40d731cb4dd59", "score": "0.5162751", "text": "def put(self, key, value):\n # acquire the synchronization lock\n self.lock.acquire()\n # set the value\n self.bridge.put(key, value)\n # release the synchronization lock\n self.lock.release()", "title": "" }, { "docid": "fc019bb7fb0dcf6c3c1611b260297b92", "score": "0.51570785", "text": "def kvSet(self, key, value):\n self.__conn.execute_command('set', key, value)", "title": "" }, { "docid": "7af703b25243559ef5532d19f76f1b05", "score": "0.51486206", "text": "def put(self,_id,key=None,value=None,**pair):\n raise NotImplementedError", "title": "" }, { "docid": "e253ad7dfaf2f73d7fb760b2493e8c2b", "score": "0.514727", "text": "def __setitem__(self, key, value):\n self._cache[key] = value\n state, depth_searched = key\n\n c = self._conn.cursor()\n parameters = {\n \"white\": state.board._white_pieces,\n \"black\": state.board._black_pieces,\n \"turn\": state.turn.value,\n \"depth\": depth_searched,\n \"heuristic\": value\n }\n\n update = \"\"\"\n UPDATE transposition_table\n SET\n depth_searched=:depth,\n heuristic=:heuristic\n WHERE\n white_pieces=:white AND\n black_pieces=:black AND\n turn=:turn;\n \"\"\"\n\n insert = \"\"\"\n INSERT INTO transposition_table\n (white_pieces, black_pieces, turn, depth_searched, heuristic)\n SELECT :white, :black, :turn, :depth, :heuristic\n WHERE (SELECT CHANGES()=0);\n \"\"\"\n\n with self._lock:\n # Update existing item if exists.\n c.execute(update, parameters)\n\n # Insert if no update occurred.\n c.execute(insert, parameters)\n\n self._conn.commit()\n c.close()", "title": "" }, { "docid": "09eff2db9d18d2cc324a5b48f273f3c5", "score": "0.51431084", "text": "def __setitem__(self, key, value):\n self.put(key, value)", "title": "" }, { "docid": "8695cc9bbcac1cd53668649479dd4075", "score": "0.5141547", "text": "def set(key: str, value: str, export: bool=False):\n key_up = key.upper()\n _vars[key_up] = value\n if export:\n export_var(key_up)\n _run_var_hooks(key_up)\n _run_var_hooks(key_up, run_global_hooks=True)", "title": "" }, { "docid": "a72877b1360fe4fe9dac71f82ee9b22d", "score": "0.5137778", "text": "def set(appid, userid, key, value, if_match=None):", "title": "" }, { "docid": "9876171c398d30387f531283dba04e1b", "score": "0.5129124", "text": "def set_key(self, key, value):\n with open(self.path, \"rb\") as f:\n parser = parse(f.read())\n\n parser[\"tool\"][\"commitizen\"][key] = value\n with open(self.path, \"wb\") as f:\n f.write(parser.as_string().encode(self.encoding))\n return self", "title": "" }, { "docid": "44b9064bdc6f236bcfb43c264f4f4ea9", "score": "0.51249236", "text": "def __setitem__(self, key, value):\r\n self.set({key: value})", "title": "" }, { "docid": 
"a70306b4997c9079622fb5fa7c8d9fc1", "score": "0.5119154", "text": "def put(self, key, value):\n index = self.hash_index(key)", "title": "" }, { "docid": "ba3e6fcdb39b3ddf73b369ca8764f02e", "score": "0.5109948", "text": "def set_parameter(self, key, value):\n self.p[key] = value", "title": "" }, { "docid": "ec0fb4cb1ddf9eaa03f6a62278a27313", "score": "0.5109339", "text": "def set(self, key, value, *args, **kwargs):\n if self.cfg.jsonpickle:\n value = jsonpickle.encode(value)\n return self.conn.set(key, value, *args, **kwargs)", "title": "" }, { "docid": "6fcb1dc16dd04045272f7ad70b438f28", "score": "0.51080644", "text": "def _set(self, key, value):\n raise NotImplementedError", "title": "" }, { "docid": "75dd2b63b7d0ac80d5632dcb82773c5e", "score": "0.51048803", "text": "def set(self, key, val):\n raise NotImplementedError", "title": "" }, { "docid": "0747564d1281f9ae54b5abe772f5c030", "score": "0.5103691", "text": "def __setitem__(self, key, value):\n self._field[self._convert_key(key)] = value", "title": "" }, { "docid": "0db661f6587080c39fcaea2c3e5f523b", "score": "0.5102625", "text": "def lset(self, key, index, value):\n return self.execute_command(\"LSET\", key, index, value)", "title": "" }, { "docid": "2d5f2f7bb5ef041ca282a7d78e33cb3a", "score": "0.5097772", "text": "def set(self, key, value):\r\n pass", "title": "" }, { "docid": "c407eb6421faacb7d967f4fec6f884ac", "score": "0.5095082", "text": "def append_val(self, key, val):\n raise NotImplementedError", "title": "" }, { "docid": "1bb205d9650248701c6b02dd096f09f6", "score": "0.5094953", "text": "def put_func(self, key, func):\n return self.backend_handler.put_object(key, func)", "title": "" }, { "docid": "08a42df39b82e5cc828f23f13867b539", "score": "0.5091089", "text": "def put(self, pool: str, key: str, value: _Any, ttl: int = None) -> _Any:\n self._client.set(self._fqkn(pool, key), _pickle.dumps(value), ttl)\n\n return value", "title": "" }, { "docid": "6ade4f7d18aa60682cec4af9809309d0", "score": "0.5089943", "text": "def __setitem__(self, key, value):\n self.delegate.put_immediate(key, value)", "title": "" }, { "docid": "b4f925022209c165495921edb4dfa789", "score": "0.5086865", "text": "def put(self, key, value):\r\n self.array[key] = value", "title": "" }, { "docid": "17328090b1064080c4d3cdec8530fd1e", "score": "0.5085051", "text": "def __setitem__(\n self,\n key: bytes,\n value: bytes\n ) -> None:\n path = self.absolute_path(key)\n self.container[path] = value", "title": "" }, { "docid": "d7eb0dc5b5c0671ea3fa94f0f4508200", "score": "0.50796574", "text": "def __setitem__(key, value):", "title": "" }, { "docid": "a4913f9d8c1e25fc3a52c77881a6de0f", "score": "0.5079134", "text": "def __setitem__(self, key, value):\n key.__hash__() # See that it's hashable, otherwise it's not a key\n self.redis.zadd(self.name, value + 0.0, self.serializer.dumps(key))", "title": "" }, { "docid": "48a64cd35d0ec070161a741bc1cb2c5c", "score": "0.5062456", "text": "def put(key, value):\n if oob_chars.match(key):\n raise DiscoError(\"OOB key contains invalid characters (%s)\" % key)\n if value is not None:\n file(Task.oob_file(key), 'w').write(value)\n OOBData(key, Task)", "title": "" }, { "docid": "4435e6779bc6768e60fe1c3fbf5da741", "score": "0.50612134", "text": "def Set(self, key, value):\n self._parser.set(self._PREF_SECTION, key, value)", "title": "" }, { "docid": "f22e6d595ab65a510c285ca174268aaf", "score": "0.50482297", "text": "def setOverlay(self, overlay):\n overlay = bool(overlay)\n if overlay != self._overlay:\n self._overlay = overlay\n 
self._updated(ItemChangedType.OVERLAY)", "title": "" }, { "docid": "b0f97703ce7bf2dc7be27b5dac7303e3", "score": "0.50471944", "text": "def __setitem__(self, key, data):\n return self.put(key, data)", "title": "" }, { "docid": "d3a64fc723c1168eef842835a93de9e7", "score": "0.5042733", "text": "def assign(self, key, value):\n current_value = self.get(key)\n if current_value == value:\n return\n self.__update_num_equal_to(current_value, value)\n self.__update_current_transaction(key, current_value)\n self.entries[key] = value", "title": "" }, { "docid": "12cc9703d481c6b04ee35e9774c389bb", "score": "0.5029738", "text": "def put(self, key: int, value: int) -> None:\n hashKey = key % self.hashValue\n self.Map[hashKey].update(key,value)", "title": "" }, { "docid": "50e1ce072e5fb1c934c56196fe92447b", "score": "0.50205773", "text": "def _append_to(self, key, value, container):\n\n if key in container:\n container[key].append(value)\n else:\n container[key] = [value]", "title": "" }, { "docid": "82fa13098d20678d41c5efc2c8a54a72", "score": "0.50169486", "text": "def put(self, key, val):\n if key == None:\n raise Exception(\"key is None\")\n if val == None:\n self.delete(key)\n return None\n self.root = self.__put(self.root, key, val)", "title": "" }, { "docid": "59161a5c408d06788b64c8bb18300cf3", "score": "0.5016568", "text": "def set(self, key, value):\n\n command = 'SET {0} {1}'.format(key, value)\n res, output = self.exec_command(command)\n return res", "title": "" }, { "docid": "54980e60b4713575787ed6c6e566b12d", "score": "0.5015761", "text": "def record(self, key: str, value: str):\n log.debug(f'Recording key {key} value {value}')\n self.kv_dict[key] = value", "title": "" }, { "docid": "cf884ba57cd1d1cd26e71307fd9c647a", "score": "0.50105906", "text": "def __setitem__(self, key, value):\n candidate_place = self.linear_probe(key)\n if self.array[candidate_place] is None:\n self.count += 1\n self.array[candidate_place] = kv_pair(key, value)", "title": "" }, { "docid": "31b757dc62c990ef1816d1e0a4cba14e", "score": "0.5004684", "text": "def __setitem__(self, key, value):\n self.add(key, value, cleanup=None)", "title": "" }, { "docid": "3f26c71b31eec3a6087c5e6960c2cdc0", "score": "0.5002628", "text": "def put(self, key, value):\n if not self.exist(key):\n index = self._hash(key)\n self._slots[index].append((key,value))\n self._len += 1\n if self._len > self._size:\n self._expand()", "title": "" }, { "docid": "2f50c07322fe21432a121a46947ecfd2", "score": "0.5001709", "text": "def set(self, key, value):\n # we have to load the existing data because all of it is\n # serialized into 1 json'ed dict when we store it again\n self.read()\n self.data[key] = value", "title": "" }, { "docid": "876e436290c2d393f1112a4fa19d84e3", "score": "0.50016356", "text": "def append_to(self, key, value):\n # find base\n if isinstance(key, (list, tuple)):\n base = self.store\n for key_part in key[:-1]:\n if key_part not in base:\n # XXX - TODO: - wtf to do here?\n if False:#create_base:\n base[key_part] = {}\n else:\n raise Exception('key_part not in base: {}'.format(key))\n base = base[key_part]\n key = key[-1]\n else:\n base = self.store\n\n base[key].append(value)\n\n self.save()", "title": "" }, { "docid": "066ef2b0ba1150d1125da4e7afaf2d4d", "score": "0.49980336", "text": "def __setitem__(self, k, v):\n self.db[k] = UTSubsys.from_value(v)", "title": "" }, { "docid": "14db03fe01a70b9018b6c731a2555bc3", "score": "0.49929792", "text": "def __setitem__(self, key: Any, value: Any) -> None:\n self._rbt.insert(key=key, 
data=value)", "title": "" }, { "docid": "08301fc4754315db32e033b2cdccd801", "score": "0.49825755", "text": "def set(self, key, val):\n if not isinstance(key, (str)):\n raise TypeError\n\n bucket = self._storage[self._hash(key)]\n key_found = False\n for index, (t_key, t_val) in enumerate(bucket):\n if key == t_key:\n # if the key is already in the bucket update\n self._storage[self._hash(key)][index] = (key, val)\n key_found = True\n break\n if key_found is False:\n self._storage[self._hash(key)].append((key, val))", "title": "" }, { "docid": "a5c6320de184c94171539431c1b7264c", "score": "0.49797976", "text": "def put(self, key, value):\n self._assert_open()\n key = _to_bytes(key, \"key\")\n value = _to_bytes(value, \"value\")\n _logwriter_put(self._log, len(key), key, len(value), value)", "title": "" }, { "docid": "ef6528d0d7dc9df930f5bcb944d78297", "score": "0.4977101", "text": "def set_layer(self, id, value):\r\n if isinstance(value, LayerBaseVal):\r\n self._layer_values[id] = value.get()\r\n else:\r\n self._layer_values[id] = value", "title": "" }, { "docid": "bd6e0d070a4316d67fe79363df130916", "score": "0.49758032", "text": "def add_key(key, value):\n with shelve.open(config.storage_namekv) as db:\n db[str(key)] = value", "title": "" }, { "docid": "88c6c9a42cc775214cd253a28df3cb47", "score": "0.49735078", "text": "def put(self, key, value):\r\n if self.__getitem__(key) is None:\r\n self._data[key] = [value]\r\n else:\r\n self._data[key].append(value)", "title": "" }, { "docid": "9686ae7983e2707a1a999bbc7a6d98b1", "score": "0.49718443", "text": "def __setitem__(self, key: KT, value: VT):\n ...", "title": "" }, { "docid": "c97397a19faacb18b90ef4e96bbad612", "score": "0.49684113", "text": "def set(\n self,\n key: bytes,\n value: bytes,\n *,\n store_batch=None\n ) -> None:\n path = self.absolute_path(key)\n self.container.set(path, value, store_batch=store_batch)", "title": "" }, { "docid": "5c686fc25fb4405d73df0d1791e9e52d", "score": "0.49668607", "text": "def set(key, val):\n CopyBuffer._copy_buffer[key] = val", "title": "" }, { "docid": "3cd999c598b02a7ba464dd9498ab2c2c", "score": "0.4965663", "text": "def set_val(self, key, val):\n raise NotImplementedError", "title": "" }, { "docid": "bd309ff18106834debe8cdf7ea42c672", "score": "0.49655473", "text": "def addData(self, key, datum):\n self.data[key] = datum", "title": "" } ]
a406a3ccb0968cd4c5d2af4f392707c4
Get the first element returned by parse_baseline.
[ { "docid": "576c1609710a86c974ee0caefe1fb4c0", "score": "0.7085118", "text": "def get_baseline_0(row: element.Tag) -> Optional[str]:\n try:\n return parse_baseline(row)[0].strip()\n except Exception as e:\n logger.debug(\"Function get_baseline_0 for row %s : %s\", row, e)\n return None", "title": "" } ]
[ { "docid": "710e2239fe3c5649e4a0b2809f187014", "score": "0.7128509", "text": "def get_baseline_1(row: element.Tag) -> Optional[str]:\n try:\n return parse_baseline(row)[1].strip()\n except Exception as e:\n logger.debug(\"Function get_baseline_1 for row %s : %s\", row, e)\n return None", "title": "" }, { "docid": "09e3cd8e5a21cbebfec053b87ad51dd6", "score": "0.69373304", "text": "def first(self):\n\t\tif self.begin is None:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self.begin.value", "title": "" }, { "docid": "6c6aedef0b2e964760d7cfb229d29cee", "score": "0.6690186", "text": "def get_first(self):\n return self.get_at(0)", "title": "" }, { "docid": "8ee1df0228dc420c8c7623dfe8552ebc", "score": "0.65878475", "text": "def get_baseline_2(row: element.Tag) -> Optional[str]:\n try:\n return parse_baseline(row)[2].strip()\n except Exception as e:\n logger.debug(\"Function get_baseline_2 for row %s : %s\", row, e)\n return None", "title": "" }, { "docid": "cccfea22eae54879888b84fadf072483", "score": "0.64580125", "text": "def first_value(self):\n return self.data[self.head]", "title": "" }, { "docid": "3d28ed5b12e039dd6d386fbdc4415314", "score": "0.64490837", "text": "def first_start(self):\n return min([start for start, end in self.spans])", "title": "" }, { "docid": "9e6cb49a12ac8689d22ae1b2b5f21f1f", "score": "0.6427783", "text": "def first(self):\n return self.samples[0]", "title": "" }, { "docid": "1543e0b769f2adc4b1b39d924d28c2f5", "score": "0.63309765", "text": "def first(self):\n result = super().first()\n try:\n if len(result) == 1:\n return result[0]\n except:\n return result", "title": "" }, { "docid": "f2dc5cfb108b5d7017fdc2a3c402d2d1", "score": "0.6327398", "text": "def get_first(self):\n if self.is_empty():\n return None\n else:\n return self.__header.get_next()", "title": "" }, { "docid": "a1df24123fcf3340b76b9446ed292e70", "score": "0.6306911", "text": "def peek_first(self):\n _, first_batch = self._ensure_first_batch()\n return first_batch[0]", "title": "" }, { "docid": "8c43e400388094ab9867eb29cd327a98", "score": "0.62805206", "text": "def parse_baseline(row: element.Tag) -> Optional[List[str]]:\n try:\n return (\n row.find(\"p\", {\"class\": \"elco-baseline\"})\n .text.replace(\"\\n\", \"\")\n .replace(\"\\t\", \"\")\n .split(\".\")\n )\n except Exception as e:\n logger.debug(\"Function parse_baseline for row %s : %s\", row, e)\n return None", "title": "" }, { "docid": "e901a08aa2c579af18dc2ab201aa04ec", "score": "0.6251123", "text": "def first(self):\n return self.tagslice(0, 1)", "title": "" }, { "docid": "50d60eeabda6e077db3143ee60fbfe57", "score": "0.62467295", "text": "def peek(self):\n if self.arr:\n return self.arr[0]", "title": "" }, { "docid": "0a71bf088ee4b1d831ecd09005df0a26", "score": "0.6221597", "text": "def first(self):\n if self.items:\n return self.items[0]", "title": "" }, { "docid": "e0b87006d597938e4968186fe563221a", "score": "0.6214531", "text": "def first (self) :\n try :\n return first (self)\n except IndexError :\n return None", "title": "" }, { "docid": "8fd6f38c13d5b38a54ca736e9baddd79", "score": "0.620454", "text": "def get_head(self):\n return self._segments[0]", "title": "" }, { "docid": "26578e7177ced2575a621abca3ba1c5a", "score": "0.6141231", "text": "def get_baseline_sample(self):\n parameters = self.get_parameters()\n sample = {}\n for p in parameters:\n baseline = p.baseline\n if baseline is None: raise RuntimeError(f'{p} has no baseline value') \n sample[p.index] = baseline\n return pd.Series(sample)", "title": "" }, { "docid": 
"75c51afce433f4751dd1b5d772e96407", "score": "0.60780454", "text": "def first(self) -> pa.Array:\n return ListChunk.element(self, 0)", "title": "" }, { "docid": "a18f6639a018e2992f08d56eed1181b7", "score": "0.6074708", "text": "def peek(self):\n return self._ary.get_first()", "title": "" }, { "docid": "f16687844530e14fb90802a4ca887307", "score": "0.6073662", "text": "def first(self):\n return next(self.data(sorted_by='energy', name='Sample'))", "title": "" }, { "docid": "729418649c95585555e6ae3daa6cc977", "score": "0.6068123", "text": "def first(self):\n if self.head == None:\n return None\n else:\n return self.head.value", "title": "" }, { "docid": "aa165978eb8b5401e45b7fbc97f671c8", "score": "0.6058903", "text": "def first(self):\n return self._data.first()", "title": "" }, { "docid": "4d453624174a9bd37ffac980b804e1c2", "score": "0.6058266", "text": "def first(self):\n\n if self.head is None:\n return None\n return self.head.value", "title": "" }, { "docid": "6fa3b694fe0c53722080cda676958203", "score": "0.604163", "text": "def _first_result(self):\n try:\n return self.response['items'][0]\n except KeyError:\n return None", "title": "" }, { "docid": "50e1b3b129b0c040f27142cdfdc09fbc", "score": "0.60217494", "text": "def first_value(self):\n if self.filled:\n return next(iter(self))\n else:\n return None", "title": "" }, { "docid": "ae6ba243339a8e88f1bd6ac8e88a5743", "score": "0.60005623", "text": "def getStart(self):\n return self.val[0]", "title": "" }, { "docid": "ccea9e27e8f628714c805d278817b173", "score": "0.5999604", "text": "def top(self): #3 DONE\n if self.is_empty():\n return None\n else:\n return self.array [0][1]", "title": "" }, { "docid": "a32ef3a74bdd265d38db40fabd5af987", "score": "0.5983396", "text": "def peek_first_item(self):\n if self.root is None:\n raise ListEmptyError()\n else:\n return self.root.data", "title": "" }, { "docid": "243c873fbe2f07d98465b04d581fa9a9", "score": "0.59776497", "text": "def first(a):\n return a[0] if a else None", "title": "" }, { "docid": "d258eb4ef890c245b5b4e0d829ca5180", "score": "0.59640396", "text": "def first(self):\n return self._first", "title": "" }, { "docid": "a2f64342281ef369fd328b86f69ebe4c", "score": "0.5946894", "text": "def peek(self):\n if not self._array:\n return None\n return self._array[0]", "title": "" }, { "docid": "2ad20d2e89d1a2ef5fcf9fedd1745ced", "score": "0.5941803", "text": "def first():", "title": "" }, { "docid": "cc54142dda64eaf68e83b9d0aa61aa3d", "score": "0.59395105", "text": "def first(self):\n raise NotImplementedError()", "title": "" }, { "docid": "ac8d807386bbcc59cc22facc3dd2c3cb", "score": "0.5925241", "text": "def peek(self):\n assert(len(self) > 0)\n return self._arr[0]", "title": "" }, { "docid": "7973cf1e6b1cd896bb604dac6eb4ec20", "score": "0.59048754", "text": "def first(self):\n for i in self:\n return i", "title": "" }, { "docid": "d75f68e26743cda29385f08d84992194", "score": "0.5902272", "text": "def first(self):\n return self._first", "title": "" }, { "docid": "4395c865d76d0376246ad74be9ceb5b0", "score": "0.58983415", "text": "def first(self):\n if self.is_empty():\n raise Empty('stack is empty')\n return self._head._element", "title": "" }, { "docid": "cbdf1ed2e64b5fd1159d281e51cbd182", "score": "0.5894874", "text": "def getFirstElement():\n return list(Board)[0].value", "title": "" }, { "docid": "90d089c1a2f02ca01506d9af7195684a", "score": "0.5884135", "text": "def get_first_item(li):\n return li[0]", "title": "" }, { "docid": "cb6e86372009185e4065f90f2e287e17", "score": "0.58828145", "text": 
"def get_first(self):\n return self.left[0]", "title": "" }, { "docid": "402fdcdf3a1a51c9f6493ac84edc882a", "score": "0.58825946", "text": "def head(self):\n if self._count == 0: raise IndexError\n return self._array[self._head]", "title": "" }, { "docid": "ca22da283ece1bec87a0dfc535774977", "score": "0.5876176", "text": "def first(xs):\n return xs[0]", "title": "" }, { "docid": "bbeb9eb0d9e80fb6e93204bb6df16332", "score": "0.5870784", "text": "def first_value(self) -> float:\n return float(self[0])", "title": "" }, { "docid": "f50c529701806100d1d6eafb7a817e2c", "score": "0.58650196", "text": "def first(items):\n return items[0]", "title": "" }, { "docid": "6a87ce647397f90f84dabe6167364c9e", "score": "0.5859272", "text": "def peek(self):\n return self.s1[-1]", "title": "" }, { "docid": "99be31846bc4e7a88526ffe6659c4d2b", "score": "0.58343756", "text": "def first(self):\n pass", "title": "" }, { "docid": "225ce9407f776cd3071d663e9f4c3f6a", "score": "0.57952803", "text": "def top(self):\n if len(self.elements)>0:\n return self.elements[0]\n return None", "title": "" }, { "docid": "a80c701530a022be011b2d9f22026cd4", "score": "0.57723725", "text": "def GetFirstItem(self):\n pass", "title": "" }, { "docid": "5417dfe7e6a511f232ad0190f12278cd", "score": "0.5737916", "text": "def find_baseline(y, avgnum=50):\n \n diffs = np.zeros(len(y))\n for i in range(len(y)-1):\n for k in range(avgnum):\n if k == 0:\n total_diff = y[i]-y[i-1]\n else:\n new_diff = y[i-k] - y[i-k-1]\n total_diff = total_diff + new_diff\n avg_diff = total_diff/avgnum\n diffs[i+1] = avg_diff\n \n i_min = np.argmin(diffs[diffs!=0])\n # print(i_min, y[i_min])\n return y[i_min]", "title": "" }, { "docid": "48208c111c2478bd56c7efd189d6a341", "score": "0.5715695", "text": "def first(self, lignore=DEFAULT_IGNORE):\n return self.firstOfNotType(lignore)", "title": "" }, { "docid": "8a018fd2694c8a380b7b1d8db92a4f82", "score": "0.57063156", "text": "def first(self) -> Annotation:\n\n return self._topmost", "title": "" }, { "docid": "ef55d304bf561257ea949f89bf511514", "score": "0.57037294", "text": "def peek(self):\n if self.entries:\n return self.entries[0]", "title": "" }, { "docid": "f6b3ebb79533847458a7824b177aeafd", "score": "0.5678208", "text": "def peek(self):\n return self.st2[-1]", "title": "" }, { "docid": "624108646662ebe5cd0327e5bedd53f0", "score": "0.5677838", "text": "def peek(self):\n return self.data[0]", "title": "" }, { "docid": "497c41ded544ff4ed23d976c93a62a47", "score": "0.5668815", "text": "def best(self):\r\n if self:\r\n return self[0][0]", "title": "" }, { "docid": "497c41ded544ff4ed23d976c93a62a47", "score": "0.5668815", "text": "def best(self):\r\n if self:\r\n return self[0][0]", "title": "" }, { "docid": "3894947f588993f5628f6aea4b34b4d3", "score": "0.5664863", "text": "def _first (self):\n raise NotImplementedError('%s.first' % (type(self).__name__,))", "title": "" }, { "docid": "3444b20be014005c975d133e068bc92c", "score": "0.5662909", "text": "def get_iaq_baseline(self):\n return self.sgp.get_iaq_baseline()", "title": "" }, { "docid": "2b4f5f54b0115086274410367b347a1a", "score": "0.5657674", "text": "def get_start(self):\n if len(self.values.index) == 1:\n self.start_agg = None\n return self.values[0]\n elif isinstance(self.start_agg, str):\n self.start_agg = stats[self.start_agg]()\n\n self.start_agg.set_data(self.values)\n return self.start_agg.value", "title": "" }, { "docid": "439ae5978b1482e10529e59d998e52ea", "score": "0.5657543", "text": "def get_begin(self):\n\n self.__check_data()\n return [i[1] for i 
in self._data]", "title": "" }, { "docid": "b2f2c2a958e62239a2a69a9bf27fc87e", "score": "0.56542677", "text": "def get_baseline_model_spec(\n eval_config: config.EvalConfig) -> Optional[config.ModelSpec]:\n for spec in eval_config.model_specs:\n if spec.is_baseline:\n return spec\n return None", "title": "" }, { "docid": "c05f9916b6ea533dabbe1bc51aa05db6", "score": "0.56493324", "text": "def peek(self):\r\n assert not self.is_empty()\r\n return self.items[0]", "title": "" }, { "docid": "9e75f44b494aa02afc2d6a999ece3c6f", "score": "0.56241566", "text": "def __get_first (self):\n if self.__first is None:\n self.__first = frozenset(self._first())\n return self.__first", "title": "" }, { "docid": "08b73e1506a58f29bb4f96d10af7032f", "score": "0.56206554", "text": "def baseline_data(self):\n if self._baseline_data is None:\n data = [self['primary'].time_slice(start,stop).data for start,stop in self.baseline_regions]\n if len(data) == 0:\n data = np.empty(0, dtype=self['primary'].data.dtype)\n else:\n data = np.concatenate(data)\n data = data[np.isfinite(data)]\n self._baseline_data = TSeries(data, sample_rate=self['primary'].sample_rate, recording=self)\n return self._baseline_data", "title": "" }, { "docid": "5748b0f41a9af05aa1137aaa09dc7314", "score": "0.5619834", "text": "def peek(self):\r\n return self._items[0]", "title": "" }, { "docid": "ef6ffe3a116a4493fff32a937f72c9fd", "score": "0.56180036", "text": "def get_first_item(self):\n if self.root is None:\n raise ListEmptyError()\n else:\n data = self.root.data\n self.root = self.root.next_node\n return data", "title": "" }, { "docid": "60206c25cd2312175b12cd11d3a01455", "score": "0.5617048", "text": "def get_element_start(self, element, group):\n return self.element_start_end[element][group][0]", "title": "" }, { "docid": "3c09fa9060204cab0afbcf8903214213", "score": "0.5610974", "text": "def peek(self):\n return self.items[0]", "title": "" }, { "docid": "77699ba5ef9ccb71b0f7717ee189ee36", "score": "0.5609731", "text": "def earliestValue(self):\n return self._values[0]", "title": "" }, { "docid": "1d561ada4ff36dc29bbb7fd5035a2eca", "score": "0.5607155", "text": "def peek(self):\r\n if not self.is_empty():\r\n return self.items[0]\r\n return None", "title": "" }, { "docid": "605e84399244790c7c7592b844bed4bd", "score": "0.5601736", "text": "def first_child(self):\n\t\tif self.children:\n\t\t\treturn self.children[0]", "title": "" }, { "docid": "fd348f4abdd84b17401906971c819581", "score": "0.55924374", "text": "def peek(self):\n if self.is_empty(): # self is empty then raise index error\n raise IndexError\n item = self.chart[0] # first item(highest priority)\n return item # return that highest priority item", "title": "" }, { "docid": "46eb89a850e228b71708ba360c245008", "score": "0.55891573", "text": "def extract_single_top(data):\n if(type(data)==np.ndarray):\n return data[0]\n return data", "title": "" }, { "docid": "d7b0b99dff1faaba4a562b4d355dde6e", "score": "0.55810744", "text": "def get_first_list(array):\n return array[0] if array else None", "title": "" }, { "docid": "162928354730efa0af9cb4fbab01986c", "score": "0.55448395", "text": "def first(self, func=None) -> Any:\n if func is not None:\n return self.where(func).element_at(0)\n return self.element_at(0)", "title": "" }, { "docid": "af32b57eec01366d39566e92f04cf3d7", "score": "0.5537539", "text": "def takeFirst(n):\r\n return n[0]", "title": "" }, { "docid": "e28833ed5a21c8df37ee44e0d2787ac6", "score": "0.553361", "text": "def lowest(self):\n if not self:\n return None\n else:\n 
return self[0].lowest()", "title": "" }, { "docid": "74ee04ad50b87560170ca9912516b732", "score": "0.55305934", "text": "def peek(self) -> int:\n return self.st1[-1]", "title": "" }, { "docid": "f719b494fde5f9412bc31a9efe1ea8c8", "score": "0.5530234", "text": "def head(self):\n if self._count == 0: raise IndexError\n return self._list.head.datum", "title": "" }, { "docid": "a852e6aa63cc722bc8a28cf402136432", "score": "0.5524465", "text": "def get_first(self, history) :\n idx = history[['Time']].idxmin()[0]\n return idx, history['Address'][idx]", "title": "" }, { "docid": "31d0ead2fd3fb4067caeabd846e453be", "score": "0.5521085", "text": "def top(self):\n return self.b[0]", "title": "" }, { "docid": "cfe7c9cc8a3af824d3a981980bc4b80a", "score": "0.5519291", "text": "def front(self):\n if self.is_empty():\n return None\n return self.list[0]", "title": "" }, { "docid": "55e2b486f7f0f5b41bbdee9749c7f840", "score": "0.5506317", "text": "def first(val):\n return Use(lib.LLVMGetFirstUse(val))", "title": "" }, { "docid": "cab93da12484e26da7d53e2698f852c1", "score": "0.5496607", "text": "def peek(self):\r\n \r\n if self.items:\r\n return self.items[-1]\r\n return None", "title": "" }, { "docid": "34a0985b7033a00c3b17d23e8b7e60f2", "score": "0.5495288", "text": "def first(self):\n with open(self.path, 'r') as handle:\n for line in handle: return line", "title": "" }, { "docid": "58ffc742057d68b3fc482ed6a89f7d44", "score": "0.5493607", "text": "def peek(self):\n if not self.items:\n return None\n return self.items[-1]", "title": "" }, { "docid": "cadbf5dba90363b792d733b91fec4d4f", "score": "0.54918784", "text": "def first(self):\n if self.is_empty():\n raise Exception('The queue is empty')\n return self._head._element", "title": "" }, { "docid": "6b08a73e9b812f0dfcf164e2f4abd5ff", "score": "0.5490557", "text": "def first(self):\n\n if self.root is None:\n raise KeyError\n\n for e in self:\n return e", "title": "" }, { "docid": "c24e76b062a03b290d37d252ec74ad98", "score": "0.5479485", "text": "def peek(self):\r\n try:\r\n return self._lines[self._line_index + 1]\r\n except IndexError:\r\n return None", "title": "" }, { "docid": "5f3de4f951ff0b74a7a810651823545f", "score": "0.5467871", "text": "def top(self):\n return self._first()", "title": "" }, { "docid": "87912d58715081f27a0ca5dc582efea1", "score": "0.5465797", "text": "def get_from_start(self, span_begin):\n return self.start_map[span_begin]", "title": "" }, { "docid": "26aa1655dc02f5068620a230b8c47d95", "score": "0.5459693", "text": "def get_first(self):\n if self.is_empty():\n raise NoSuchNodeException()\n\n return self.head.data", "title": "" }, { "docid": "f6a573c1943524fd53f2aa17ee308a9a", "score": "0.54583293", "text": "def top(self):\r\n if self.is_empty():\r\n return None\r\n return self.data[self.size - 1]", "title": "" }, { "docid": "c1af6037c6552656e0519c0b0ebf4bd7", "score": "0.5454799", "text": "def first(s):\r\n return s[0]", "title": "" }, { "docid": "d1868f09516134151e69a84ede018b3a", "score": "0.5454548", "text": "def peek(self):\n 
if self.items:\n return self.items[-1]\n return None", "title": "" }, { "docid": "df0beea4562d348faf9b8db780b23666", "score": "0.5449001", "text": "def peek(self):\r\n assert not self.is_empty()\r\n return self.items[-1]", "title": "" }, { "docid": "ec70f6608e038fc51f879eff146196c9", "score": "0.5442174", "text": "def baseline_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"baseline_id\")", "title": "" } ]
958dc260041318e94a7d2992e82f4966
Either pass name or enum itself, e.g `"local"` or `ChannelPicker.local`, or a list of one or more of those, e.g. `["national", "local"]`. If `remove_variants` is True, only keep the canonical form (the last) where there are multiple entries with the same title. Exclude the children's channel "CBeebies" (does not parse to catalogue for some reason and is not the same type as the other national channels anyway).
[ { "docid": "66f31fb67bbbdca81de26329cd0c585a", "score": "0.4037361", "text": "def keys_by_category(cls, category, remove_variants=False, remove_cbeebies=True):\n keys = []\n if isinstance(category, list):\n for c in category:\n keys.extend(cls.keys_by_category(c, remove_variants=remove_variants))\n remove_variants = False # ensure not to re-remove at the end\n else:\n if isinstance(category, str): # name e.g. \"local\"\n category = cls[category]\n keys.extend(sorted([*category.value.__members__]))\n if remove_variants:\n keys = cls.remove_variants(keys, sort=True)\n if remove_cbeebies:\n if \"cr\" in keys:\n keys.remove(\"cr\")\n return keys", "title": "" } ]
[ { "docid": "7e995dec9689c3c0d5f15c1a0d2c4a80", "score": "0.49973446", "text": "def filter_releases_by_channel(releases: List[dict], channel: str):\n if channel not in [\"stable\", \"beta\", \"alpha\"]:\n raise ValueError(\"Channel must be one of 'stable', 'beta', 'alpha'\")\n\n if channel == \"stable\":\n return [r for r in releases if \"alpha\" not in r[\"name\"] and \"beta\" not in r[\"name\"]]\n\n elif channel == \"beta\":\n return [r for r in releases if \"alpha\" not in r[\"name\"]]\n\n return list(releases)", "title": "" }, { "docid": "0900e3b1675e2b414f12c8a8bcbd05d2", "score": "0.4835335", "text": "def filter(self, btype='', name=''):\n filtered = []\n for x in self.children:\n if name and isinstance(x, Key) and x.name == name:\n filtered.append(x)\n elif isinstance(x, Container) and x.__class__.__name__ == btype \\\n and x.value == name:\n filtered.append(x)\n elif not name and btype and x.__class__.__name__ == btype:\n filtered.append(x)\n return filtered", "title": "" }, { "docid": "0900e3b1675e2b414f12c8a8bcbd05d2", "score": "0.4835335", "text": "def filter(self, btype='', name=''):\n filtered = []\n for x in self.children:\n if name and isinstance(x, Key) and x.name == name:\n filtered.append(x)\n elif isinstance(x, Container) and x.__class__.__name__ == btype \\\n and x.value == name:\n filtered.append(x)\n elif not name and btype and x.__class__.__name__ == btype:\n filtered.append(x)\n return filtered", "title": "" }, { "docid": "df07b44c4c32bf17265a9bcf31583157", "score": "0.47789168", "text": "def clean_genre_category(soup_obj):\n genres = [get_string(e) for e in soup_obj.find_all(\"a\")]\n if not genres:\n assert get_string(soup_obj).strip() == \"N/A\"\n genres = genres[:-1] # Drop the \"see more thing\")\n return(genres)", "title": "" }, { "docid": "4195ddb6c85d5f891084c548ed988b88", "score": "0.46707535", "text": "def test_filter_de_novos(self):\n \n # make a family without parents\n family = Family(\"fam_id\")\n child_gender = \"female\"\n family.add_child('child_id', 'mother_id', 'father_id', child_gender, '2', 'child_path')\n \n # set up an autosomal variant\n gender = \"M\"\n args = [\"1\", \"100\", \".\", \"T\", \"G\", \"1000\", \"PASS\", \".\", \"GT\", \"0/1\", gender]\n child_var = SNV(*args)\n \n # combine the variant into a list of TrioGenotypes\n child_vars = [child_var]\n mother_vars = []\n father_vars = []\n trio_variants = combine_trio_variants(family, child_vars, mother_vars, father_vars)\n \n # check that vars without parents get passed through automatically\n self.assertEqual(filter_de_novos(trio_variants, 0.9), trio_variants)\n \n # now add parents to the family\n family.add_mother(\"mother_id\", '0', '0', 'female', '1', \"mother_vcf_path\")\n family.add_father(\"father_id\", '0', '0', 'male', '1', \"father_vcf_path\")\n family = family\n \n # re-generate the variants list now that parents have been included\n trio_variants = combine_trio_variants(family, child_vars, mother_vars, father_vars)\n \n # check that vars with parents, and that appear to be de novo are\n # filtered out\n self.assertEqual(filter_de_novos(trio_variants, 0.9), [])\n \n # check that vars with parents, but which are not de novo, are retained\n mother_vars = child_vars\n trio_variants = combine_trio_variants(family, child_vars, mother_vars, father_vars)\n \n self.assertEqual(filter_de_novos(trio_variants, 0.9), trio_variants)", "title": "" }, { "docid": "d718efedb4de38e37700d8b720a0c5e4", "score": "0.4625562", "text": "def filter_variants(df):\n \n waddell_list = 
['missense_variant',\n 'stop_gained',\n 'frameshift_variant',\n 'splice_acceptor_variant',\n 'splice_donor_variant',\n 'start_lost',\n 'inframe_deletion',\n 'inframe_insertion',\n 'stop_lost']\n \n return df[df['One_Consequence'].isin(waddell_list)]", "title": "" }, { "docid": "598315fe032a4a7509acb1deffdf7cf3", "score": "0.46117678", "text": "def filter_variants(self, variants):\n \n # if we have flagged CNVs on three different chroms, drop all CNVs,\n # since the sample is sufficiently anomalous\n# if self.count_cnv_chroms(variants) > 2:\n# variants = self.remove_cnvs(variants)\n \n # and filter by a lower MAF threshold\n\n variants = self.filter_by_maf(variants)\n variants = self.filter_polyphen(variants)\n variants = self.filter_exac(variants)\n \n return variants", "title": "" }, { "docid": "fad45bd6306310c98f24b319f6ea877d", "score": "0.4594125", "text": "def prune(self, keep_channels=True, *, verbose=True):\n for v in self.variables:\n for var in wt_kit.flatten_list([ax.variables for ax in self._axes + self._constants]):\n if v == var:\n break\n else:\n self.remove_variable(v.natural_name, implied=False, verbose=verbose)\n if keep_channels is not True:\n try:\n if isinstance(keep_channels, str):\n raise TypeError\n indexes = tuple(keep_channels)\n except TypeError:\n indexes = (keep_channels,)\n\n for i, ch in enumerate(self.channels):\n if i not in indexes and not ch.natural_name in indexes:\n self.remove_channel(ch.natural_name, verbose=verbose)", "title": "" }, { "docid": "4196db0a2bf1fcb8eac78a80527e505c", "score": "0.45561054", "text": "def _prune(self, name_parts, content):\n if not self.prune:\n return content\n for ep_filter in FILTERS:\n ep_filter_parts = ep_filter.split(\"/\")\n if len(name_parts) != len(ep_filter_parts):\n continue\n for filter_part, part in zip(ep_filter_parts, name_parts):\n if filter_part not in (\"*\", part):\n break\n else:\n for pth in FILTERS[ep_filter]:\n if isinstance(content, list):\n for o in content:\n pop_matches(pth, o)\n else:\n pop_matches(pth, content)\n return content", "title": "" }, { "docid": "9f81cf1db570b4791444355f08a3fd43", "score": "0.4537968", "text": "def filter_variants(job, config, name, input_vcf):\n\n output_vcf = \"{}.filtered.vcf\".format(name)\n filter_log = \"{}.variantfiltration.log\".format(name)\n\n filter_command = [\"{}\".format(config['gatk-filter']['bin']),\n \"-T\",\n \"VariantFiltration\",\n \"-R\",\n \"{}\".format(config['reference']),\n \"--filterExpression\",\n \"'MQ0 > {}'\".format(config['mq0_threshold']),\n \"--filterName\",\n \"'HighMQ0'\",\n \"--filterExpression\",\n \"'DP < {}'\".format(config['coverage_threshold']),\n \"--filterName\",\n \"'LowDepth'\",\n \"--filterExpression\",\n \"'QUAL < {}'\".format(config['var_qual_threshold']),\n \"--filterName\",\n \"'LowQual'\",\n \"--filterExpression\",\n \"'MQ < {}'\".format(config['map_qual_threshold']),\n \"--filterName\",\n \"'LowMappingQual'\",\n \"--variant\",\n \"{}\".format(input_vcf),\n \"-o\",\n \"{}\".format(output_vcf)]\n\n job.fileStore.logToMaster(\"GATK VariantFiltration Command: {}\\n\".format(filter_command))\n pipeline.run_and_log_command(\" \".join(filter_command), filter_log)\n\n return output_vcf", "title": "" }, { "docid": "1c733603bdac2925ff9dd6187c7dc270", "score": "0.448448", "text": "def remove(x, amount): # TO-DO: ONSET HANDLING (DO WE WANT IT TO NORMALIZE TO STARTING AT ZERO?)\n if amount<=0:\n return x # nothing to remove!\n #elif (x.__class__.__name__ == 'Music'):\n # remove(x.tree, amount)\n # return x\n elif 
(x.__class__.__name__ == 'Note' or x.__class__.__name__ == 'Rest'):\n if amount >= x.dur:\n x.dur = 0\n if amount < x.dur:\n x.dur = x.dur - amount\n return x\n elif (x.__class__.__name__ == 'Seq'):\n dLeft = amount\n newTree = []\n for t in x.trees:\n d = dur(t)\n newTree.append(remove(t, dLeft))\n dLeft = max(0,dLeft - d)\n x.trees = newTree\n return x\n elif isinstance(x, Par):\n newTrees = []\n for t in x.trees:\n newTrees.append(remove(t,amount))\n x.trees = newTrees\n return x\n elif (x.__class__.__name__ == 'Part'):\n #if (x.mod.__class__.__name__ == 'Tempo'):\n # remove(x.tree, amount*x.mod.value)\n #else:\n remove(x.tree, amount)\n return x\n else: raise MusEciException(\"Unrecognized musical structure: \" + str(x))", "title": "" }, { "docid": "fb9c4d9f7561372c40ff1aeff3845279", "score": "0.44445187", "text": "def remove(self, name, parent=None):\n raise NotImplementedError", "title": "" }, { "docid": "68152c3b86f3ef3200734d1357ac7da7", "score": "0.44322827", "text": "def remove_list_item(self, data):\n\n if isinstance(data, QJSValue):\n data = data.toVariant()\n\n rolename = \"displayName\"\n value = data[rolename]\n type = data[\"type\"]\n\n if type == \"Taxon\":\n\n idx = self.avFullModel.get_item_index(rolename=rolename, value=value)\n if idx >= 0:\n self.avFullModel.removeItem(idx)\n self.avFullSpecies = [x for x in self.avFullSpecies if x[\"displayName\"] != value]\n self.avFullSpeciesFiltered = [x for x in self.avFullSpeciesFiltered if x[\"displayName\"] != value]\n\n if data[\"isMostRecent\"] == \"True\":\n\n idx = self.avRecentModel.get_item_index(rolename=rolename, value=value)\n if idx >= 0:\n self.avRecentModel.removeItem(idx)\n self.avRecentSpecies = [x for x in self.avRecentSpecies if x[\"displayName\"] != value]\n self.avRecentSpeciesFiltered = [x for x in self.avRecentSpeciesFiltered if x[\"displayName\"] != value]\n\n elif type == \"Debris\":\n\n idx = self.avDebrisModel.get_item_index(rolename=rolename, value=value)\n if idx >= 0:\n self.avDebrisModel.removeItem(idx)\n self.avDebris = [x for x in self.avDebris if x[\"displayName\"] != value]\n self.avDebrisFiltered = [x for x in self.avDebrisFiltered if x[\"displayName\"] != value]", "title": "" }, { "docid": "07c1e4440ab74a153155ce2e84b4c0b5", "score": "0.4429736", "text": "def absent_downstream_subtypes(subtype: str,\n subtypes: pd.Series,\n scheme_subtypes: List[str]) -> Optional[List[str]]:\n escaped_subtype = re.escape(subtype)\n re_subtype = re.compile(r'^{}\\.\\d+$'.format(escaped_subtype))\n downstream_subtypes = [s for s in scheme_subtypes if re_subtype.search(s)]\n absentees = [x for x in downstream_subtypes if not (subtypes == x).any()]\n return absentees if absentees else None", "title": "" }, { "docid": "90507711d6c52a2f746f15c2b231d0c4", "score": "0.44296724", "text": "def remove_tree_item(self, index):\n if not isinstance(index, QModelIndex):\n return\n\n model = self.seModel\n\n # Get the existing catchId from the data - Do before deleting the actual row\n item = model.getItem(index)\n typeCol = model.getColumnNumber(\"type\")\n catchId = item.data(model.getColumnNumber(\"catchId\")).value()\n\n type = item.data(typeCol).value()\n if type == \"Taxon\":\n self.speciesCount -= 1\n\n elif type == \"Mix\":\n\n if isinstance(self._active_mix, QJSValue):\n self._active_mix = self._active_mix.toVariant()\n if catchId == self._active_mix[\"catchId\"]:\n self.activeMix = {\"displayName\": None, \"catchId\": None}\n\n # recurse to check all children + subchildren\n self.speciesCount -= len([x for x in 
item.children if x.data(typeCol).value() == \"Taxon\"])\n submixes = [x for x in item.children if x.data(typeCol).value() == \"Submix\"]\n for submix in submixes:\n self.speciesCount -= len([x for x in submix.children if x.data(typeCol).value() == \"Taxon\"])\n\n # If the submix is the activeMix and we're removing the submix, then set the activeMix to None\n if submix.data(model.getColumnNumber('catchId')).value() == self._active_mix[\"catchId\"]:\n self.activeMix = {\"displayName\": None, \"catchId\": None}\n\n elif type == \"Submix\":\n\n if isinstance(self._active_mix, QJSValue):\n self._active_mix = self._active_mix.toVariant()\n if catchId == self._active_mix[\"catchId\"]:\n self.activeMix = {\"displayName\": None, \"catchId\": None}\n\n # recurse to check all children\n self.speciesCount -= len([x for x in item.children if x.data(typeCol).value() == \"Taxon\"])\n\n\n # Remove the rows\n parentIdx = model.parent(index)\n status = model.removeRows(index.row(), 1, parentIdx)\n\n # Decrement the species count - this is shown in the upper right corner of the screen\n # self.speciesCount -= 1\n\n # Delete from the database\n if isinstance(catchId, int):\n\n catch_sql = \"\"\"\n WITH RECURSIVE subcatch(n) AS (\n SELECT CATCH_ID FROM CATCH WHERE CATCH_ID = ?\n UNION\n SELECT c.CATCH_ID FROM CATCH c, subcatch\n WHERE c.PARENT_CATCH_ID = subcatch.n\n )\n DELETE FROM CATCH WHERE CATCH_ID in subcatch;\n \"\"\"\n\n specimen_sql = \"\"\"\n WITH RECURSIVE subcatch(n) AS (\n SELECT CATCH_ID FROM CATCH WHERE CATCH_ID = ?\n UNION\n SELECT c.CATCH_ID FROM CATCH c, subcatch\n WHERE c.PARENT_CATCH_ID = subcatch.n\n ),\n subspecimens(n) AS (\n SELECT SPECIMEN_ID FROM SPECIMEN s INNER JOIN CATCH c\n ON c.CATCH_ID = s.CATCH_ID WHERE c.CATCH_ID in subcatch\n UNION\n SELECT s.SPECIMEN_ID FROM SPECIMEN s, subspecimens\n WHERE s.PARENT_SPECIMEN_ID = subspecimens.n\n )\n DELETE FROM SPECIMEN WHERE SPECIMEN_ID IN subspecimens;\n \"\"\"\n\n params = [catchId, ]\n self._db.execute(query=specimen_sql, parameters=params)\n self._db.execute(query=catch_sql, parameters=params)", "title": "" }, { "docid": "d906e7b03fb499d2e52ea262ebf14bf4", "score": "0.44267872", "text": "def _unregister_propagate(self, types):\n channel = self\n while channel is not None:\n channel.registered_types.difference_update(types)\n channel = channel._parent", "title": "" }, { "docid": "dda5a1a1aede81c7983cc66e7ce7b35d", "score": "0.44106168", "text": "def remove_child_banks(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchyDesignSession.remove_child_bins\n self._get_provider_session('bank_hierarchy_design_session').remove_child_banks(*args, **kwargs)", "title": "" }, { "docid": "f91daab046a23db26815af2cda4f421f", "score": "0.4371158", "text": "def process_variants(variant_titles: list[dict[str, Any]], variant_type: str, report_on_match: bool) -> None:\r\n\r\n variants: list[DatNode] = []\r\n\r\n for variant_title in variant_titles:\r\n if 'searchTerm' not in variant_title:\r\n printwrap(\r\n f'{Font.warning}* The following variants entry is missing a '\r\n f'{Font.bold}searchTerm{Font.warning} key and will be skipped:',\r\n 'error')\r\n eprint(f'\\n {variant_title}{Font.end}')\r\n\r\n if config.user_input.warningpause:\r\n eprint(f'\\n{Font.disabled}Press enter to continue{Font.end}')\r\n input()\r\n continue\r\n else:\r\n variant_name = variant_title['searchTerm']\r\n\r\n name_type: str = ''\r\n\r\n if 'nameType' in variant_title:\r\n name_type = variant_title['nameType']\r\n if not (name_type 
== 'full'\r\n or name_type == 'short'\r\n or name_type == 'regionFree'\r\n or name_type == 'tagFree'\r\n or name_type == 'regex'):\r\n name_type = 'short'\r\n else:\r\n name_type = 'short'\r\n\r\n # Look up the title in the dictionary, then process the required changes\r\n found_titles: set[DatNode] = set()\r\n\r\n if name_type == 'regex':\r\n valid_regex: list[str] = regex_test([variant_name], 'variants', 'clone list')\r\n\r\n if not valid_regex:\r\n continue\r\n\r\n found_titles = TitleTools.find_title(variant_name, name_type, processed_titles, missing_titles, config, deep_search=True)\r\n else:\r\n found_titles = TitleTools.find_title(variant_name, name_type, processed_titles, missing_titles, config)\r\n\r\n old_group_names: set[str] = set()\r\n\r\n for title in found_titles:\r\n old_group_names.add(title.group_name)\r\n\r\n new_group_name: str = TitleTools.get_group_name(value['group'], config)\r\n\r\n new_group_name = new_group_name.lower()\r\n\r\n if new_group_name not in processed_titles:\r\n processed_titles[new_group_name] = set()\r\n\r\n # If the title's not found in the DAT, add it to missing_titles,\r\n # otherwise add it to the delete list, then move it to the new\r\n # group\r\n if not found_titles:\r\n missing_titles.add(variant_name)\r\n else:\r\n if config.user_input.trace:\r\n report_on_match = TraceTools.trace_enable(set(found_titles), config.user_input.trace)\r\n\r\n for title in found_titles:\r\n for old_group_name in old_group_names:\r\n if title in processed_titles[old_group_name]:\r\n if (\r\n report_on_match\r\n and new_group_name != title.group_name\r\n and not is_includes):\r\n eprint('')\r\n TraceTools.trace_title('REF0055')\r\n eprint(f'* {title.full_name}')\r\n eprint(f' New group: {new_group_name}\\n{Font.disabled} Old group: {title.group_name}{Font.end}')\r\n eprint(f'\\n{Font.disabled}Press enter to continue{Font.end}')\r\n input()\r\n\r\n if (\r\n variant_type == 'title'\r\n or variant_type == 'superset'):\r\n new_title: DatNode = copy.deepcopy(title)\r\n\r\n if 'priority' in variant_title:\r\n if new_title.clonelist_priority == 1:\r\n new_title.clonelist_priority = variant_title['priority']\r\n\r\n if variant_type == 'superset':\r\n new_title.is_superset = True\r\n\r\n variants.append(new_title)\r\n\r\n for old_group_name in old_group_names:\r\n if title in processed_titles[old_group_name]:\r\n delete_titles.add((title, old_group_name))\r\n\r\n elif variant_type =='compilation':\r\n for compilation_title in processed_titles[old_group_name]:\r\n if compilation_title.full_name == title.full_name:\r\n title_position: int = 1\r\n if 'titlePosition' in variant_title:\r\n title_position = variant_title['titlePosition']\r\n\r\n if 'priority' in variant_title:\r\n clonelist_priority: int = variant_title['priority']\r\n else:\r\n clonelist_priority = 1\r\n\r\n compilation_title.contains_titles[value['group']] = {\"position\": title_position, \"priority\": clonelist_priority}\r\n\r\n if 'compilationPriority' in variant_title:\r\n compilation_title.clonelist_priority = variant_title['compilationPriority']\r\n\r\n # Set these variant properties after the processing, to make sure they\r\n # don't mess with title look ups\r\n for variant in variants:\r\n variant.group_name = new_group_name\r\n variant.short_name = value['group'].lower()\r\n processed_titles[new_group_name].add(variant)", "title": "" }, { "docid": "2a50556d71444c30eb83ba99577ca164", "score": "0.436203", "text": "def variant(self, name):\n return filter(lambda s: s.name == name, self.variants)[0]", 
"title": "" }, { "docid": "8c77687cfa248b2fe9f2e2ddc541d557", "score": "0.43561602", "text": "def filter_processes():\n \n foreground_process_category_list = []\n for process in db.getAll(Process):\n category = process.category\n if category is None:\n foreground_process_category_list.append(process) \n \n else:\n path = category.toPath()\n \n if not any(elem in path for elem in ECOINVENT_PARENT_CATEGORY_LIST):\n foreground_process_category_list.append(process)\n \n return foreground_process_category_list", "title": "" }, { "docid": "18a74abde3d65bf841b822796b766cea", "score": "0.43430316", "text": "def filter_categories(self, raw_data):\n raw_categories = []\n\n for product in raw_data['products']:\n category_list = product['categories_tags']\n raw_categories += category_list\n\n fr_categories = []\n\n for category in raw_categories:\n if 'fr:' in category:\n fr_categories.append(category)\n\n keepers = []\n occurrence_limit = 2\n\n for category in fr_categories:\n if fr_categories.count(category) >= occurrence_limit:\n keepers.append(category)\n\n return keepers", "title": "" }, { "docid": "8ccfb52fc20283f8f708685867829f80", "score": "0.4340506", "text": "def _filter_dact(self, dact, raw_utterance):\n slot_filter_priority = [\n Slots.GENRES, Slots.ACTORS, Slots.DIRECTORS, Slots.KEYWORDS,\n Slots.TITLE\n ]\n # first see if multiple genres lead to a plot keyword or title\n for slot in slot_filter_priority:\n params = [p for p in dact.params if p.slot == slot.value]\n for param in params:\n if any([\n re.search(r'\\b{0}\\b'.format(pattern), param.value)\n for pattern in self.dont_care_pattern +\n self.basic_patterns[UserIntents.ACKNOWLEDGE] +\n self.basic_patterns[UserIntents.DENY]\n ]):\n self._remove_param(param, dact)\n continue\n if param.slot == Slots.KEYWORDS.value:\n # remove the plot_keyword if it is also in other slot\n # values or is a sub-string\n if param.value in [\n p.value for p in dact.params if p.slot != param.slot\n ]:\n self._remove_param(param, dact)\n if param.slot in [\n Slots.GENRES.value, Slots.KEYWORDS.value,\n Slots.ACTORS.value, Slots.DIRECTORS.value\n ]:\n # remove genre if it is a sub-string of any other value\n values = param.value.strip().split() if param.slot == \\\n Slots.GENRES.value else [param.value]\n for value in values:\n for p in dact.params:\n if p.slot != param.slot and value in \\\n p.value.split() and len(p.value.split()) != \\\n len(value.split()):\n self._remove_param(param, dact)\n if param.slot == Slots.TITLE.value:\n # remove the title if it is also in other slot values\n if param.value in [\n p.value for p in dact.params if p.slot != param.slot\n ]:\n self._remove_param(param, dact)\n elif param.value in self.slot_values[Slots.KEYWORDS.value]:\n param.slot = Slots.KEYWORDS.value\n elif len([p for p in param.value.split() if p in self.slot_values[\n Slots.GENRES.value] or p in self.slot_annotator.genres_alternatives]) == \\\n len(param.value.split()):\n param.slot = Slots.GENRES.value\n\n # extra check for if an annotation is a sub-string of another\n for param in dact.params:\n if param.slot in [Slots.YEAR.value, Slots.GENRES.value]:\n continue\n param.value = self.slot_annotator.find_in_raw_utterance(\n raw_utterance, param.value, len(word_tokenize(param.value)))\n for param in deepcopy(dact.params):\n if any([param.value.lower() in p.value.lower() and param.value.lower() != \\\n p.value.lower() for p in dact.params if p.slot != param.slot]):\n self._remove_param(param, dact)\n\n self._filter_genres(dact)\n dual_persons = 
self._filter_person_names(dact)\n values_neg = self._get_annotation_relevance(dact, raw_utterance,\n dual_persons)\n for param in dact.params:\n if param.value in values_neg:\n if param.op == Operator.EQ:\n param.op = Operator.NE\n else:\n param.value = f'{param.op} {param.value}'\n param.op = Operator.NE", "title": "" }, { "docid": "ed4f3c3bf11e705c08026e2ed359722a", "score": "0.43332943", "text": "def collect_children_type_of(parent, ch_type):\n collected = []\n\n for child in parent.winfo_children():\n if child.winfo_class() == ch_type:\n collected.append(child)\n\n return collected", "title": "" }, { "docid": "5c8f84f63cc5153442c32918b3323faf", "score": "0.43160313", "text": "def choose_media_format(items):\n\n items_by_media_id = {}\n for item in items:\n media_items = items_by_media_id.get(item.media_id, list())\n media_items.append(item)\n items_by_media_id[item.media_id] = media_items\n\n pruned_items = []\n\n # Desired format in descending order\n desired_formats = [\n (smscsv.MediaFormat.VIDEO, smscsv.MediaQuality.HIGH),\n (smscsv.MediaFormat.MPEG4, smscsv.MediaQuality.HIGH),\n (smscsv.MediaFormat.MPEG4, smscsv.MediaQuality.HIGH_RES),\n (smscsv.MediaFormat.MPEG4, smscsv.MediaQuality.LOW_RES),\n (smscsv.MediaFormat.WMV, smscsv.MediaQuality.HIGH),\n (smscsv.MediaFormat.FLV, smscsv.MediaQuality.HIGH),\n (smscsv.MediaFormat.FLV, smscsv.MediaQuality.MEDIUM),\n (smscsv.MediaFormat.FLV, smscsv.MediaQuality.LOW),\n (smscsv.MediaFormat.IPOD, smscsv.MediaQuality.HIGH),\n (smscsv.MediaFormat.IPOD, smscsv.MediaQuality.MEDIUM),\n (smscsv.MediaFormat.IPOD, smscsv.MediaQuality.LOW),\n (smscsv.MediaFormat.AUDIO, smscsv.MediaQuality.HIGH),\n (smscsv.MediaFormat.AAC, smscsv.MediaQuality.HIGH),\n (smscsv.MediaFormat.MP3, smscsv.MediaQuality.HIGH),\n (smscsv.MediaFormat.AAC, smscsv.MediaQuality.MEDIUM),\n (smscsv.MediaFormat.MP3, smscsv.MediaQuality.MEDIUM),\n (smscsv.MediaFormat.AAC, smscsv.MediaQuality.LOW),\n (smscsv.MediaFormat.MP3, smscsv.MediaQuality.LOW),\n ]\n\n for media_items in items_by_media_id.values():\n if set(item.filename for item in media_items) == {''}:\n LOG.warning(\n 'Skipping item media_id=%s since it has no files at all', media_items[0].media_id)\n continue\n\n format_quality_pairs = {(item.format, item.quality): item for item in media_items}\n\n best_item = None\n for f in desired_formats:\n item = format_quality_pairs.get(f)\n if item is not None and item.filename != '':\n best_item = item\n break\n\n if best_item is not None:\n pruned_items.append(best_item)\n else:\n LOG.warning('Could not find format for item: media_id=%s', media_items[0].media_id)\n LOG.warning('Formats and filenames:')\n for item in media_items:\n LOG.warning(' %s', repr([(item.format, item.quality, item.filename)]))\n\n return pruned_items", "title": "" }, { "docid": "dd27ae07fff4ed85ad53c81a72bf6f22", "score": "0.43114892", "text": "def channel_remove(runner, remove_info):\n db = runner.get_hn()\n tmp_params = runner.get_params()\n num_of_anchors = remove_info[\"num_of_anchors\"] if \"num_of_anchors\" in remove_info else None\n for idx, (layer, mask) in enumerate(zip(remove_info['layer_name'], remove_info['mask'])):\n assert layer in db['layers'], \"Layer does not exist in the HN\"\n assert db['layers'][layer]['type'] == 'output_layer', \"Chosen layer is not an output\"\n sh = db['layers'][layer]['output_shapes'][0]\n mask_tile = list(mask) if num_of_anchors is None else list(np.tile(mask, [num_of_anchors[idx]]))\n if not sh[3] == 1:\n # normal output\n assert len(mask_tile) == sh[3], \"One hot 
vector is not in the right length\"\n _channel_remove(db, layer, mask_tile, sh[3], tmp_params)\n else:\n # output layer after argmax\n pred_layer = db['layers'][layer]['input'][0]\n sh = db['layers'][pred_layer]['input_shapes'][0]\n assert len(mask) == sh[3], \"One hot vector is not in the right length\"\n _channel_remove(db, pred_layer, mask, sh[3], tmp_params)\n runner.set_hn(db)\n runner.load_params(tmp_params)", "title": "" }, { "docid": "4aa8344063ccd901810a0ea8cd2e02f0", "score": "0.42951268", "text": "def __remove_nones_variant_data (self, variant_data):\r\n\t\tfor v in variant_data.keys():\r\n\t\t\tvariant_data[v] = [x for x in variant_data[v] if x is not None]\r\n\t\treturn variant_data", "title": "" }, { "docid": "13a3047051446f3935246cf9e09f0458", "score": "0.4283493", "text": "def remove_steam_link_filter(value):\n\treturn _list_or_string(value, _remove_steam_link_filter)", "title": "" }, { "docid": "196691121793488e5452d8632a4fbb50", "score": "0.4277088", "text": "def channel_remove(self):\n logger.info(f\"OSN: Channel remove start: name={self.channel_name}\")\n\n osn_admin = OSNAdmin(\n self.orderers_eps[-1],\n ORDERER_ADMIN_TLS_CERT,\n ORDERER_ADMIN_TLS_KEY,\n ORDERER_ADMIN_TLS_CA,\n )\n res = osn_admin.list_single_channel(self.channel_name)\n self.assertTrue(res)\n self.assertEqual(res[\"status\"], 'active')\n osn_admin.remove(self.channel_name)\n res = osn_admin.list_single_channel(self.channel_name)\n self.assertEqual(res[\"status\"], 'inactive')\n\n logger.info(f\"OSN: Channel remove done: name={self.channel_name}\")", "title": "" }, { "docid": "fee8bc6ca702768975de62ba308ca269", "score": "0.4267723", "text": "def _build_categories(title):\n _, name, thumbnail = title.split(\"#\", 2)\n base = BrowseMediaSource(\n domain=DOMAIN,\n identifier=f\"{title}\",\n media_class=MediaClass.GAME,\n media_content_type=\"\",\n title=name,\n can_play=False,\n can_expand=True,\n children=[],\n children_media_class=MediaClass.DIRECTORY,\n thumbnail=thumbnail,\n )\n\n owners = [\"my\", \"community\"]\n kinds = [\"gameclips\", \"screenshots\"]\n for owner in owners:\n for kind in kinds:\n base.children.append(\n BrowseMediaSource(\n domain=DOMAIN,\n identifier=f\"{title}~~{owner}#{kind}\",\n media_class=MediaClass.DIRECTORY,\n media_content_type=\"\",\n title=f\"{owner.title()} {kind.title()}\",\n can_play=False,\n can_expand=True,\n children_media_class=MEDIA_CLASS_MAP[kind],\n )\n )\n\n return base", "title": "" }, { "docid": "621d0daebe9467d5e766595121df96f8", "score": "0.42472777", "text": "def name_variants(self, key, value):\n if value.get('g'):\n self.setdefault('extra_words', [])\n self['extra_words'].extend(force_force_list(value.get('g')))\n\n values = self.get('name_variants', [])\n values.append({\n 'source': value.get('9'),\n 'value': force_force_list(value.get('a', [])),\n })\n\n return values", "title": "" }, { "docid": "7f04abee56cc63dcdf24c79bfecd1536", "score": "0.42433727", "text": "def remove(self, name):\n found = [q for q in self.items if q.name == name]\n if len(found) == 1:\n self.items.remove(found[0])", "title": "" }, { "docid": "5c6ac8705ae45dd05cf1e99b6173432c", "score": "0.4217444", "text": "def process_tree(self, channel_node):\n file_names = []\n self.process_tree_recur(file_names, channel_node)\n return [x for x in set(file_names) if x] # Remove any duplicate or None filenames", "title": "" }, { "docid": "aeb23b0eb8c8d9fbef8859fce89d74d3", "score": "0.42132205", "text": "def removeChildren(self, *args):\n return _osg.Switch_removeChildren(self, *args)", 
"title": "" }, { "docid": "b1d4598523d3fb06c2f62d5ffc155e13", "score": "0.4200512", "text": "def __init__(self, name, cuisine_type='ice_cream'):\n super().__init__(name, cuisine_type)\n self.flavors = []", "title": "" }, { "docid": "812f0d135ca78842ec3f67a0b950f24e", "score": "0.41887367", "text": "def filter_variants(self, max_homol=None):\n\n\tfor variant in self.variants: \n\t for adj in variant.adjs:\n\t\tif 'N' in adj.probes[0].upper():\n\t\t adj.filtered_out = True\n\t\t if self.debug:\n\t\t\tsys.stdout.write('N_in_probe %s %s\\n' % (adj.contigs, adj.probes[0]))\n\t\t\t\n\t\tif not adj.filtered_out and adj.novel_seq is not None and adj.novel_seq != 'NA' and adj.novel_seq != '-' and\\\n\t\t re.search('[^ATGC]', adj.novel_seq, re.IGNORECASE):\n\t\t adj.filtered_out = True\n\t\t if self.debug:\n\t\t\tsys.stdout.write('non AGTC in novel sequence %s %s\\n' % (adj.contigs, adj.novel_seq))\n\t\t\t\n\t\tif not adj.filtered_out and (target_non_canonical(adj.chroms[0]) or target_non_canonical(adj.chroms[1])):\n\t\t adj.filtered_out = True\n\t\t if self.debug:\n\t\t\tsys.stdout.write('non canonical chromosome %s %s\\n' % (adj.contigs, adj.chroms))\n\t\t\t\n\t\tif not adj.filtered_out and adj.homol_seq and max_homol is not None:\n\t\t if len(adj.homol_seq[0]) > max_homol:\n\t\t\tadj.filtered_out = True\n\t\t\tif self.debug:\n\t\t\t sys.stdout.write('homolgous sequence length %s too long (>%s)\\n' % (len(adj.homol_seq[0]), max_homol))\n\t\t\t \n\t\tif adj.filtered_out:\n\t\t variant.filtered_out = True", "title": "" }, { "docid": "defc55a118c7fcdeae9eef6c80176d58", "score": "0.41869694", "text": "def remove_lonely_choose_1(self,tree_original):\n\t\t\n\t\ttree= copy.deepcopy(tree_original)\n\t\tfor parent in tree.getiterator():\n\t\t\tfor child in list(parent):\t\t\t\t\n\t\t\t\tif child.tag ==\"choose_1\" and len(child) == 1:\n\t\t\t\t\tparent.remove(child)\n\t\t\t\t\tparent.extend(child)\n\t\treturn tree", "title": "" }, { "docid": "e97266ab9146a747070617d8623030e9", "score": "0.41719", "text": "def test_concurrent_facet_change_via_uninstall(self):\n\n change_facets = [\n ['facet.A-incorp-sync',\n False, None, 'parent', False, False],\n ['facet.AA-sync', False, None, 'parent', False, False],\n ]\n remove_packages = self.__pkg_names_to_fmris([\n \"AB-sync@1\",\n ])\n change_packages = self.__pkg_name_tuples_to_fmris([\n [\"A-incorp-sync@1\", \"A-incorp-sync@3\"],\n [\"AA-sync@1\", \"AA-sync@4\"],\n [\"B-incorp-sync@1\", \"B-incorp-sync@2\"],\n [\"BA@1\", \"BA@2\"],\n [\"entire-sync@1\", \"entire-sync@2\"],\n ])\n self.__test_concurrent_facet_change_via_child_op(\n \"uninstall\", \"AB-sync\",\n extra_child_pkgs=[\"AB-sync@1\"],\n change_facets=change_facets,\n remove_packages=remove_packages,\n change_packages=change_packages)", "title": "" }, { "docid": "c8b769698402ecc35f1aa8605b968f17", "score": "0.41515467", "text": "async def rmgallery(self, ctx: commands.Context, channel: discord.TextChannel):\n if channel.id in await self.config.guild(ctx.guild).channels():\n async with self.config.guild(ctx.guild).channels() as channels:\n channels.remove(channel.id)\n await ctx.send(\n f\"{channel.mention} has been removed from the Gallery channels list.\"\n )\n else:\n await ctx.send(\n f\"{channel.mention} already isn't in the Gallery channels list.\"\n )", "title": "" }, { "docid": "92b40771d65177e61bcabb1abf2db6ba", "score": "0.41502494", "text": "def filter_variants(log, variants, retain=True):\r\n if check_is_dataframe(log):\r\n check_dataframe_columns(log)\r\n from 
pm4py.algo.filtering.pandas.variants import variants_filter\r\n return variants_filter.apply(log, [\",\".join(v) for v in variants],\r\n parameters={variants_filter.Parameters.POSITIVE: retain})\r\n else:\r\n from pm4py.algo.filtering.log.variants import variants_filter\r\n return variants_filter.apply(log, [\",\".join(v) for v in variants],\r\n parameters={variants_filter.Parameters.POSITIVE: retain})", "title": "" }, { "docid": "7174854a0ebde31f853c7f266f52bd56", "score": "0.4145753", "text": "def prune(\n self,\n modifiers=None,\n modifier_types=None,\n samples=None,\n channels=None,\n measurements=None,\n ):\n # avoid mutable defaults\n modifiers = [] if modifiers is None else modifiers\n modifier_types = [] if modifier_types is None else modifier_types\n samples = [] if samples is None else samples\n channels = [] if channels is None else channels\n measurements = [] if measurements is None else measurements\n\n return self._prune_and_rename(\n prune_modifiers=modifiers,\n prune_modifier_types=modifier_types,\n prune_samples=samples,\n prune_channels=channels,\n prune_measurements=measurements,\n )", "title": "" }, { "docid": "ba1680d3397a77c48f5a4fff8e2604e2", "score": "0.4134821", "text": "def __init__(self, name, cuisine_type):\r\n\t\tsuper().__init__(name, cuisine_type)\r\n\t\tself.flavors = []", "title": "" }, { "docid": "17bdd0bbedc1a
b034935a1ce64f551", "score": "0.41189975", "text": "def single_prune(self, info, exclude=None):\n\n for module, name in self.conv_names.items():\n if exclude is not None and module in exclude:\n continue\n fisher = self.accum_fishers[module]\n in_mask = module.in_mask.view(-1)\n ancestors = self.conv2ancest[module]\n if self.delta == 'flops':\n # delta_flops is a value indicate how much flops is\n # reduced in entire forward process after we set a\n # zero in `in_mask` of a specific conv_module.\n delta_flops = self.flops[module] * module.out_mask.sum() / (\n module.in_channels * module.out_channels)\n for ancestor in ancestors:\n delta_flops += self.flops[ancestor] * ancestor.in_mask.sum(\n ) / (ancestor.in_channels * ancestor.out_channels)\n fisher /= (float(delta_flops) / 1e9)\n if self.delta == 'acts':\n delta_acts = 0\n for ancestor in ancestors:\n delta_acts += self.acts[ancestor] / ancestor.out_channels\n fisher /= (float(max(delta_acts, 1.)) / 1e6)\n info.update(\n self.find_pruning_channel(module, fisher, in_mask, info))\n return info", "title": "" }, { "docid": "fa5619399e49084a85f8d6014856e724", "score": "0.41178423", "text": "def get_remove_filter() :\n\n return []", "title": "" }, { "docid": "29462616611df69f7a5b3a8b25e68e7d", "score": "0.40990835", "text": "def remove_cats(self, df, name):\n cat_mapping: dict = self._lookup_mapping(name)\n drop: list = self._lookup_cleanup(name)\n self._remove_cats(df, drop, cat_mapping)", "title": "" }, { "docid": "de0e7b2eac88fe38789cb9cf8f0f5db9", "score": "0.4095526", "text": "def filter_exac(self, variants):\n \n passed_vars = []\n \n for (var, check, inh, hgnc) in variants:\n \n # figure out what the het and hemi counts are in ExAC (if available)\n hemi, het = 0, 0\n if \"AC_Hemi\" in var.child.info and var.get_chrom() == \"X\":\n hemi = sum([int(x.replace('.', '0')) for x in var.child.info[\"AC_Hemi\"].split(\",\")])\n if \"AC_Het\" in var.child.info:\n het = sum([int(x.replace('.', '0')) for x in var.child.info[\"AC_Het\"].split(\",\")])\n \n geno = 
var.get_trio_genotype()\n # filter out hemizygous variants on chrX in males. Autosomal\n # and female chrX variants should pass through unfiltered.\n # We don't filter out de novo variants based on the ExAC hemizygous\n # count. We only apply this filter to inherited variants.\n if \"Hemizygous\" in inh and self.family.child.is_male() and hemi > 0 and \\\n geno != var.get_de_novo_genotype() and geno[1:] != (\"NA\", \"NA\"):\n inh.remove(\"Hemizygous\")\n \n # filter out monoallelic variants with high ExAC het counts.\n if var.get_chrom() != \"X\" and het > 4 and \"Monoallelic\" in inh:\n inh.remove(\"Monoallelic\")\n elif var.get_chrom() == \"X\" and (het + hemi) > 4 and \"X-linked dominant\" in inh:\n inh.remove(\"X-linked dominant\")\n \n if inh == []:\n log_str = \"{}\\t{} dropped from ExAC frequency count\".format(self.family.child.get_id(), var)\n logging.info(log_str)\n if var.get_chrom() == self.debug_chrom and var.get_position() == self.debug_pos:\n print(log_str)\n \n if inh != []:\n passed_vars.append((var, check, inh, hgnc))\n \n return passed_vars", "title": "" }, { "docid": "37241e17629516eef6eb8bf1317a142f", "score": "0.40910226", "text": "def remove_cnvs(self, variants):\n \n # remove CNVs from the list of flagged variants\n passed_vars = []\n for (var, check, inh, hgnc) in variants:\n if not var.is_cnv():\n passed_vars.append((var, check, inh, hgnc))\n else:\n log_str = \"{}\\t{} dropped from excess CNVs in proband\".format(self.family.child.get_id(), var)\n logging.info(log_str)\n if var.get_chrom() == self.debug_chrom and var.get_position() == self.debug_pos:\n print(log_str)\n \n return passed_vars", "title": "" }, { "docid": "3f64d0076c01b6133485d323a3e6e0f2", "score": "0.408558", "text": "def remove(self, *args):\n for x in args:\n self.children.remove(x)\n return self.children", "title": "" }, { "docid": "8c0f550d65793098ec124581775ebdde", "score": "0.40757728", "text": "def filter_for_meta_sig_variants(testable_variants_file_name, meta_analysis_output_folder,\n final_meta_analysis_sig_variants_results):\n\n meta_global_counts_dict = {\n 'Biallelic_Testable_Variants': 0,\n 'Total_Tests': 0,\n 'Total_Sig_ASE_Variants': 0,\n 'Total_Biallelic_Samples' : 0,\n 'Total_Sig_Biallelic_Samples' : 0,\n 'Total_Sig_ASE_Ref': 0,\n 'Total_Sig_ASE_Alt': 0,\n 'Total_Biallelic_No_ASE': 0,\n 'Total_Passing_Homozygous': 0,\n 'Total_Passing_Homozygous_Ref': 0,\n 'Total_Passing_Homozygous_Alt': 0,\n 'Total_Non_Testable': 0} \n\n # Getting \n meta_analysis_sig_variants_list = final_meta_analysis_sig_variants_results['meta_analysis_sig_variants_list']\n\n # creating new filtered file\n sig_meta_file_name = meta_analysis_output_folder + \"/sig_meta_analysis_variants.txt\"\n\n # Create output folder for failing data for verification purposes\n not_sig_meta_file_name = meta_analysis_output_folder + \"/not_sig_meta_analysis_variants.txt\"\n\n # Open the files for writing\n sig_meta__file = open(sig_meta_file_name,'w')\n not_sig_meta_file = open(not_sig_meta_file_name,'w')\n \n #opening up variant file\n testable_variants_file = open(testable_variants_file_name, 'r')\n\n # reading file line by line\n for line in testable_variants_file:\n\n #Getting the header information from the file\n if line.startswith('#CHROM'):\n \n #Print header of file to new data_file\n sig_meta__file.write(line)\n 
not_sig_meta_file.write(line)\n continue\n \n else:\n #Tallying total testable variants\n meta_global_counts_dict['Biallelic_Testable_Variants']+= 1\n \n #Splitting the data\n parsed_line = line.rstrip().split('\\t')\n\n #Getting Variant Results Only\n variant_name = parsed_line[0] + \":\" + parsed_line[1]\n\n if variant_name in meta_analysis_sig_variants_list:\n sig_meta__file.write(line)\n meta_global_counts_dict['Total_Sig_ASE_Variants']+= 1\n\n else:\n not_sig_meta_file.write(line)\n\n # Close the files\n sig_meta__file.close()\n not_sig_meta_file.close()\n\n return{'sig_meta_file_name': sig_meta_file_name, 'meta_global_counts_dict':meta_global_counts_dict}", "title": "" }, { "docid": "8ec7fc66d9e4c96a8513b49e4bf954fe", "score": "0.40745944", "text": "def test_channel_filters(self):\n # Include\n worker = Worker(None, only_channels=[\"yes.*\", \"maybe.*\"])\n self.assertEqual(\n worker.apply_channel_filters([\"yes.1\", \"no.1\"]),\n [\"yes.1\"],\n )\n self.assertEqual(\n worker.apply_channel_filters([\"yes.1\", \"no.1\", \"maybe.2\", \"yes\"]),\n [\"yes.1\", \"maybe.2\"],\n )\n # Exclude\n worker = Worker(None, exclude_channels=[\"no.*\", \"maybe.*\"])\n self.assertEqual(\n worker.apply_channel_filters([\"yes.1\", \"no.1\", \"maybe.2\", \"yes\"]),\n [\"yes.1\", \"yes\"],\n )\n # Both\n worker = Worker(None, exclude_channels=[\"no.*\"], only_channels=[\"yes.*\"])\n self.assertEqual(\n worker.apply_channel_filters([\"yes.1\", \"no.1\", \"maybe.2\", \"yes\"]),\n [\"yes.1\"],\n )", "title": "" }, { "docid": "9b22ce43e94fbdf411eb9ca73948ba93", "score": "0.40670216", "text": "def children_of_kind(channel_id=None, content=None, kind=None, **kwargs):\n return content.get_descendants(include_self=False).filter(kind=kind).using(channel_id)", "title": "" }, { "docid": "fbb1138bf9c1505f37e86c0e8b643d96", "score": "0.40659022", "text": "def removeChildren(self, *args):\n return _osg.SwitchRef_removeChildren(self, *args)", "title": "" }, { "docid": "70d3bfb1dc64d16814d0c27df83b9153", "score": "0.40645763", "text": "def _densify_variants(self, variant, all_call_names):\n # type: (vcf_parser.Variant, List[str]) -> vcf_parser.Variant\n existing_call_name = {call.name: call for call in variant.calls}\n\n new_calls = []\n for call_name in all_call_names:\n if call_name in existing_call_name.keys():\n new_calls.append(existing_call_name.get(call_name))\n else:\n new_calls.append(\n vcfio.VariantCall(name=call_name,\n genotype=vcfio.MISSING_GENOTYPE_VALUE))\n variant.calls = new_calls\n\n return variant", "title": "" }, { "docid": "a38310052c86f56e670b2b29062a0b18", "score": "0.4063946", "text": "def test_facet_inheritance_masked_preserve(self):\n\n # create parent (0), push child (1), and pull child (2)\n self._imgs_create(3)\n self._attach_child(0, [1])\n self._attach_parent([2], 0)\n\n # install a package with inheritable facets in the parent\n self._pkg([0], \"install -v {0}\".format(self.p_fmri[\"inc1@2\"]))\n\n for fv in [\"True\", \"False\"]:\n\n # set inheritable facet locally in children\n self._pkg([1, 2], \"change-facet 123456={0}\".format(fv))\n\n # disable inheritable facet in parent\n self._pkg([0], \"change-facet 123456=False\")\n self._pkg([2], \"sync-linked\")\n\n # verify inheritable facet is disabled in children\n output = \"facet.123456\\tFalse\\tparent\\n\"\n output_m = \\\n \"facet.123456\\tFalse\\tparent\\tFalse\\n\" + \\\n \"facet.123456\\t{0}\\tlocal\\tTrue\\n\".format(fv)\n for i in [1, 2]:\n self._pkg([i], \"facet -H -F tsv\", \\\n output_cb=self._assertEqual_cb(output))\n 
self._pkg([i], \"facet -H -F tsv -m\", \\\n output_cb=self._assertEqual_cb(output_m))\n\n # clear inheritable facet in the parent\n self._pkg([0], \"change-facet 123456=None\")\n self._pkg([2], \"sync-linked\")\n\n # verify the local child setting is restored\n output = \"facet.123456\\t{0}\\tlocal\\n\".format(fv)\n output_m = \"facet.123456\\t{0}\\tlocal\\tFalse\\n\".format(fv)\n for i in [1, 2]:\n self._pkg([i], \"facet -H -F tsv\", \\\n output_cb=self._assertEqual_cb(output))\n self._pkg([i], \"facet -H -F tsv -m\", \\\n output_cb=self._assertEqual_cb(output_m))", "title": "" }, { "docid": "971505b0b6e18ca2c4e1d73af15d94bf", "score": "0.40597397", "text": "def removeChildren(self, *args):\n return _osg.Group_removeChildren(self, *args)", "title": "" }, { "docid": "744cc58f8c3f4297fa38e5b5fe0673a5", "score": "0.4051659", "text": "def remove_item(self, src):\n ctg = self._items.__getitem__(src).category\n self._items.__delitem__(src)\n self._categories[ctg].remove(src)\n if not self._categories[ctg]:\n # avoid category with empty set\n self._categories.__delitem__(ctg)\n\n if ctg == self._main_detector_category:\n self._main_detector = ''", "title": "" }, { "docid": "edb2ace143933faf07276a0b9ef6af26", "score": "0.40428928", "text": "async def bsdataset_remove(self, ctx: Context, *clantags):\n if not clantags:\n await send_cmd_help(ctx)\n return\n\n server = ctx.message.server\n bands = self.settings.get_bands_settings(server)\n\n for clantag in clantags:\n if clantag.startswith('#'):\n clantag = clantag[1:]\n\n removed = bands.pop(clantag, None)\n if removed is None:\n await self.bot.say(\"{} not in clan settings.\".format(clantag))\n return\n\n self.settings.set_bands_settings(server, bands)\n await self.bot.say(\"Removed #{} from bands.\".format(clantag))", "title": "" }, { "docid": "822c40b977b12387acda710d371ebd09", "score": "0.40162432", "text": "def get_all_variants():\n return {\n 'BOOSTDESC': {\n 'desc': {\n 'BGM': 100,\n 'BGM_HARD': 101,\n 'BGM_BILINEAR': 102,\n 'LBGM': 200,\n 'BINBOOST_64': 300,\n 'BINBOOST_128': 301,\n 'BINBOOST_256': 302\n }\n },\n 'BRIEF': None,\n 'DAISY': {\n 'norm': {\n 'NRM_NONE': 100,\n 'NRM_PARTIAL': 101,\n 'NRM_FULL': 102,\n 'NRM_SIFT': 103\n }\n },\n 'FREAK': None,\n 'HarrisLaplace': None,\n 'LATCH': None,\n 'LUCID': None,\n 'STAR': None,\n 'AGAST': {\n 'type': {\n 'AGAST_5_8': 0,\n 'AGAST_7_12d': 1,\n 'AGAST_7_12s': 2,\n 'OAST_9_16': 3\n }\n },\n 'AKAZE': {\n 'diffusivity': {\n 'DIFF_PM_G1': 0,\n 'DIFF_PM_G2': 1,\n 'DIFF_WEICKERT': 2,\n 'DIFF_CHARBONNIER': 3\n },\n 'descriptor_type': {\n 'DESCRIPTOR_KAZE_UPRIGHT': 2,\n 'DESCRIPTOR_KAZE': 3,\n 'DESCRIPTOR_MLDB_UPRIGHT': 4,\n 'DESCRIPTOR_MLDB': 5\n }\n },\n 'BRISK': None,\n 'FAST': {\n 'type': {\n 'TYPE_5_8': 0,\n 'TYPE_7_12': 1,\n 'TYPE_9_16': 2\n }\n },\n 'GFTT': None,\n 'KAZE': {\n 'diffusivity': {\n 'DIFF_PM_G1': 0,\n 'DIFF_PM_G2': 1,\n 'DIFF_WEICKERT': 2,\n 'DIFF_CHARBONNIER': 3\n }\n },\n 'ORB': {\n 'scoreType': {\n 'HARRIS_SCORE': 0,\n 'FAST_SCORE': 1\n }\n }\n\n }", "title": "" }, { "docid": "94f3ca0fd6625cd45f5366e8e0f3f2ae", "score": "0.40150672", "text": "def test_facet_inheritance_globs(self):\n\n # create parent (0), push child (1)\n self._imgs_create(2)\n self._attach_child(0, [1])\n\n self._pkg([0], \"change-facet\" +\n \" 123456=False\" +\n \" 456789=True\" +\n \" *456*=False\" +\n \" *789=True\" +\n \" 123*=True\")\n\n # verify that no facets are inherited\n output = \"\"\n self._pkg([1], \"facet -H -F tsv\", \\\n output_cb=self._assertEqual_cb(output))\n\n # install packages with 
inheritable facets in the parent\n self._pkg([0], \"install -v {0}\".format(self.p_fmri[\"inc1@2\"]))\n\n # verify that three facets are inherited\n output = \"\"\n output += \"facet.*456*\\tFalse\\tparent\\n\"\n output += \"facet.123*\\tTrue\\tparent\\n\"\n output += \"facet.123456\\tFalse\\tparent\\n\"\n self._pkg([1], \"facet -H -F tsv\", \\\n output_cb=self._assertEqual_cb(output))\n\n # install packages with inheritable facets in the parent\n self._pkg([0], \"install -v {0}\".format(self.p_fmri[\"inc2@2\"]))\n\n # verify that five facets are inherited\n output = \"\"\n output += \"facet.*456*\\tFalse\\tparent\\n\"\n output += \"facet.*789\\tTrue\\tparent\\n\"\n output += \"facet.123*\\tTrue\\tparent\\n\"\n output += \"facet.123456\\tFalse\\tparent\\n\"\n output += \"facet.456789\\tTrue\\tparent\\n\"\n self._pkg([1], \"facet -H -F tsv\", \\\n output_cb=self._assertEqual_cb(output))\n\n # remove packages with inheritable facets in the parent\n self._pkg([0], \"uninstall -v {0}\".format(self.p_fmri[\"inc1@2\"]))\n\n # verify that three facets are inherited\n output = \"\"\n output += \"facet.*456*\\tFalse\\tparent\\n\"\n output += \"facet.*789\\tTrue\\tparent\\n\"\n output += \"facet.456789\\tTrue\\tparent\\n\"\n self._pkg([1], \"facet -H -F tsv\", \\\n output_cb=self._assertEqual_cb(output))\n\n # remove packages with inheritable facets in the parent\n self._pkg([0], \"uninstall -v {0}\".format(self.p_fmri[\"inc2@2\"]))\n\n # verify that no facets are inherited\n output = \"\"\n self._pkg([1], \"facet -H -F tsv\", \\\n output_cb=self._assertEqual_cb(output))", "title": "" }, { "docid": "b32fff1d585a7d61dee80d447129b251", "score": "0.4014483", "text": "def _filter_model(filter_text, data, type):\n if type == \"Debris\":\n return [d for d in data if filter_text.upper() in d['displayName'].upper()]\n\n else:\n filtered_list = [d for d in data\n if (filter_text.upper() in d['displayName'].upper() or\n filter_text.upper() in d['scientificName'].upper()or\n (d[\"commonName1\"] is not None and filter_text.upper() in d['commonName1'].upper()) or\n (d[\"commonName2\"] is not None and filter_text.upper() in d['commonName2'].upper()) or\n (d[\"commonName3\"] is not None and filter_text.upper() in d['commonName3'].upper()))]\n\n if filter_text == \"\":\n return filtered_list\n\n start_match_list = [x for x in filtered_list if x['displayName'].upper().startswith(filter_text.upper())]\n # start_match_list = sorted(start_match_list, key=lambda x: x[\"displayName\"].lower())\n\n remaining_list = [x for x in filtered_list if x not in start_match_list]\n # remaining_list = sorted(remaining_list, key=lambda x: x[\"displayName\"].lower())\n\n sorted_list = start_match_list + remaining_list\n\n return sorted_list", "title": "" }, { "docid": "134decea10265357eda8c824d042aaf7", "score": "0.40106404", "text": "def filter_type(self, items, bclass):\n res = []\n for item in items:\n if self.g.query(f\"ASK {{ <{item}> rdf:type/rdfs:subClassOf* <{bclass}> }}\")[0]:\n res.append(item)\n return res", "title": "" }, { "docid": "9b3b6c5322d150cf2d233a39424e3b98", "score": "0.40089685", "text": "def _filter_my_stocks(self):\n tickers_to_remove = []\n for tk in self.stock_picks.keys():\n if self.stock_picks[tk].errorCount > 0:\n tickers_to_remove.append(tk)\n \n for tk in tickers_to_remove:\n del self.stock_picks[tk]\n \n self.tickers_high_vol = list(self.stock_picks.keys())", "title": "" }, { "docid": "6aefdcb40a27122a238f79ce743f27b1", "score": "0.40069738", "text": "def _filterExcludedGAVs(self, artifactList):\n\n 
logging.debug(\"Filtering artifacts with excluded GAVs.\")\n regExps = maven_repo_util.getRegExpsFromStrings(self.config.excludedGAVs)\n gavRegExps = []\n gatcvRegExps = []\n for regExp in regExps:\n if regExp.pattern.count(\":\") > 2:\n gatcvRegExps.append(regExp)\n else:\n gavRegExps.append(regExp)\n for ga in artifactList.keys():\n for priority in artifactList[ga].keys():\n for version in artifactList[ga][priority].keys():\n gav = \"%s:%s\" % (ga, version)\n if maven_repo_util.somethingMatch(gavRegExps, gav):\n logging.debug(\"Dropping GAV %s:%s from priority %i because it matches an excluded \"\n \"GAV pattern.\", ga, version, priority)\n del artifactList[ga][priority][version]\n else:\n artSpec = artifactList[ga][priority][version]\n for artType in copy.deepcopy(artSpec.artTypes.keys()):\n at = artSpec.artTypes[artType]\n for classifier in copy.deepcopy(at.classifiers):\n if classifier:\n gatcv = \"%s:%s:%s:%s\" % (ga, artType, classifier, version)\n else:\n gatcv = \"%s:%s:%s\" % (ga, artType, version)\n if maven_repo_util.somethingMatch(gatcvRegExps, gatcv):\n logging.debug(\"Dropping GATCV %s from priority %i because it matches an excluded \"\n \"GAV pattern.\", gatcv, priority)\n at.classifiers.remove(classifier)\n if not at.classifiers:\n logging.debug(\"Dropping GATV %s:%s:%s from priority %i because of no classifiers left.\",\n ga, artType, version, priority)\n del artSpec.artTypes[artType]\n if not artSpec.containsMain():\n logging.debug(\"Dropping GAV %s:%s from priority %i because of no main artifact left.\",\n ga, version, priority)\n del artifactList[ga][priority][version]\n if not artifactList[ga][priority]:\n logging.debug(\"Dropping GA %s from priority %i because of no version left.\", ga, priority)\n del artifactList[ga][priority]\n if not artifactList[ga]:\n logging.debug(\"Dropping GA %s because of no priority left.\", ga)\n del artifactList[ga]\n return artifactList", "title": "" }, { "docid": "65a3a38e6b6dcc29e10c7ff1750d08c6", "score": "0.4006651", "text": "def test_change_list_remove_mixed(self):\r\n changed = self._remove_list_test_helper('alice, bob', 'bob, carol')\r\n self.assertEqual(changed, 'alice')", "title": "" }, { "docid": "5cc573e94f4e7a9f674846adf5964921", "score": "0.4005716", "text": "def generate_null_variants(all_variants_file,significant_variants_file):\n all_variants_df = pd.read_csv(all_variants_file)\n all_variants_df = drop_duplicates(all_variants_df) \n\n significant_variants_df = pd.read_csv(significant_variants_file)\n significant_variants_df = drop_duplicates(significant_variants_df) \n\n null_variants = all_variants_df.loc[~all_variants_df[\"variant\"].isin(significant_variants_df[\"variant\"])].copy()\n return(null_variants)", "title": "" }, { "docid": "588d52acdc41b0f89cc686446860d843", "score": "0.40056813", "text": "def removeGenre(self, genres, locked=True):\n return self.editTags('genre', genres, locked=locked, remove=True)", "title": "" }, { "docid": "0b663675de8dc47a75c54d1a4f8d154c", "score": "0.40054893", "text": "def _del_sub(self, name, all=False, lang=None):\r\n path = self._fix_ns(name, split=True)\r\n original_target = path[-1]\r\n\r\n default_lang = self.get_lang()\r\n if not lang:\r\n lang = default_lang\r\n\r\n for level, _ in enumerate(path):\r\n # Generate the paths to the target elements and their parent.\r\n element_path = \"/\".join(path[:len(path) - level])\r\n parent_path = \"/\".join(path[:len(path) - level - 1])\r\n\r\n elements = self.xml.findall(element_path)\r\n parent = self.xml.find(parent_path)\r\n\r\n if 
elements:\r\n if parent is None:\r\n parent = self.xml\r\n for element in elements:\r\n if element.tag == original_target or not list(element):\r\n # Only delete the originally requested elements, and\r\n # any parent elements that have become empty.\r\n elem_lang = element.attrib.get('{%s}lang' % XML_NS,\r\n default_lang)\r\n if lang == '*' or elem_lang == lang:\r\n parent.remove(element)\r\n if not all:\r\n # If we don't want to delete elements up the tree, stop\r\n # after deleting the first level of elements.\r\n return", "title": "" }, { "docid": "f69df88f1d37cee772eefc57e1484321", "score": "0.40020126", "text": "def variants(self):\n return self.list_to_instance_list(self.dict['variants'], ComicSummary)", "title": "" }, { "docid": "93f46e2cf48383d5f6542d8ee5c2efaf", "score": "0.40012228", "text": "def parse_name(name):\n possible_names = []\n possible_addons = ['tv', 'ttv', '_tv', '_ttv']\n removables = ['you exiled ', 'you assisted in exiling ', 'you disrupted ', '\\n', '\\x0c']\n # convert to lowercase for easier parsing\n name = name.lower()\n # remove\n for removable in removables:\n name = name.replace(removable, '')\n # add the cleaned up name to the list of possible names\n possible_names.append(name)\n # remove any additional signifiers\n for addon in possible_addons:\n if addon in name:\n possible_names.append(name.replace(addon, ''))\n return possible_names", "title": "" }, { "docid": "8278b42c829e9b7896c15256d2861d22", "score": "0.4000646", "text": "async def removeOldChannels():\n deleteChannelList = []\n #these two loops are split up, as the it raises an error when the dict changes.\n for i in client.get_all_channels():\n if \"-matchday-\" in i.name:\n logger.info(f\"Deleting old channel {i.name}\")\n deleteChannelList.append((i.server, i.name))\n\n for i in deleteChannelList:\n await deleteChannel(i[0], i[1])", "title": "" }, { "docid": "fe2184062cac4dffe99cf91b2a4c2eed", "score": "0.3998517", "text": "def choose(part_of_name='', directory_path='.', types=[File, Directory], quiet=False):\n variants = list(filter(lambda item: part_of_name in item.name and\n any([isinstance(item, t) for t in types]), Directory(directory_path).get_items()))\n variants.sort(key=_item_name) \n if len(variants) == 1:\n return variants[0]\n elif len(variants) > 1:\n if not quiet:\n print(\"More than one matches\")\n do_print = input(\"Print all {}? (y/n) \".format(len(variants)))\n if 'y' in do_print or do_print.strip() == '':\n print()\n for i, item in enumerate(variants):\n print(i, item)\n choice = input(\"Choose #? 
(#/n) \")\n if choice.isnumeric():\n return variants[int(choice)]\n return variants\n else:\n if not quiet:\n print('No matches')", "title": "" }, { "docid": "2a3fbddfd75f3618478da7e030d4e2c2", "score": "0.39984125", "text": "def removeChildren(self, *args):\n return _osg.Sequence_removeChildren(self, *args)", "title": "" }, { "docid": "1dc342ed2715b38db6a2c05bcd8adfd2", "score": "0.39962763", "text": "def guess_category(name):\n if 'Battery' in name:\n return 'Battery'\n if 'Radio' in name or 'Zigbee' in name:\n return 'Radio'\n if 'Motion' in name:\n return 'Motion'\n if 'Door' in name:\n return 'Door'\n if 'LightSwitch' in name:\n return 'LightSwitch'\n if 'Light' in name:\n return 'Light'\n if 'Temperature' in name or 'Thermostat' in name:\n return 'Temperature'\n if 'Item' in name:\n return 'Item'\n return 'Other'", "title": "" }, { "docid": "261aa348f981a418e5ac1bd8eb0435ff", "score": "0.3991132", "text": "def remove_component_from_favorites(self, component):", "title": "" }, { "docid": "ee869df008f4b1c184f38297a67b1c94", "score": "0.39907864", "text": "def _RemoveFromCloneList(self, clone, attrNamesToClone):\n attrNamesToClone = super(SimpleComponentSplitter, self)._RemoveFromCloneList(clone, attrNamesToClone)\n dontClone = [\"splits\", \"_balance\", \"borrowedSplits\"]\n \n for name in dontClone:\n if name in attrNamesToClone:\n attrNamesToClone.remove(name)\n \n return attrNamesToClone", "title": "" }, { "docid": "446e131f77020afba99d383b9eb726c2", "score": "0.3990018", "text": "def remove_channel(self, channel, *, verbose=True):\n channel_index = wt_kit.get_index(self.channel_names, channel)\n new = list(self.channel_names)\n name = new.pop(channel_index)\n del self[name]\n self.channel_names = new\n if verbose:\n print(\"channel {0} removed\".format(name))", "title": "" }, { "docid": "0294f2538afb929f4310493fa385dfec", "score": "0.3984516", "text": "def form_molecule_type_filter(allowed=None, excluded=None):\n if allowed is None:\n allowed = []\n if excluded is None:\n excluded = []\n pdb_entry_type = pd.read_csv(PDB_ENTRY_TYPE_FILE, delimiter='\\t',\n names=['pdb_code', 'molecule_type', 'source'])\n pdb_entry_type['pdb_code'] = \\\n pdb_entry_type['pdb_code'].apply(lambda x: x.lower())\n pdb_entry_type = pdb_entry_type.set_index('pdb_code')\n molecule_type = pdb_entry_type['molecule_type']\n\n def filter_fn(df):\n pdb_codes = df['structure'].apply(lambda x: x[:4].lower())\n\n if len(allowed) > 0:\n to_keep = molecule_type[pdb_codes].isin(allowed)\n elif len(excluded) > 0:\n to_keep = ~molecule_type[pdb_codes].isin(excluded)\n else:\n to_keep = pd.Series([True] * len(df), index=df['structure'])\n return df[to_keep.values]\n return filter_fn", "title": "" }, { "docid": "e990082266840ebbb5c541a5857275b8", "score": "0.39794925", "text": "def _remove(self, *members):\n from .treants import Treant\n\n abspaths = self._state\n remove = list()\n\n for member in members:\n if isinstance(member, int):\n remove.append(abspaths[member])\n elif isinstance(member, Treant):\n remove.append(member.abspath)\n elif isinstance(member, str):\n # try abspaths\n abspaths = fnmatch.filter(self.abspaths, member)\n paths = [m.abspath for m in self\n if m.abspath in abspaths]\n remove.extend(paths)\n # try names\n names = fnmatch.filter(self.names, member)\n paths = [m.abspath for m in self\n if m.name in names]\n remove.extend(paths)\n else:\n raise TypeError('Only a Treant, index, name, or absolute '\n 'path acceptable')\n\n self._del_members(remove)\n\n # remove from cache\n for abspath in remove:\n 
self._cache.pop(abspath, None)", "title": "" }, { "docid": "8de7671bac4b3bd69fb2f637910ffd72", "score": "0.39774927", "text": "def _categorize(self, category):\n self.torrents = [result for result in self.torrents\n if result.category == category]", "title": "" }, { "docid": "abee673efb4fb8eff73118936a8f80b3", "score": "0.3977336", "text": "def _filterExcludedGAVs(self, artifactList):\n\n logging.debug(\"Filtering artifacts with excluded GAVs.\")\n regExps = _getRegExpsFromStrings(self.config.excludedGAVs)\n for gat in artifactList.keys():\n ga = gat.rpartition(':')[0]\n for priority in artifactList[gat].keys():\n for version in artifactList[gat][priority].keys():\n gav = ga + \":\" + version\n if _somethingMatch(regExps, gav):\n del artifactList[gat][priority][version]\n if not artifactList[gat][priority]:\n del artifactList[gat][priority]\n if not artifactList[gat]:\n del artifactList[gat]\n return artifactList", "title": "" }, { "docid": "b7619441a2ae4561d73ac874be66d801", "score": "0.39750022", "text": "def remove(self, name, partial_match = True):\n\n if not partial_match:\n for vrml_object, raw_object in zip(self.objects, self.raw_objects):\n if name==vrml_object.name:\n self.objects.remove(vrml_object)\n self.raw_objects.remove(raw_object)\n else:\n for vrml_object, raw_object in zip(self.objects, self.raw_objects):\n if name in vrml_object.name:\n self.objects.remove(vrml_object)\n self.raw_objects.remove(raw_object)", "title": "" }, { "docid": "20f1d499dffdb0a6c6aaca1cda8a66f4", "score": "0.3972678", "text": "def test_remove_members_name(self, collection, tmpdir):\n with tmpdir.as_cwd():\n t1 = dtr.Treant('lark')\n t2 = dtr.Treant('elsewhere/lark')\n t3 = dtr.Treant('hark')\n g = dtr.Group('linus')\n\n stuff = [t1, t2, t3, g]\n\n # test removal by name\n collection.add(stuff)\n for item in stuff:\n assert item in collection\n\n # should remove both treants with name 'lark'\n collection.remove('lark')\n\n for item in (t3, g):\n assert item in collection\n\n for item in (t1, t2):\n assert item not in collection\n\n # test removal by a unix-style glob pattern\n collection.add(stuff)\n for item in stuff:\n assert item in collection\n\n # should remove 'lark' and 'hark' treants\n collection.remove('*ark')\n\n assert g in collection\n\n for item in (t1, t2, t3):\n assert item not in collection", "title": "" }, { "docid": "30fa0788f25b4582a98df61eff431d9d", "score": "0.39716175", "text": "def remove_classification_value(self, obj, type, value):\n special = [\"lc_classifications\", \"dewey_decimal_class\"]\n if type in special and type in obj.keys():\n while value in obj[type]:\n obj[type].remove(value)\n if len(obj[type]) == 0:\n del obj[type]\n elif \"classifications\" in obj.keys() and type in obj[\"classifications\"].keys():\n while value in obj[\"classifications\"][type]:\n obj[\"classifications\"][type].remove(value)\n if len(obj[\"classifications\"][type]) == 0:\n del obj[\"classifications\"][type]\n if len(obj[\"classifications\"]) == 0:\n del obj[\"classifications\"]", "title": "" }, { "docid": "39877248f007b0c470ae4323ee5b921e", "score": "0.39677745", "text": "def _prepare_remove(self, child):\n if child not in self.children():\n raise ValueError(\"cannot remove %r, hasn't been added\" % (child,))", "title": "" }, { "docid": "7a5fd0c74c103c895fa025a79ad232c7", "score": "0.39641193", "text": "def remove(self, values: Union[str, Iterable[str]], *, wait: bool = False) -> None:\n return self._pass_call_to_attr(function_name=\"remove\", values=values, wait=wait)", "title": "" }, { 
"docid": "4fdeaca17f7cdac37daaa8b69af47b4a", "score": "0.3963481", "text": "def _remove(item):\n\n # TODO: replace list with regex patterns per menu source\n removers = ('\\n', '(V)', '(V,P)', '(P)', '(V,FP)', '(FP)')\n removed = item\n for remover in removers:\n removed = removed.replace(remover, '')\n return removed", "title": "" }, { "docid": "b9c8a691d4f8ba12b8b1f16405e90369", "score": "0.39589578", "text": "def test__parse_channels(input_value):\n guild_id = 202306080004\n guild = Guild.precreate(guild_id)\n \n return parse_channels(input_value, guild.channels, guild_id)", "title": "" }, { "docid": "7b382351c002d6e9dfdde3984b362831", "score": "0.39509356", "text": "def test_remove_category_from_asset(self):\n pass", "title": "" }, { "docid": "26b87545c8f510b41bee6dc4392640e5", "score": "0.39507142", "text": "def itemByName(self, name):\n return RemoveFeature()", "title": "" }, { "docid": "f6e6e1bf427568adc1896a7fc282f8c8", "score": "0.39422128", "text": "def test_inheritance_matches_parental_affected_status_de_novo(self):\n \n cnv = self.create_variant(\"female\")\n \n cnv.child.format['INHERITANCE'] = \"de_novo\"\n cnv.child.format['CIFER_INHERITANCE'] = \"not_inherited\"\n \n self.inh.trio.mother.affected_status = \"1\"\n self.inh.trio.father.affected_status = \"1\"\n \n self.assertTrue(self.inh.inheritance_matches_parental_affected_status(cnv))", "title": "" }, { "docid": "bb6cbf41e870e88e1fe2a8156a31fb17", "score": "0.3936756", "text": "def remove(self, composant):\n cible = None\n for bouton, indice, cote in self.composants:\n if composant == bouton:\n self.gui.remove(bouton)\n cible = bouton, indice, cote\n if cible!=None:\n self.composants.remove(cible)\n while self.boutons[0].count(composant)>0:\n self.boutons[0].remove(composant)\n while self.boutons[1].count(composant)>0:\n self.boutons[1].remove(composant)\n while self.boutons[2].count(composant)>0:\n self.boutons[2].remove(composant)\n while self.boutons[3].count(composant)>0:\n self.boutons[3].remove(composant)\n self.anime(0.0)", "title": "" }, { "docid": "a52715d3556d1176881036d78a6d11d9", "score": "0.393304", "text": "def remove_pkg_debs(self, package, build_type):\n logger.debug(' '.join(['Remove all old version of debs for', package]))\n debs_clue = get_debs_clue(build_type)\n subdebs = debsentry.get_subdebs(debs_clue, package, logger)\n if not subdebs:\n logger.warning('Failed to get subdebs of %s from local debsentry cache', package)\n return False\n for deb in subdebs:\n pkg_item = deb.split('_')\n # if deb = name_version\n if len(pkg_item) > 1:\n msg = ''.join(['package ', pkg_item[0], '(', pkg_item[1], ')'])\n # if deb = name\n else:\n msg = ''.join(['package ', pkg_item[0]])\n logger.info(' '.join(['Searching for binary', msg, 'in repository', REPO_BUILD]))\n\n if self.kits['repo_mgr'].search_pkg(REPO_BUILD, pkg_item[0]):\n logger.info('Found binary %s in repository %s', msg, REPO_BUILD)\n if self.kits['repo_mgr'].delete_pkg(REPO_BUILD, pkg_item[0], 'binary', None, deploy=False):\n logger.info('Successfully deleted binary %s from repository %s',\n msg, REPO_BUILD)\n else:\n logger.info('Failed to delete binary %s from repository %s', msg,\n REPO_BUILD)\n ''' Fixme: not sure whether it's ok to skip self.publish_repo(REPO_BUILD) here\n '''\n return True", "title": "" }, { "docid": "f976075e2735752549d36ae2cfa4b08d", "score": "0.39292687", "text": "def removeChildren(self, *args):\n return _osg.GroupRef_removeChildren(self, *args)", "title": "" }, { "docid": "a9dc2e20d91f3de3e5d34cc1a756d870", "score": 
"0.3924877", "text": "def filter(self, options):\n if not options.category:\n self.showcategories = self.lists.keys()\n if not options.use_all_categories:\n self.showcategories = [ s for s in self.showcategories \n if s not in autohidden ]\n self.showcategories.sort()\n else:\n self.showcategories = options.category.split(':')\n\n if options.exclude:\n for c in options.exclude.split(':'):\n try:\n self.showcategories.remove(c)\n except ValueError:\n pass\n self.showcompleted = options.verbose", "title": "" }, { "docid": "700ff8ab9f5bada01cedbdbfabb25e17", "score": "0.39238182", "text": "def _handle_removechannel(self, params):\n\t\tchannel = self.get_channel(params[\"chanid\"])\n\t\tif channel:\n\t\t\tself.channels.remove(channel)", "title": "" }, { "docid": "e9c2e9027fc1c737bd4d7a516c364a3a", "score": "0.39208168", "text": "def test__ChannelType__name():\n for instance in ChannelType.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)", "title": "" }, { "docid": "09f829514c930bf5462c0988795e56c7", "score": "0.3918251", "text": "def remove_unavailable_variants(checkout):\n for line in checkout:\n try:\n add_variant_to_checkout(checkout, line.variant, line.quantity, replace=True)\n except InsufficientStock as e:\n quantity = e.item.quantity_available\n add_variant_to_checkout(checkout, line.variant, quantity, replace=True)", "title": "" }, { "docid": "b7c804fa154c313a8cfc939164f2e6fc", "score": "0.39179635", "text": "def removeChildNamed(self, name):\n if name not in self.namedChildren:\n return False\n found = self.namedChildren[name]\n success = self._removeChildStructure(found)\n if not success:\n return False\n del self.namedChildren[name]\n return True", "title": "" }, { "docid": "a237a184697c6dd4533bcf122b7af668", "score": "0.39160612", "text": "def removeSubD():\r\n\tif not pluginCheck():\r\n\t\treturn\r\n\r\n\tobjLs = mc.ls(sl=True, l=True)\r\n\tfor obj in objLs:\r\n\t\tobjSh = mc.listRelatives(obj, s=True, f=True)[0]\r\n\t\tmel.eval('vray addAttributesFromGroup %s vray_subdivision 0' % objSh)\r\n\t\tmel.eval('vray addAttributesFromGroup %s vray_subquality 0' % objSh)\r\n\t\tmel.eval('vray addAttributesFromGroup %s vray_displacement 0' % objSh)", "title": "" } ]
855be140d8423db09b16cf922a2d0314
Try to get an event from the event queue
[ { "docid": "66dff9d1e0011204d646601720f70914", "score": "0.72469306", "text": "def getEvent(self):\n try:\n ev = self.eventq.get(False)\n except:\n return None\n return ev", "title": "" } ]
[ { "docid": "f433116c31a9524023c0c3aef5ba3e43", "score": "0.7179411", "text": "def pick_event(self):\n logger.debug(\"checking event queue\")\n event = snap7.snap7types.SrvEvent()\n ready = ctypes.c_int32()\n code = self.library.Srv_PickEvent(self.pointer, ctypes.byref(event),\n ctypes.byref(ready))\n check_error(code)\n if ready:\n logger.debug(\"one event ready: %s\" % event)\n return event\n logger.debug(\"no events ready\")", "title": "" }, { "docid": "1851dcc416b3cc9a1e7f89fb60447c83", "score": "0.7093803", "text": "def _process_event_queue(self):\n while not self._event_queue.empty():\n event = self._event_queue.get()\n self._process_event(event)\n return None", "title": "" }, { "docid": "42db8e15cfb492814afb84c152833e04", "score": "0.698128", "text": "def get_event(self, timeout=None):\n # return or block until we have something to return or timeout\n return self._eventq.get(timeout=timeout)", "title": "" }, { "docid": "22a88725adcea3998429a2ba2a374ca0", "score": "0.68726075", "text": "def get(self):\n if self.queue.qsize() <= round(self.max_queue_size / 2):\n self.evt.set()\n gevent.sleep(0)\n self.evt.clear()\n\n return self.queue.get()", "title": "" }, { "docid": "970fd488ffbb80ac1bd3d5a927b0bf3b", "score": "0.67082983", "text": "def get_event(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "47c2c5bc95433d69cd417cec6f97c876", "score": "0.6642804", "text": "def dequeue(self, miliseconds_timeout):\n while True:\n if self.error_code != 0:\n log.debug(\"queue error:\" + self.error_code)\n return None\n if not self.event_q.empty():\n mo_chg_event = self.event_q.get()\n return mo_chg_event\n else:\n return None", "title": "" }, { "docid": "5c4c786781587dcf0b76fc40b8983e98", "score": "0.6537983", "text": "def _routine(self):\n self._process_event_queue()\n return None", "title": "" }, { "docid": "ee4fad19b59514a9d585552269da9bf9", "score": "0.6384603", "text": "def wait_for_event(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "c92484c00738223dffbc70858e593863", "score": "0.63679016", "text": "def wait_for_event(self, timeout, name=None):\n if name:\n self.test_class.logger.debug(\n \"Expecting event '%s' within %ss\", name, timeout\n )\n else:\n self.test_class.logger.debug(\"Expecting event within %ss\", timeout)\n try:\n e = self._events.get(timeout=timeout)\n except queue.Empty:\n raise Exception(\"Event did not occur within timeout\")\n msgname = type(e).__name__\n if name and msgname != name:\n raise Exception(\"Unexpected event received: %s, expected: %s\" % msgname)\n self.test_class.logger.debug(\"Returning event %s:%s\" % (name, e))\n return e", "title": "" }, { "docid": "55d4997556ed4b0e38ee3a3c8b9b7f8d", "score": "0.624427", "text": "def peek_event(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "b5db4efba8cc1e577d684b5d7b624e92", "score": "0.62010676", "text": "def event_listener(self, event):\n self.queue.put(event)", "title": "" }, { "docid": "de1c5dc7c3c97d6231db45bcd9ee77cf", "score": "0.61927515", "text": "async def process_event(self, evnt):\n pass", "title": "" }, { "docid": "8b9f5c2572d28d38bff0a5715086b688", "score": "0.6181531", "text": "def event_get(self):\n if not self.Client:\n return None\n\n event, origintime = None, None\n if self.config.event_id is not None:\n try:\n # Get events via event id, only available from certain clients\n logger.debug(f\"event ID: {self.config.event_id}, querying \"\n f\"client {self.config.client}\")\n event = 
self.Client.get_events(eventid=self.config.event_id)[0]\n except FDSNException:\n pass\n if self.origintime and event is None:\n try:\n # If getting by event id doesn't work, try based on origintime\n logger.debug(f\"origintime: {self.origintime}, querying\"\n f\"client {self.config.client}\")\n event = self.Client.get_events(starttime=self.origintime,\n endtime=self.origintime\n )\n if len(event) > 1:\n # Getting by origin time may result in multiple events \n # found in the catalog, this is hard to control and will\n # probably need to be addressed manually.\n logger.warning(f\"{len(event)} events found, expected 1.\"\n f\"Returning first entry, manual revision \"\n f\"may be required.\"\n )\n event = event[0]\n except FDSNException:\n pass\n return event", "title": "" }, { "docid": "f24339475d16dbbaa332d6563bfeac3c", "score": "0.6101276", "text": "def get_event(self, event: pg.event.Event):\n pass", "title": "" }, { "docid": "7208936746f15557999cae8fd84d2188", "score": "0.60955685", "text": "def __read_from_queue__(self):\n while True:\n message = self.event_queue.get()\n if message == self.CLOSE_PORT:\n self.close_port()\n return\n else:\n self.__write__(msg=message)", "title": "" }, { "docid": "7e4c9b4833cf37dd678ead3746833d5b", "score": "0.6082438", "text": "def get_next_event(self, timeout=None):\n event, args, kwargs = self.inq.Wait(timeout=timeout)\n return ((self.state, event), args, kwargs)", "title": "" }, { "docid": "1fafafff607cc2aee45c1bd41c786ae9", "score": "0.60788196", "text": "def assert_event_was_fired(self, expected_event):\n self.assertTrue(self.queueEvent_mock.called, \"no event was fired\")\n event = self.queueEvent_mock.call_args[0][0]\n self.assertEqual(self.parser.getEventID(expected_event), event.type)\n return event", "title": "" }, { "docid": "cdf5abd5fb65b4227a2541a31aea4941", "score": "0.6074151", "text": "def get_event_queue(self, event):\n queue_url_dict = self.sqs_client.get_queue_url(QueueName=self.get_queue_name(event))\n return queue_url_dict[\"QueueUrl\"]", "title": "" }, { "docid": "5634fa24bfaacc232cc52edce2c0cc18", "score": "0.59716064", "text": "async def _put_event_queue(self, event: pycot.Event) -> None:\n try:\n await self.event_queue.put(event)\n except queue.Full:\n self._logger.warning(\n \"Lost CoT Event (queue full): '%s'\", event)", "title": "" }, { "docid": "d719d2c93ec16441aefcac0940e99747", "score": "0.5947777", "text": "async def _wait_for_http_event(self, stream_id: int) -> H3Event:\n if not self._read_queue[stream_id]:\n await self._read_ready[stream_id].wait()\n event = self._read_queue[stream_id].popleft()\n if not self._read_queue[stream_id]:\n self._read_ready[stream_id].clear()\n return event", "title": "" }, { "docid": "587763b888f34ed2490f89aaf4058585", "score": "0.5941672", "text": "def process_event(self, ev, sql_queue_func, arg):\r\n pass", "title": "" }, { "docid": "1d921aa08f138109023731b3072faae5", "score": "0.5934292", "text": "def wait_for_event(e):\n logging.debug('Esperando por evento inicio')\n event_is_set = e.wait()\n logging.debug('evento : %s', event_is_set)", "title": "" }, { "docid": "0d91e16afd81d0e5dd6c9ef404b52145", "score": "0.59229195", "text": "async def get_event(request, event_id: str) -> Event:\n event = await request.app[\"db\"].event.find_by_id(event_id)\n return event", "title": "" }, { "docid": "824f2de1d48e7aa810daaaf83e90981b", "score": "0.5900256", "text": "def wait_for_event(e):\n logging.debug('wait_for_event starting')\n event_is_set = e.wait()\n logging.debug('event set: %s', 
event_is_set)", "title": "" }, { "docid": "16a36b544ec424bc2b45759309694df9", "score": "0.5899328", "text": "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, timeout)\n except Queue.Empty: \n return None\n \n return item", "title": "" }, { "docid": "734d385914116618a3b9ebf4a8b98ed1", "score": "0.588287", "text": "def test_get_events(self):\n query_string = [('queue_id', '1375801870:2942'),\n ('last_event_id', -1),\n ('dont_block', False)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/api/v1/events',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "8cdced031564dac38a33145708fb4e5f", "score": "0.5832036", "text": "def _process_event_non_blocking(self):\n\n start_time = datetime.datetime.now()\n while not self.queue.empty():\n time_delta = datetime.datetime.now() - start_time\n if time_delta.total_seconds() >= 2:\n break\n\n try:\n event = self.queue.get()\n LOG.debug(\"got event: %s\" % event)\n self.processor.process_event(event)\n except Exception as error:\n LOG.error(\"Exception: %s\", error)", "title": "" }, { "docid": "5b4d34173f293f28b1e8d48cd249cdae", "score": "0.5825454", "text": "def next_event(self):", "title": "" }, { "docid": "1b71e84f92a2e2369a6510eca7006269", "score": "0.5813769", "text": "def _process_event(self, event):\n event_type = event.get('type', None)\n if event_type in self._event_handlers:\n self._event_handlers[event_type](event)\n else:\n LOG.debug(\n \"Task._process_event no handler for event %s in task %s\",\n event_type,\n self._name,\n )\n return None", "title": "" }, { "docid": "c1e555ae53f05e77a1dba6b320fc8e5a", "score": "0.58097464", "text": "def get_message(queue):\n message = queue.receive_messages(MaxNumberOfMessages=1)\n if not message:\n return None\n return message[0]", "title": "" }, { "docid": "39732c28d195d46673e6c3a7f991c16c", "score": "0.5801896", "text": "def next_event(self):\n if not self._recv_buf:\n if self.closed:\n return events.ConnectionClosed()\n return None\n\n event_id = self._recv_buf[0]\n try:\n Event = event_types[event_id]\n except KeyError:\n raise ProtocolError('%d is not a valid event ID' % event_id)\n try:\n event = Event.from_buffer(self._recv_buf[1:])\n except events.NeedMoreDataError:\n return None\n\n # Remove processed data from buffer\n del self._recv_buf[:1+len(event)]\n return event", "title": "" }, { "docid": "19899f2eb2139a9023f1ca809e546988", "score": "0.5792371", "text": "def waitforcardevent(self):\n pass", "title": "" }, { "docid": "a879dbfffe94f76b88bc6cafc9b839e2", "score": "0.5781625", "text": "def test_process_event(self):\n pass", "title": "" }, { "docid": "6623a092db2f7d5b9a54b5f677a51c3a", "score": "0.5780284", "text": "async def sync_handler(self, check_event_callback=None, *kargs, **kwargs):\n logging.info(\"Started synchronous handler to handle an event\")\n while True:\n\n event = await self.queue.get()\n logging.debug(\"Event popped from listener queue\")\n self.queue.task_done()\n\n # Break out of the loop if check_event_callback method is\n # None(caller does not want to check the event).\n # Also break if it is defined and returns True when called\n # else continue waiting for a True return value.\n if check_event_callback is None or \\\n check_event_callback(event, *kargs, **kwargs) is True:\n break\n\n return event", "title": "" }, { "docid": "d178859aca2205121f8af0cfcd630d21", "score": "0.5776098", "text": 
"def get_event(event_name, events):\n try:\n return [event for event in events if event.meta.type == event_name][0]\n except IndexError:\n return None", "title": "" }, { "docid": "fe213b9a7ec7fd61c7ee32c46ee680f2", "score": "0.5773317", "text": "def function_waiting_for_event(e, t):\n logging.debug('function waiting for event starting')\n event_is_set = e.wait(t)\n if event_is_set:\n logging.debug('event set: %s', event_is_set)\n else:\n logging.debug('doing other work')", "title": "" }, { "docid": "55d6329d6168dd3bb08087e5f8265661", "score": "0.57569534", "text": "def _read_next_event(self):\n raise NotImplementedError('_read_next_event() not defined for %s' %\n self.__class__.__name__)", "title": "" }, { "docid": "ce3cd2bbb819cb4f9861d0d4f9515587", "score": "0.5754978", "text": "async def handle_events(self):\n while not self.stop:\n event = await self.queue.get()\n\n if event is None:\n # None is a sentinel to indicate the that work is done and there will be no more\n # messages. If there are multiple consumers of the queue, this will become an\n # issue as None will need to be added for each consumer\n self.queue.task_done()\n else:\n event_type = str(event['type'])\n e_id = self._get_event_id(event)\n self._add_trace(e_id)\n\n log.debug('AsyncDataFeedEventService/handle_events() --> event-type:' + event_type)\n try:\n route = self.routing_dict[event_type]\n except KeyError:\n log.debug('Event with unsupported type ' + event_type + ' detected')\n self.queue.task_done()\n continue\n\n future = asyncio.ensure_future(route(event))\n future.add_done_callback(partial(self._check_result, e_id))", "title": "" }, { "docid": "9f696eaf40b35b41f4790df37209247a", "score": "0.57396066", "text": "def step(self) -> None:\n try:\n self._now, _, _, event = heappop(self._queue)\n except IndexError:\n raise EmptySchedule()\n\n # Process callbacks of the event. Set the events callbacks to None\n # immediately to prevent concurrent modifications.\n callbacks, event.callbacks = event.callbacks, None # type: ignore\n for callback in callbacks:\n callback(event)\n\n if not event._ok and not hasattr(event, '_defused'):\n # The event has failed and has not been defused. 
Crash the\n # environment.\n # Create a copy of the failure exception with a new traceback.\n exc = type(event._value)(*event._value.args)\n exc.__cause__ = event._value\n raise exc", "title": "" }, { "docid": "94442c07b92778ffe805aff5c267fc85", "score": "0.5731729", "text": "def get(self):\n try:\n request = self.queue.get_nowait()\n except:\n return None\n else:\n return request", "title": "" }, { "docid": "eb81ad4d59e8d1323c06a21febb3b111", "score": "0.5719649", "text": "def peek(queue):\n return None", "title": "" }, { "docid": "a2036b260112e5e74ec4fe0b649c7091", "score": "0.5691467", "text": "def getEvent(self):\n return self.__event", "title": "" }, { "docid": "9888b12403938b5b8057bab6ca67fd1a", "score": "0.56806695", "text": "def dequeue(queue):\n return None", "title": "" }, { "docid": "216d2e4cf294f1056aa627ddaa19545e", "score": "0.56784475", "text": "def receive_event(event_data):\n with lock:\n event = None\n if isinstance(event_data, dict):\n event = trace.DictTrace (event_data)\n else:\n event = trace.RawTrace (event_data)\n try: \n hosts[event.host].process(event)\n except KeyError: \n logger.exception(traceback.format_exc())\n logger.warning(\"wrong host ID received: {}\".format(event.host))", "title": "" }, { "docid": "8941536b3f20237b70ce5c9c200f978f", "score": "0.566203", "text": "def get_basic_event(self, index):\n if index in self._basic_events:\n return self._basic_events[index]\n return None", "title": "" }, { "docid": "634499bd608bc9ea5f8de6cf3a4ef600", "score": "0.5634889", "text": "def get_data(self, queue):\n try:\n data = queue.get_nowait()\n except Queue.Empty:\n return None\n if issubclass(Exception, data.__class__):\n raise data\n return data", "title": "" }, { "docid": "a78dda213240e9aa5d5dd43864ae08ec", "score": "0.56306934", "text": "def _get_q(self, url):\n for q in self._queues.values():\n if q.url == url:\n return q\n raise Exception(\"Queue url {} not found\".format(url))", "title": "" }, { "docid": "35dde80d2d1e8d4bae653ddb23890c09", "score": "0.5626187", "text": "def waitOnEventQueue( self ) :\n\t\tif not len( self.equeue ) :\n\t\t\treturn False\n\t\tnow = time.time()\n\t\ttry :\n\t\t\ttime.sleep(self.equeue[0][0]-now)\n\t\texcept IOError, e:\n\t\t\tpass # invalid (0) arg to sleep\n\t\t# the time must now be the eval time of the first event\n\t\tself.processEventQueue( self.equeue[0][0], count=1 )\n\t\t# we must process at least one, so it is time to redisplay...\n\t\tself.refreshDisplay()\n\t\treturn True", "title": "" }, { "docid": "df838529fb002b64852e031f4f05ca81", "score": "0.56194776", "text": "def listen(self, q):\n # Checks the input queue for any matching mouse button events\n for event in q:\n if event.type == sdl2.SDL_MOUSEBUTTONDOWN:\n b = event.button.button\n if b in self._buttonmap.keys():\n value = self._buttonmap[b]\n rt = (event.button.timestamp - self._loop_start)\n return (value, rt)\n return None", "title": "" }, { "docid": "16f16b175b358a401275e31743f5b47e", "score": "0.56156397", "text": "def get_event_by_name(self, event_name):\n for event in self.events_list:\n if event['name'] == event_name:\n return event\n else:\n return False", "title": "" }, { "docid": "52951b3ca420c42770acf73bdfed3da8", "score": "0.5610793", "text": "def get_event_by_name(event_name, db):\n event_db = db.keys('*')\n required_event = None\n for event in event_db:\n event_details = json.loads(db.mget(event)[0])\n if event_name.lower() == event_details[\"name\"].lower():\n required_event = event_details\n break\n return required_event", "title": "" }, { 
"docid": "7cbc38edb8de7bfbf2bfa376d55bb327", "score": "0.56100357", "text": "def load_event_queue(self) -> List[t.Event]:\n bytes_data = load_bytes_data(self.state.event_queue(), self._conn)\n return decode_event_queue(bytes_data)", "title": "" }, { "docid": "eace78928c2bdac366f23fd83732e547", "score": "0.56083226", "text": "def read_events(self):\n buf_ = array.array('i', [0])\n # get event queue size\n if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1:\n return\n queue_size = buf_[0]\n if queue_size < self._threshold:\n LOG.debug('(fd: %d) %d bytes available to read but threshold is '\n 'fixed to %d bytes', self._fd, queue_size,\n self._threshold)\n return\n\n r = os.read(self._fd, queue_size)\n LOG.debug('Event queue size: %d', queue_size)\n event_queue = collections.deque()\n rsum = 0 # counter\n while rsum < queue_size:\n s_size = 16\n # Retrieve wd, mask, cookie and fname_len\n wd, mask, cookie, fname_len = struct.unpack('iIII',\n r[rsum:rsum+s_size])\n\n # Retrieve name\n fname, = struct.unpack('%ds' % fname_len,\n r[rsum + s_size:rsum + s_size + fname_len])\n rsum += s_size + fname_len\n if mask & event.IN_IGNORED:\n continue\n rawevent = dict(wd=wd, mask=mask, cookie=cookie, fname=fname)\n event_queue.append(rawevent)\n return event_queue", "title": "" }, { "docid": "63882cdf33be0f2797d89b5b46544415", "score": "0.5603436", "text": "def get_nonblocking(queue):\n try:\n resp = queue.get(block=False)\n except Exception as e:\n resp = None\n return resp", "title": "" }, { "docid": "9717776e9d157ac4113e3807417200a3", "score": "0.5601572", "text": "async def call_event_async(self, event):\n # If event.type not registered try None as a general event handler.\n callback = self.event_handler.get(event.type, self.event_handler.get(None, None))\n return await call_async(callback, event, LOOP=self.loop)", "title": "" }, { "docid": "f8f8f63680e7334551e0b7b5018d8650", "score": "0.55962324", "text": "def process_event(self, db, event):\r\n raise Exception(\"needs to be implemented\")", "title": "" }, { "docid": "06096fc9b0e8cec266b5906e0e847d4f", "score": "0.5595797", "text": "def find_event(self):\n return self.model.find_unit_event_by_time(\n self.blob.unit, self.view.time\n )", "title": "" }, { "docid": "33364c07b76762ac35303c3ba2d915c8", "score": "0.5583143", "text": "def __init__(self):\n self.evt_thread = None", "title": "" }, { "docid": "240e178642a3ce75b0b0e0b3d5699914", "score": "0.5580184", "text": "def wait_for_event(e):\r\n time.sleep(2)\r\n# print a\r\n# print 'wait_for_event starting'\r\n event_is_set = e.wait()\r\n# print 'event is set . 
Now I am doing something'\r\n print a\r\n print 'done'", "title": "" }, { "docid": "1e7ac814770ed63418cec12746a79dcf", "score": "0.55754924", "text": "def get_delayed_event(self, r, e):\n # @todo: Rewrite to scheduler\n discriminator, vars = r.get_vars(e)\n ws = e.timestamp - datetime.timedelta(seconds=r.combo_window)\n de = ActiveEvent.objects.filter(\n managed_object=e.managed_object_id,\n event_class=r.event_class,\n discriminator=discriminator,\n timestamp__gte=ws\n ).first()\n if not de:\n # No starting event\n return None\n # Probable starting event found, get all interesting following event\n # classes\n fe = [ee.event_class.id\n for ee in ActiveEvent.objects.filter(\n managed_object=e.managed_object_id,\n event_class__in=r.combo_event_classes,\n discriminator=discriminator,\n timestamp__gte=ws).order_by(\"timestamp\")]\n if r.combo_condition == \"sequence\":\n # Exact match\n if fe == self.combo_event_classes:\n return de\n elif r.combo_condition == \"all\":\n # All present\n if not any([c for c in r.combo_event_classes if c not in fe]):\n return de\n elif r.combo_condition == \"any\":\n # Any found\n if fe:\n return de\n return None", "title": "" }, { "docid": "18b77a831d0bb46ff4ad141b930d4615", "score": "0.557178", "text": "def process_events(self, return_at = None):\n while return_at or not self.queue.empty():\n event = self.queue.get()\n if event[0] == return_at:\n return event\n elif event[0] in self._handlers:\n for handler, data in self._handlers[event[0]]:\n try:\n handler(event, data)\n except ExitMainLoop:\n raise\n except Exception: # pylint: disable=broad-except\n send_exception(self.queue, sys.exc_info())", "title": "" }, { "docid": "bbc630fa2d924c6d9b80033348d5110b", "score": "0.55633557", "text": "def test_get_events(self):\n pass", "title": "" }, { "docid": "a2d1a2d4b341be8a19e296a6cf6e0abf", "score": "0.55602705", "text": "async def aget(self):\n \n return await asyncio.get_event_loop().run_in_executor( None, self._queue.get )", "title": "" }, { "docid": "c4307899ba13a816b1cb4c78a99d8cba", "score": "0.5557095", "text": "def post(self, event):\n self._event_queue.put(event)\n self._notify()\n return None", "title": "" }, { "docid": "811b32e36f9ff0c929f2d0c1cc98bae9", "score": "0.5553492", "text": "async def get_events(self, event=None, timeout=None):\n self.wait_queue = Queue()\n while True:\n if timeout:\n try:\n data = await asyncio.wait_for(self.wait_queue.get(), timeout)\n except asyncio.TimeoutError:\n return\n else:\n data = await self.wait_queue.get()\n if not event or data[\"event\"] == event:\n yield data\n self.wait_queue = None", "title": "" }, { "docid": "ca3cac995a75e9e4112b94a78401de46", "score": "0.55504215", "text": "def get_event(self):\n return self.ticket_order.ticket_type.get_event()", "title": "" }, { "docid": "499d658b877f75354d34ac822d9205af", "score": "0.55482125", "text": "def __do_consume(self, function, event, queue):\n try:\n\n if not isinstance(event, self.input):\n new_event = event.convert(self.input[0])\n self.logger.warning(\"Incoming event was of type '{_type}' when type {input} was expected. 
Converted to {converted}\".format(\n _type=type(event), input=self.input, converted=type(new_event)), event=event)\n event = new_event\n\n if self.REQUIRED_EVENT_ATTRIBUTES:\n missing = [attribute for attribute in self.REQUIRED_EVENT_ATTRIBUTES if not event.get(attribute, None)]\n if len(missing) > 0:\n raise InvalidActorInput(\"Required incoming event attributes were missing: {missing}\".format(missing=missing))\n\n try:\n function(event, origin=queue.name, origin_queue=queue)\n except QueueFull as err:\n err.queue.wait_until_free() # potential TypeError if target queue is not sent\n queue.put(event) # puts event back into origin queue\n except InvalidActorInput as error:\n self.logger.error(\"Invalid input detected: {0}\".format(error))\n except InvalidEventConversion:\n self.logger.error(\"Event was of type '{_type}', expected '{input}'\".format(_type=type(event), input=self.input))\n except Exception as err:\n self.logger.warning(\"Event exception caught: {traceback}\".format(traceback=traceback.format_exc()), event=event)\n rescue_attribute = Actor._RESCUE_ATTRIBUTE_NAME_TEMPLATE.format(actor=self.name)\n rescue_attempts = event.get(rescue_attribute, 0)\n if self.rescue and rescue_attempts < self.max_rescue:\n setattr(event, rescue_attribute, rescue_attempts + 1)\n sleep(1)\n queue.put(event)\n else:\n event.error = err\n self.send_error(event)", "title": "" }, { "docid": "f3ec8796a3674a4821352bea775f62aa", "score": "0.5546853", "text": "def dispatch(self, event):\n self._event_queue.append(event)", "title": "" }, { "docid": "c9b60eeafdc84fecba9e01d18d3fdb7c", "score": "0.55453014", "text": "def pop(self):\n if len(self._heap_q) == 0:\n # no events to consume\n return None\n\n event_tuple = heappop(self._heap_q) # (time: int, e Event)\n return event_tuple[1]", "title": "" }, { "docid": "259681f8e858cd783503168ebb398997", "score": "0.5544693", "text": "def get_event(self, requested_event, use_event_id=False):\n source = self.read(requested_event=requested_event,\n use_event_id=use_event_id)\n event = next(source)\n return deepcopy(event)", "title": "" }, { "docid": "9cddaa2dc7590a667409ab5d9e22bff0", "score": "0.5537178", "text": "def pollNextEvent(self, event):\r\n fn = self.function_table.pollNextEvent\r\n vREvent = sizeof(VREvent_t)\r\n result = fn(byref(event), vREvent)\r\n return result != 0", "title": "" }, { "docid": "d9bb343c5dfd0d3e0a3b221b887de381", "score": "0.55274796", "text": "def next_event(self, blocking=False):\n while self.event_socket_connected and self.has_event(blocking=blocking):\n with self._buffer_lock:\n line, remainder = self.buffer.split('\\n', 1)\n self.buffer = remainder\n try:\n event = json.loads(line)\n except Exception as e:\n self._logger.info('Error (%s) parsing\\n%s*\\nwith\\n%s*', str(e), line, remainder)\n continue\n if self._should_log_event(event):\n self._logger.info('faucet_event %s', event)\n targets = list(t for t in self._handlers if t in event)\n event_target = targets[0] if targets else None\n faucet_event = dict_proto(event, FaucetEvent, ignore_unknown_fields=True)\n target_event = getattr(faucet_event, str(event_target), None)\n try:\n dispatch = self._valid_event_order(event) and target_event\n except Exception as e:\n self._logger.error('Validation failed for event %s: %s', event, e)\n raise\n dispatch = dispatch and self._handle_port_change_debounce(event, target_event)\n dispatch = dispatch and self._handle_ports_status(event)\n if dispatch:\n self._augment_event_proto(faucet_event, target_event)\n if not 
self._dispatch_faucet_event(event_target, target_event):\n return event\n return None", "title": "" }, { "docid": "8ef712f217643e41ff9d914c0343da53", "score": "0.552391", "text": "def wait_for_event(bidi_session, event_loop):\n def wait_for_event(event_name: str):\n future = event_loop.create_future()\n\n async def on_event(method, data):\n remove_listener()\n future.set_result(data)\n\n remove_listener = bidi_session.add_event_listener(event_name, on_event)\n\n return future\n return wait_for_event", "title": "" }, { "docid": "1f3c6d50294ba593fa66b99fd9cc89e3", "score": "0.5523747", "text": "def listen(self, q):\n # Checks the input queue for any keypress events for keys in the keymap\n for event in q:\n if event.type == sdl2.SDL_KEYDOWN:\n key = event.key.keysym # keyboard button event object\n if key.sym in self._keymap.keys():\n value = self._keymap[key.sym]\n rt = (event.key.timestamp - self._loop_start)\n return (value, rt)\n return None", "title": "" }, { "docid": "809d08efab85a27f05f160dd442aefe1", "score": "0.55139244", "text": "def _fetch_random_task_from_queue(queue_service, queue_name):\n tasks = queue_service.GetTasks(queue_name)\n\n if not tasks:\n return None\n\n task = random.choice(tasks)\n\n if not task:\n return None\n\n return task", "title": "" }, { "docid": "28a9ae03be8358f7e155d71f2440b3d0", "score": "0.5513144", "text": "def get_event(self, event, scale=(1,1)):\n self.state.get_event(event, scale)", "title": "" }, { "docid": "d6af5d4a7c1403f38972b0cda15f2212", "score": "0.55130726", "text": "def dispatch_event(self, event):\n if self.event_manager:\n self.event_manager.dispatch_event(event)", "title": "" }, { "docid": "a8e12c5e4a49ed3d8d8979e57d85bd50", "score": "0.55116105", "text": "def gather_event(self, try_fm=True):\n logger.debug(\"gathering event\")\n event = None\n if self.ds:\n try:\n # If dataset is given, search for event in ASDFDataSet. 
If event\n # is in ASDFDataSet already, it has already been gathered and\n # should already have a focal mechanism\n event = self.asdf_event_fetch()\n self.origintime = event.preferred_origin().time\n return event\n except (AttributeError, IndexError):\n pass\n\n # No data in ASDFDataSet, query FDSN\n if self.Client:\n event = self.event_get()\n if event is None:\n raise GathererNoDataException(f\"no Event information found for \"\n f\"{self.config.event_id}\")\n else:\n logger.debug(f\"matching event found: {format_event_name(event)}\")\n self.origintime = event.preferred_origin().time\n # Append extra information and save event before returning\n if try_fm:\n event = append_focal_mechanism(event, client=self.config.client)\n if self.ds and self.config.save_to_ds:\n self.ds.add_quakeml(event)\n logger.debug(f\"event QuakeML added to ASDFDataSet\")\n return event", "title": "" }, { "docid": "54281c18bcfb54ee765bc70030084ada", "score": "0.5509596", "text": "def handle_event(self, event):\n _event = None\n try: \n _event = decode_event_data(event)\n except Exception as error:\n _event = decode_event_data(event['data'])\n\n if _event == self._last_event:\n # Event already handled\n return\n\n self._last_event = _event\n if _event is not None:\n try:\n if _event['event'] == 'Message':\n if _event.get('data', None):\n self.add_message(_event['data'])\n elif _event['event'] == 'Clear':\n self.clear()\n elif _event['event'] == 'ClearQueue':\n self.clear_queue()\n except Exception as error:\n Logger.log(LOG_LEVEL[\"error\"],\n f'Error handling event for {self.id}')", "title": "" }, { "docid": "2f57643a46e1a25ad0cea40962a98916", "score": "0.55067426", "text": "def get_queue():\n watcher = Watcher()\n watcher.connect()\n queue = watcher.get_queue()\n return queue", "title": "" }, { "docid": "72be01f0111d8d2f726a6d68f16cbb9e", "score": "0.5502617", "text": "async def dispatch_event(event: str, *args: typing.Any, **kwargs: typing.Any) -> None:\n cb = _callbacks.get(event)\n if cb is not None:\n try:\n await cb(*args, **kwargs)\n except Exception as e:\n log.error(f\"Error in handler for event {event}: {e}\")", "title": "" }, { "docid": "03cb4166f1186ef397e42bb0c34ebb57", "score": "0.5498519", "text": "def using_simple_queue(queued_client):\n queued_client.event(service='test')", "title": "" }, { "docid": "6c95795bbf2db3cc592f56247159e6ff", "score": "0.54895115", "text": "def event_to_queue(alias, listen_path, event_queue):\n events = sparkl('listen', listen_path, alias=alias)\n\n for event in events:\n event_queue.put(event)", "title": "" }, { "docid": "d77dba0f5066422e3af4295dae2992c8", "score": "0.54889494", "text": "def get_from_events(events, branch_name):\n\n if branch_name in events.fields:\n return events[branch_name]\n else:\n return None", "title": "" }, { "docid": "d106a850373d76e36d76197cb4071a6b", "score": "0.54852873", "text": "async def check_event(self, evnt):\n return False", "title": "" }, { "docid": "c6585ebc4028117fdc44c575e3190da0", "score": "0.5471601", "text": "def dispatch(self):\n return self.event_target.dispatch_event(self.event_name, self)", "title": "" }, { "docid": "5fcaf5352e607f543f3c038c4b869934", "score": "0.54692215", "text": "def handle(self, event: Event) -> None:", "title": "" }, { "docid": "5fcaf5352e607f543f3c038c4b869934", "score": "0.54692215", "text": "def handle(self, event: Event) -> None:", "title": "" }, { "docid": "c43b71748c5c0ea96b347cabf54d5b27", "score": "0.54576594", "text": "def asdf_event_fetch(self):\n event = self.ds.events[0]\n 
logger.debug(f\"matching event found: {format_event_name(event)}\")\n return event", "title": "" }, { "docid": "3a5c0752032c8ca767dc853f2ea1380d", "score": "0.54361784", "text": "def receive_epr(self, host_id, sender_id, q_id=None, block=False):\n key = sender_id + ':' + host_id\n ent_queue = self._entaglement_pairs.get_from_dict(key)\n if ent_queue is None:\n raise Exception(\"Internal Error!\")\n qubit = ent_queue.get()\n if q_id is not None and q_id != qubit.id:\n raise ValueError(\"Qid doesn't match id!\")\n return qubit", "title": "" }, { "docid": "3c2261fcf3ffb30ffb884e7de6f1e534", "score": "0.54282", "text": "def get(self, timeout=None):\n try:\n if not self.__queue_feeder_end:\n self.__data_ready_event.wait(timeout)\n return self.__queue.popleft()\n except IndexError:\n if timeout:\n return None\n\n self.__queue_end = True\n finally:\n if self.count == 0:\n self.__data_ready_event.clear()\n if self.__queue_feeder_end:\n self.__queue_end = True", "title": "" }, { "docid": "6c6d7131fb4e4a2877e0fb63aaeec36d", "score": "0.54274803", "text": "def get_next_event(self):\n return # osid.calendaring.Event", "title": "" }, { "docid": "92227b089d6ea0b95c372e4e88c2e10e", "score": "0.5427281", "text": "def _find_event(self, event_klass):\n if isinstance(event_klass, type):\n event_klass = event_klass.__name__\n l = self.events\n first_from_back = next((l[-i] for i in range(1, len(l) + 1) if l[-i].event_type() == event_klass), None)\n return first_from_back", "title": "" }, { "docid": "ac5b59801731e8234043e3cff63d8c82", "score": "0.54259956", "text": "def _key_events_fetcher(self):\n raise NotImplementedError", "title": "" }, { "docid": "8ca68467223b6d1cfe5e4f76be201c38", "score": "0.5424284", "text": "def next(self):\n while True:\n # Awaits a new event to look for a job\n self.queue_event.wait()\n found = None\n for job in self.queue:\n lock = self.get_lock(job)\n # Get job in the queue if it has no lock or its not locked\n if lock is None or not lock.locked():\n found = job\n job.set_lock(lock)\n break\n if found:\n # Unlocked job found to run\n self.queue.remove(found)\n # If there are no more jobs in the queue, clear the event\n if len(self.queue) == 0:\n self.queue_event.clear()\n return found\n else:\n # No jobs available to run, clear the event\n self.queue_event.clear()", "title": "" }, { "docid": "87f0030e2bdd64a77990181a7c23373d", "score": "0.541929", "text": "def get(self) -> debugger_info.DebuggerObject:\n return self._queue.get()[1]", "title": "" }, { "docid": "abd2c55fce7578e195587815656d5cd3", "score": "0.54171056", "text": "def wait_for_event_timeout(e, t):\n while not e.isSet():\n logging.debug('Esperando evento inicio a destiempo')\n event_is_set = e.wait(t)\n logging.debug('evento: %s', event_is_set)\n if event_is_set:\n logging.debug('Procesando evento')\n else:\n logging.debug('haciendo otro trabajo')", "title": "" }, { "docid": "379e9290651c3a7ba5351ea5c1248809", "score": "0.5413514", "text": "def process_event(event, consumer_queue=None):\n if isinstance(event, Event):\n # Run the event\n event.exec_()\n if consumer_queue and event.has_output:\n consumer_queue.put(event)", "title": "" }, { "docid": "ce0bc0d0425b51fc1af358c10ccad764", "score": "0.5407505", "text": "def await_state_event(self, pid=None, state=None, timeout=30, strict=False):\n\n start_time = datetime.now()\n\n assert state in ProcessStateEnum._str_map, \"process state %s unknown!\" % state\n state_str = ProcessStateEnum._str_map.get(state)\n\n # stick the pid into a container if it is only one\n if pid is 
not None and not isinstance(pid, (list, tuple)):\n pid = (pid,)\n\n while 1:\n if datetime.now() - start_time > timedelta(seconds=timeout):\n raise AssertionError(\"Waiter timeout! Waited %s seconds for process %s state %s\" % (timeout, pid, state_str))\n try:\n event = self.event_queue.get(timeout=timeout)\n except queue.Empty:\n raise AssertionError(\"Event timeout! Waited %s seconds for process %s state %s\" % (timeout, pid, state_str))\n log.debug(\"Got event: %s\", event)\n\n if (pid is None or event.origin in pid) and (state is None or event.state == state):\n return event\n\n elif strict:\n raise AssertionError(\"Got unexpected event %s. Expected state %s for process %s\" % (event, state_str, pid))", "title": "" }, { "docid": "d93eae148b82780d388e8c7b40685dcc", "score": "0.5405129", "text": "def GetNextEventSource(self):", "title": "" } ]
b0cdacba661f704d7960a2fbf39e3eab
Override reset_target_position from hectorquad_env to also delete trees
[ { "docid": "7e8ba560a58f499be4c3538e731e91bf", "score": "0.60241246", "text": "def reset_target_position(self):\n self.goal = self._sample_goal()\n target_cmd = \"gz model -m \" + self.target_name + \" -x {} -y {} -z {}\".format(*self.goal)\n subprocess.call(target_cmd,shell=True)\n\n # delete_model_proxy = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)\n # print(\"Removing Forest\")\n\n # for i in range(self.total_trees):\n # rospy.wait_for_service('gazebo/delete_model')\n # delete_model_proxy(\"tree{}\".format(i+1))\n\n # print(\"Respawning Forest\")\n # self.generate_forest()\n\n W = 10\n D = 10\n density = 0.4\n NW = int(W*density)\n ND = int(D*density) \n trees = [(int(W*i/NW)-W/2+np.random.uniform()*W/NW,int(D*j/ND)-D/2+np.random.uniform()*D/ND) for i in range(NW) for j in range(ND)]\n\n state_msg = ModelState()\n\n set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n\n\n print(\"Moving Trees\")\n\n for i in range(self.total_trees):\n rospy.wait_for_service('/gazebo/set_model_state')\n\n state_msg.model_name = 'tree{}'.format(i+1)\n state_msg.pose.position.x = trees[i][0]\n state_msg.pose.position.y = trees[i][1]\n state_msg.pose.position.z = 1\n set_state(state_msg)", "title": "" } ]
[ { "docid": "dafeb299af4a0d6607b9c620b7636deb", "score": "0.58593154", "text": "def untarget_location():\n global __GRAPH__\n __GRAPH__ = None", "title": "" }, { "docid": "19a9661811e6e471c109de440e7e6888", "score": "0.58495784", "text": "def destroy_tree(self, tree):\n pass", "title": "" }, { "docid": "7e4de2ee775c8a143a44fd4e2b85bf42", "score": "0.5610399", "text": "def reset_targets(self):\n self._link_target.reset()\n self._joint_target.reset()", "title": "" }, { "docid": "f3aa14d225bb97332c7b40d9d99a4e3a", "score": "0.55386925", "text": "def clear_target(self):\n\n self.input_dataset = self.dataset\n self.target_dataset = None\n\n self.input = self.dataset.values\n self.target = None\n\n self.supervise = False", "title": "" }, { "docid": "cce6e8f655b5d9d989bdb103aa271100", "score": "0.55361676", "text": "def reset(self, env, observation):\n del observation\n assert env.action_space == self._action_space\n self._model = env\n # Initialize root with some reward to avoid division by zero.\n self._root = TreeNode(init_reward=0)\n self._real_visited = set()", "title": "" }, { "docid": "fa025f2a2200903a5020fe9fa6a39512", "score": "0.54550517", "text": "def _clear_path_np(self):\n if self.path_np is not None:\n self.path_np.remove_node()\n self.path_np = None", "title": "" }, { "docid": "3495fa1d7c46339354a9ebb267085fff", "score": "0.54545796", "text": "def clear_tree(self):\n\n for i in reversed(range(self._liquor_view.topLevelItemCount())):\n self._liquor_view.takeTopLevelItem(i)", "title": "" }, { "docid": "6ab2e9553e104a2e5c9da6e87cfc9604", "score": "0.5337201", "text": "def clear_spatial_unit_terms(self):\n pass", "title": "" }, { "docid": "b5dbbb164bb5280858ea9ad6d37d3142", "score": "0.53056437", "text": "def delete(self, handle):\n tkey = self.tkey(self.relation.project(handle))\n leaf = self._lookup(self.root, self.stat.height, tkey)\n if tkey not in leaf.keys:\n raise ValueError(\"key to be deleted not found in index\")\n del leaf.keys[tkey]\n leaf.save()\n # tree never shrinks -- if all keys get deleted we still have an empty shell of tree", "title": "" }, { "docid": "9a8c6d187059d945c5963302f6d2207c", "score": "0.5305444", "text": "def reset(self):\n self.answers = OOBTree()", "title": "" }, { "docid": "0e35c706ec8beb7f51022626bc24d07d", "score": "0.53054357", "text": "def tearDown(self):\n del self.disjoint_set\n self.disjoint_set = None", "title": "" }, { "docid": "d7f01455eadbe7b7bea3ceb53dfa5d53", "score": "0.52937704", "text": "def reset(self, grid, target):\n if self.random_reset:\n self.position_and_rotation = self.__random_position_and_rotation(grid, target)\n else:\n self.position_and_rotation = copy.deepcopy(self.initial_position_and_rotation)", "title": "" }, { "docid": "56ace975a9be5cb1de07ef8b4bc134b4", "score": "0.5277287", "text": "def cleanup(self):\n self.logic.setParameterNode(None)\n self.removeObservers()", "title": "" }, { "docid": "dda56762cd537aebfb9992031d6e9734", "score": "0.52764404", "text": "def purge(self):\n self.X = None\n self.Y = None", "title": "" }, { "docid": "9fa3769156e50f72b73eed06b5c1842d", "score": "0.52638435", "text": "def reset_states(self):\n self.zcs = tf.zeros((N_SAMPLES, self.d_zc,))\n self.w_zs = tf.ones((N_SAMPLES,))\n self.child_targets = [[\n self.w_zs, tfd.Normal(loc=self.zcs, scale=1e-1)\n ]] # N-list< [(N_samples)-Tensor, (N_samples,self.d_zc)-Dist] >", "title": "" }, { "docid": "a7b840fd6760fe1b4ff0d5ffe376a442", "score": "0.5220613", "text": "def tearDown(self):\n self.node_list = None", "title": "" }, { "docid": 
"80662f977a151897d347e9861451a42f", "score": "0.5216521", "text": "def del_target(self, dex):\n\n self.tlst.remove(dex)\n self.target_dataset = self.dataset[self.tlst]\n self.target = self.target_datset.values", "title": "" }, { "docid": "3f70c6c25f4faadec2a1ed96a45b8e13", "score": "0.52098227", "text": "def clear(self):\n self._root = None\n self._nodes = 0", "title": "" }, { "docid": "61228896e3f56e6a8fc3dbde2f5a0922", "score": "0.51933783", "text": "def depopulate(self):\n self._populated = False\n \n #clear the events\n if self._events is not None: del self._events[:]\n if self._step_events is not None: del self._step_events[:]\n \n #remove the agent(s)\n if self._agent is not None: del self._agent[:]\n self._multi_agent_mode = False\n self._total_reward = np.zeros(0)\n \n #remove the randomizer\n self._randomizer = None\n \n #clear the variants\n if self._variants is not None: self._variants.clear()\n \n #remove the judge\n self._judge = None\n \n #clear the goals\n if self._goals is not None: self._goals.clear()\n \n #clear all objects and object attributes\n if self._object_attributes is not None: self._object_attributes.clear()\n if self._stepping_object_attributes is not None: del self._stepping_object_attributes[:]\n if self._stepping_object_attribute_order is not None: del self._stepping_object_attribute_order[:]\n if self._randomizing_object_attributes is not None: del self._randomizing_object_attributes[:]\n if self._data is not None: self._data.null()\n self._num_objects = 0\n \n #clear all world attributes\n if isinstance(self._world_attributes, dict): # world attributes have already been parsed\n self._world_attributes.clear()", "title": "" }, { "docid": "eb0a3585a52efc5ae1cf4eac24ae44c6", "score": "0.51861984", "text": "def destroyAllObjects(self):\n\n frame = self; c = self.c; tree = frame.tree; body = self.body\n\n # g.printGcAll()\n\n # Do this first.\n #@+<< clear all vnodes and tnodes in the tree >>\n #@+node:ekr.20081121110412.170: *7* << clear all vnodes and tnodes in the tree>>\n # Using a dict here is essential for adequate speed.\n vList = []; tDict = {}\n\n for p in c.all_unique_positions():\n vList.append(p.v)\n if p.v:\n key = id(p.v)\n if key not in tDict:\n tDict[key] = p.v\n\n for key in tDict:\n g.clearAllIvars(tDict[key])\n\n for v in vList:\n g.clearAllIvars(v)\n\n vList = []; tDict = {} # Remove these references immediately.\n #@-<< clear all vnodes and tnodes in the tree >>\n\n # Destroy all ivars in subcommanders.\n g.clearAllIvars(c.atFileCommands)\n if c.chapterController: # New in Leo 4.4.3 b1.\n g.clearAllIvars(c.chapterController)\n g.clearAllIvars(c.fileCommands)\n g.clearAllIvars(c.keyHandler) # New in Leo 4.4.3 b1.\n g.clearAllIvars(c.importCommands)\n g.clearAllIvars(c.tangleCommands)\n g.clearAllIvars(c.undoer)\n\n g.clearAllIvars(c)\n g.clearAllIvars(body.colorizer)\n g.clearAllIvars(body)\n g.clearAllIvars(tree)\n\n # This must be done last.\n frame.destroyAllPanels()\n g.clearAllIvars(frame)", "title": "" }, { "docid": "67a69a6dc672eb155e16c8e812c2c5d6", "score": "0.5185494", "text": "def reset_tree(self):\n self.tree = {}\n self.tree['leaves'] = []\n self.tree['levels'] = []\n self.tree['is_ready'] = False", "title": "" }, { "docid": "4fd8b004fd5b46c8fab5114d31219850", "score": "0.5182877", "text": "def reset(self):\n self.tree = defaultdict(VisitStats)", "title": "" }, { "docid": "5f732099fe86c1e27db3aa9e73cde4f7", "score": "0.51733226", "text": "def retractboundary(repo, tr, targetphase, nodes):\n phcache = repo._phasecache.copy()\n 
phcache.retractboundary(repo, tr, targetphase, nodes)\n repo._phasecache.replace(phcache)", "title": "" }, { "docid": "2693a18cc9d8b8336d71d95275f3b4ed", "score": "0.5151595", "text": "def _clear_agent(self):\n self.selected_agent = None\n self.text_dict['agent'] = None\n self._clear_path_np()", "title": "" }, { "docid": "db528245f59a6a8227dcb4cffdfa9fba", "score": "0.5141068", "text": "def flush_old_plan(path,searched_grid): \r\n for p in searched_grid:\r\n p.remove()\r\n path.remove()", "title": "" }, { "docid": "e189b7ae634ec74b70eb5d71235472d0", "score": "0.5135303", "text": "def clear_stack_and_variable(self):\n self.stack = []\n self.x = None\n self.output = None", "title": "" }, { "docid": "55b6c4a6bcd0bad82e17329c025e6969", "score": "0.5134397", "text": "def reset(self):\n self.index = -1\n del (self.parsets)\n self.parsets = {}\n del (self.metadata)\n self.metadata = {}", "title": "" }, { "docid": "fba07b7561a15526799bc5ec27da647d", "score": "0.51292896", "text": "def tearDownClass(cls):\n cls.runModule(\n \"g.remove\",\n flags=\"f\",\n type=\"vector\",\n name=(cls.inpoint, cls.outtrain, cls.outvalid),\n )\n cls.del_temp_region()", "title": "" }, { "docid": "ed5b4873f9c23b4ba343b9fa62871191", "score": "0.5128898", "text": "def _reset_env(self, *args, **kwargs):\n # Reset Simulation\n self.reset_simulation()\n\n # Reset ground in simulation\n self.plane.reset()\n\n # Border\n self.border.reset()\n\n # Robot\n robot_state = self.robot.reset()\n\n # Goal\n if self.tgt_pose_is_rdn:\n xy_offset = \\\n self.np_random.randn(2)*np.sqrt(self.goal_state_var[:2])\n yaw_offset = \\\n self.np_random.randn(1)*np.sqrt(self.goal_state_var[2])\n else:\n xy_offset = np.zeros(2)\n yaw_offset = 0\n # Set XY position\n des_pos = np.zeros(3)\n des_pos[:2] = self.goal_state_mean[:2] + xy_offset\n # Set the height (Z)\n des_pos[2] = self.goal_height\n # Set the orientation (Yaw)\n des_yaw = self.goal_state_mean[2] + yaw_offset\n # Set the pose of _object object\n des_pose = np.concatenate((des_pos, create_quat(rot_yaw=des_yaw)))\n self.goal.reset(pose=des_pose)\n\n # Obstacle\n if self.tgt_pose_is_rdn:\n xy_offset = \\\n self.np_random.randn(2)*np.sqrt(self.obst_state_var[:2])\n yaw_offset = \\\n self.np_random.randn(1)*np.sqrt(self.obst_state_var[2])\n else:\n xy_offset = np.zeros(2)\n yaw_offset = 0\n # Set XY position\n des_pos = np.zeros(3)\n des_pos[:2] = self.obst_state_mean[:2] + xy_offset\n # Set the height (Z)\n des_pos[2] = self.obst_height\n # Set the orientation (Yaw)\n des_yaw = self.obst_state_mean[2] + yaw_offset\n # Set the pose of _object object\n des_pose = np.concatenate((des_pos, create_quat(rot_yaw=des_yaw)))\n self.obstacle.reset(des_pose)\n\n # Update Environment State and Observation\n state = self._update_env_state()\n observation = self._update_env_obs()\n\n # Replace init_cond with current state\n if self._is_env_instantiation_complete:\n self._current_init_cond = np.copy(self._state)\n\n # Update max reward\n if self._is_env_instantiation_complete:\n self._max_reward = np.array([0.])\n # self._max_reward = self._calc_max_reward()\n\n # Visualization (if applicable)\n self.enable_vis()\n\n return observation", "title": "" }, { "docid": "9ffe222438b144fc27af0161eee5edd1", "score": "0.51283425", "text": "def tearDown(self):\n # remove refs\n self.lattice = None\n self.filter = None", "title": "" }, { "docid": "8e05af26c9816c47e4ab73894995dd8d", "score": "0.51277524", "text": "def clear(self):\n del self._data[:]\n self._data = np.zeros(shape=self._memory_size, dtype=object)\n 
self._data_cursor = 0\n self._tree.fill(0.0)", "title": "" }, { "docid": "50fea6f536858edf3f8f4bbc17d1d4a8", "score": "0.51220185", "text": "def clearBranch(self):\n self.values.clear()\n self._branches = []", "title": "" }, { "docid": "76fabf3340b310fb8dbc0bfc2834bec2", "score": "0.5108787", "text": "def robot_specific_reset(self):\n super(FetchGripper, self).robot_specific_reset()\n\n joints = self.untucked_default_joints\n set_joint_positions(self.robot_ids[0], self.joint_ids, joints)\n\n self.controller.reset()", "title": "" }, { "docid": "630a0185aaedb480987729717191e4b8", "score": "0.5104828", "text": "def tearDown(self):\n super().tearDown()\n del self.model_fp16\n del self.model_4bit\n\n gc.collect()\n torch.cuda.empty_cache()", "title": "" }, { "docid": "630a0185aaedb480987729717191e4b8", "score": "0.5104828", "text": "def tearDown(self):\n super().tearDown()\n del self.model_fp16\n del self.model_4bit\n\n gc.collect()\n torch.cuda.empty_cache()", "title": "" }, { "docid": "8dae105f2816db214a87362f8b9bb20c", "score": "0.5103597", "text": "def _ResetState(self):\n # A set of the nodes which have been on all paths so far.\n # In the end this will contain the intersection of all paths between\n # start and finish.\n self._solution_set = None\n # One path from start to finish. The nodes need to be returned ordered.\n # As otherwise we might set the destination behind the definition point of\n # some binding.\n self._one_path = None\n # A map from nodes to a set of nodes, which are on all paths between the\n # node and the finish.\n # If this is None it means that no path to finish exist from the node.\n self._node_to_finish_set = {}", "title": "" }, { "docid": "89b7c824a489c94237c4896676f8ef2e", "score": "0.5090819", "text": "def reset_paths(self):\n self.clear_solved()\n self.clear_nodes()\n self.clear_node_paths()\n self.clear_smooth_path()", "title": "" }, { "docid": "1d31a8102870b236e7c743ed9294d14c", "score": "0.50890845", "text": "def killNode(self):\n self.key = None\n self.parent = None\n self.child = None\n self.sibling = None", "title": "" }, { "docid": "5a9ac0cc3ba59981b762d3925797cf25", "score": "0.50666916", "text": "def reset(self):\n self.solution_set = []\n self.dist_to_soln = []", "title": "" }, { "docid": "dbaac5c6ddf5192bb27e0b0b19e79e0d", "score": "0.5063979", "text": "def test_explicit_del_he4(self):\n gdf = GridFile(self.test_driver_gridfile4)\n utm = gdf.grids['UTMGrid']\n del gdf", "title": "" }, { "docid": "3bf4f71b2f595e74e9fee6b8eab1a9fe", "score": "0.50639284", "text": "def deleteSubtree(self):\r\n\r\n\t\tfor i in range(2):\r\n\t\t\tif self.child[i] is not None:\r\n\t\t\t\tself.child[i].deleteSubtree()\r\n\t\t\t\tself.child[i] = None", "title": "" }, { "docid": "4ba281f6b7ca73d40b460ca7a66315fa", "score": "0.5058372", "text": "def reset():\n\tglobal MATRIX\n\tglobal PATH_FOUND\n\tglobal OPEN_LIST, CLOSED_LIST\n\tglobal START_NODE, END_NODE\n\tglobal START_NODE_SET, END_NODE_SET\n\tglobal PATH_NODES_X, PATH_NODES_Y\n\n\tMATRIX = initCells()\n\tPATH_FOUND = False\n\tOPEN_LIST = []\n\tCLOSED_LIST = []\n\tSTART_NODE_SET = False\n\tSTART_NODE = {\"x\": None, \"y\": None, \"f\": None, \"g\": None, \"h\": None, \"parent\": None, \"barrier\": False}\n\tEND_NODE = {\"x\": None, \"y\": None, \"f\": None, \"g\": None, \"h\": None, \"parent\": None, \"barrier\": False}\n\tEND_NODE_SET = False\n\tPATH_NODES_X = []\n\tPATH_NODES_Y = []", "title": "" }, { "docid": "0129778ed4d4ef23fb6665b06b7939bc", "score": "0.50513005", "text": "def 
clear_master_index():\n\tword_index.clear()", "title": "" }, { "docid": "c6e36c17de054b9d303411dcbfe026fa", "score": "0.5050757", "text": "def clear(self):\n self.root = None", "title": "" }, { "docid": "c6e36c17de054b9d303411dcbfe026fa", "score": "0.5050757", "text": "def clear(self):\n self.root = None", "title": "" }, { "docid": "f3a4e89c9650f88a3e03d6d8c1ae073f", "score": "0.5050175", "text": "def clear_coordinate_terms(self):\n pass", "title": "" }, { "docid": "f68fe283f9ea2a984c702925797a873c", "score": "0.5035291", "text": "def tearDown(self):\n if self.temp_scope:\n self.temp_scope.delete()\n super().tearDown()", "title": "" }, { "docid": "a993486309bea252f1d4fcd073c6a72e", "score": "0.5023681", "text": "def reset_grad(self):\r\n self.unet.zero_grad()", "title": "" }, { "docid": "0716e8b1a2193221e84105c64d89bb6b", "score": "0.50203604", "text": "def destroy_process_group():\n global _backend\n global _initialized\n torch._C._dist_destroy_process_group()\n _backend = dist_backend.UNDEFINED\n _initialized = 0", "title": "" }, { "docid": "be814324c7d3e3eb127babdf186f4029", "score": "0.501431", "text": "def _rebuild(self):\n self._tree = KDTree(np.hstack([key.get_array() for key in self._key_list]).T)", "title": "" }, { "docid": "1a79740bc7e460da15bd6cc15240a1b0", "score": "0.5014035", "text": "def reset( self ):\n self.canv.delete( 'bar' )\n self.canv.delete( 'text' )\n self.killBtn.configure( state=DISABLED )\n self.targetGen = None\n self.targetArgs = []\n self.targetKwds = []\n self.killVar.set( 0 )\n self.targetIdx = 0\n self.targetLen = 0", "title": "" }, { "docid": "50e7482de8b650f4083d6ac9fc64e1c6", "score": "0.5009572", "text": "def tearDown(self):\n del self.model_fp16\n del self.model_8bit\n\n gc.collect()\n torch.cuda.empty_cache()", "title": "" }, { "docid": "50e7482de8b650f4083d6ac9fc64e1c6", "score": "0.5009572", "text": "def tearDown(self):\n del self.model_fp16\n del self.model_8bit\n\n gc.collect()\n torch.cuda.empty_cache()", "title": "" }, { "docid": "f35ae651d34c4682c8c13f60afaba078", "score": "0.50002486", "text": "def test_delete_nodes(self):\n pass", "title": "" }, { "docid": "b5c594a5f2b2c723608f28bf0994d179", "score": "0.49987623", "text": "def clean_up(self):\n self.tsk_opt = None\n self.writer = None\n self.model = None", "title": "" }, { "docid": "8ad06c555d2dda86760b6ad6613ebac7", "score": "0.49955243", "text": "def _reset_global_indexes(self):\n for node in self.nodes.values():\n node.global_index = None", "title": "" }, { "docid": "05744ff5f707565bf4cc9d444213d09b", "score": "0.49906743", "text": "def test_delete_node(self):\n pass", "title": "" }, { "docid": "2ebc06d1be91a38f31ccf413e575e9d0", "score": "0.4982484", "text": "def destroy(self):\n from pgdrive.engine.engine_utils import get_engine\n engine = get_engine()\n self.detach_from_world(engine.physics_world)\n if self._body is not None and hasattr(self.body, \"object\"):\n self.body.generated_object = None\n if self.origin is not None:\n self.origin.removeNode()\n self.dynamic_nodes.clear()\n self.static_nodes.clear()\n self._config.clear()", "title": "" }, { "docid": "c61ab87aba114728b5ed17e265e4104d", "score": "0.4978644", "text": "def clean(self):\r\n chdir(self.root)\r\n if exists( self.testroot): \r\n remove_tree(self.testroot)", "title": "" }, { "docid": "b0ea70ea841b5b140d03177c300682ca", "score": "0.49755806", "text": "def reset(group):\n\n # Define waypoint\n startpose = group.get_current_pose().pose\n startpose.position.x = -0.5\n startpose.position.y = 0.35\n startpose.position.z = 
1.025\n\n # Compute and execute path\n plan,_ = group.compute_cartesian_path([startpose], 0.01, 0.0)\n group.execute(plan, wait=True)\n group.stop()", "title": "" }, { "docid": "e1dcc8e5c04d185589a4ca77994ad7e6", "score": "0.4970534", "text": "def resetTrajectory(self):\n self.trajectory = None", "title": "" }, { "docid": "e440bd9a49ed576f77e5b035fea04a0e", "score": "0.4966538", "text": "def destroy_unit(self, target: BaseUnitObject):\n target.position.slot = slotEmpty\n if isinstance(target, Unit):\n del self.units[target.pk]", "title": "" }, { "docid": "956beb003e8d743dd14478b0f95addc6", "score": "0.49659526", "text": "def _no_child_delete(self, node):\n if node.parent.left_leaf == node:\n node.parent.left_leaf = None\n else:\n node.parent.right_leaf = None", "title": "" }, { "docid": "e8117a0c46179962421364ebd652b0fa", "score": "0.4958142", "text": "def make_quadtree(self):\n X = np.random.random((NBodySimulator.num_particles, 2))\n mins = np.array([-0.1, -0.1])\n maxs = np.array([1.1, 1.1])\n # create all particles\n particles = [Particle(self.env, NBodySimulator.network, pos, []) for pos in X]\n # assign neighbors to particles\n for i in range(NBodySimulator.num_particles-1):\n particles[i].set_neighbor(particles[i+1])\n\n NBodySimulator.start_particle = particles[0]\n\n # create quadtree\n NBodySimulator.root_node = InternalMasterNode(self.env, NBodySimulator.network, mins, maxs, 0, particles, parents=[])\n NBodySimulator.log(NBodySimulator.quadtree_str())", "title": "" }, { "docid": "0b9658626ee34ff18e3f29087e3d2da0", "score": "0.49564838", "text": "def _clear(self):\n self.cur_steps = 0\n self.best_call_count = 0\n self.tabu_list = deque(maxlen=self.tabu_size)\n self.current = self.initial_state\n self.best = self.initial_state", "title": "" }, { "docid": "fcd94c38c727a22914e1a21c6fd63dc4", "score": "0.4955896", "text": "def __del__(self):\n self.__tail.empty()\n self.__direction = [0, 0]", "title": "" }, { "docid": "aadd23f8379eda1d69b59f4ec99747e6", "score": "0.49534222", "text": "def teardown_method(self):\n world.clear_paths()\n print(\"\\nEnd of tests in: %s\\n-------------------\\n\" % __name__)\n self.bigml = {}", "title": "" }, { "docid": "aadd23f8379eda1d69b59f4ec99747e6", "score": "0.49534222", "text": "def teardown_method(self):\n world.clear_paths()\n print(\"\\nEnd of tests in: %s\\n-------------------\\n\" % __name__)\n self.bigml = {}", "title": "" }, { "docid": "71511a17e8ec84bcfe6c7e5abe63393e", "score": "0.49450734", "text": "def reset(self) -> None:\n self.indices = np.array([])\n self.positions = np.array([])\n self.neighbors = np.array([])\n self.distances = np.array([])\n self.distvals = np.array([])\n self.pnvecs = np.array([])\n self.paxes = np.array([])\n self.pmask = np.array([])\n self._dmap = None\n self.invalid_idx = -1\n self.invalid_distidx = -1", "title": "" }, { "docid": "0886b85e2f48cc44c555c7a52f91aedc", "score": "0.49435446", "text": "def finalize(self):\n if self.delete_orphans:\n source_keyvals = set(self.source.distinct(self.source.key))\n target_keyvals = set(self.target.distinct(self.target.key))\n to_delete = list(target_keyvals - source_keyvals)\n if len(to_delete):\n self.logger.info(f\"Finalize: Deleting {len(to_delete)} orphans.\")\n self.target.remove_docs({self.target.key: {\"$in\": to_delete}})\n super().finalize()", "title": "" }, { "docid": "6479ff86677a96c3c1676369efaf566a", "score": "0.49433276", "text": "def reset_closest_enemies(self):\n self.__closest_enemies = []", "title": "" }, { "docid": "8d8f1afccc4290de98faf3376d37c06c", 
"score": "0.49397883", "text": "def reset(self):\n nodesToRemove = []\n if self.currentVolumeId is not None:\n nodesToRemove.append(slicer.mrmlScene.GetNodeByID(self.currentVolumeId))\n if self.currentResultsNode is not None:\n nodesToRemove.append(self.currentResultsNode)\n if self.currentLabelmapResults is not None:\n nodesToRemove.append(self.currentLabelmapResults)\n\n if self.currentTracheaModel is not None:\n nodesToRemove.append(self.currentTracheaModel)\n # Remove all the cylinder models for every possible stent\n for node in self.currentCylindersModel.values():\n if node is not None:\n nodesToRemove.append(node)\n\n #for node in itertools.chain.from_iterable(self.currentFiducialsListNodes.itervalues()):\n for value in self.currentFiducialsListNodes.values():\n if value is not None:\n for node in value:\n nodesToRemove.append(node)\n\n for node in nodesToRemove:\n slicer.mrmlScene.RemoveNode(node)\n\n self.__initVars__()", "title": "" }, { "docid": "b3ce908c93358c0c0488def08a0d9436", "score": "0.4937703", "text": "def test_explicit_del_he5(self):\n gdf = GridFile(self.test_driver_grid_file)\n utm = gdf.grids['UTMGrid']\n del gdf", "title": "" }, { "docid": "af66a8bbbb1e17e3f8430aa1b5f98cd6", "score": "0.49372154", "text": "def clear(self, target_commit):\n raise NotImplementedError # pragma: no cover", "title": "" }, { "docid": "608c3a7c0d1c07cd772cbcc11ab201c8", "score": "0.49324268", "text": "def do_delete_worktree(self, args):\n routines = self.get_routines()\n routines.load()\n self.do_coroutine(routines.delete_worktree_routine(self.get_feedback_ui()))", "title": "" }, { "docid": "ee8cb091a9e7aa2ef4e978f7063d0553", "score": "0.49233145", "text": "def reset_grad(self):\n self.unet.zero_grad()", "title": "" }, { "docid": "dd5e733a23e42b033ef47da54693a073", "score": "0.49098635", "text": "def destroy_plan(self):\n fqwoa_mpi.qwoa_state(\n self.size,\n self.dummy_gammas,\n self.dummy_ts,\n self.dummy_qualities,\n self.dummy_lambdas,\n self.initial_state,\n self.final_state,\n self.comm.py2f(),\n -1)", "title": "" }, { "docid": "7a9c92f9a5a68e8740f32f94585538a8", "score": "0.49037343", "text": "def clean_cuda(self):\n self.grid_gpu.free()", "title": "" }, { "docid": "7d1c56d8cb4ac30061037d5611dabdc6", "score": "0.49009398", "text": "def reset(self):\n\n # projective matrix\n self.P = self.K @ self.M_inv[:3, :4]\n # self.P = self.K @ self.M[:3, :4]\n assert self.P.shape == (3, 4)\n\n # clearing cached properties\n if \"H\" in self.__dict__:\n del self.__dict__[\"H\"]\n if \"H_inv\" in self.__dict__:\n del self.__dict__[\"H_inv\"]", "title": "" }, { "docid": "09ada0c7553a79b49cb900c29fcd89d5", "score": "0.489768", "text": "def flush_node_stack(cls):\n cls.created_nodes_stack = []", "title": "" }, { "docid": "ef354b3ff83d36d92aa993ebf100139c", "score": "0.48942393", "text": "def tearDown(self):\n super(BaseTracerTestCase, self).tearDown()\n\n self.reset()\n delattr(self, 'tracer')", "title": "" }, { "docid": "889be6a3afa6b278113d1de0eacc0dfe", "score": "0.4890334", "text": "def _on_op_placement_context_change(self):\n self.clear_ancestor_caches()\n self.clear_subtree_caches()", "title": "" }, { "docid": "c90728b5264d9caf6362374687e52862", "score": "0.4889196", "text": "def _reset_env(self, *args, **kwargs):\n # Reset Simulation\n self.reset_simulation()\n\n # Reset ground in simulation\n self._plane.reset()\n\n # Robot\n condition = kwargs.pop('condition', None)\n if condition is None:\n if self._random_config:\n n_joints = len(self._init_robot_config)\n self._init_robot_config = 
self._init_robot_config_mean + \\\n                    self._init_robot_config_std ** 2 * np.random.randn(n_joints)\n            else:\n                self._init_robot_config = self._init_robot_configs[condition]\n        self._robot.initial_configuration = self._init_robot_config\n        robot_state = self._robot.reset()\n\n        # Reset object in simulation\n        tray_pose = self.get_tray_pose()\n        if condition is None:\n            object_offset = self.object_offset\n            object_pose = pose_transform(tray_pose, object_offset)\n        else:\n            object_pose = self.obj_init_conds[condition]\n\n        self._object.reset(pose=object_pose)\n        self._init_object_pose = object_pose\n\n        # Target\n        tray_pose = self.get_tray_pose()\n        if condition is None:\n            if self._random_tgt:\n                # Sampling with uniform\n                tgt_dist = np.inf\n                while tgt_dist > 0.6 or tgt_dist < 0.2:\n                    tgt_pos = np.random.uniform(\n                        low=self._target_pose_lims[:3, 0],\n                        high=self._target_pose_lims[:3, 1],\n                    )\n                    tgt_dist = np.linalg.norm(tgt_pos - tray_pose[:3])\n                tgt_ori = np.random.uniform(\n                    low=self._target_pose_lims[3:, 0],\n                    high=self._target_pose_lims[3:, 1],\n                )\n                tgt_ori = create_quat(rot_yaw=tgt_ori[2])\n                target_pose = np.concatenate((tgt_pos, tgt_ori))\n            else:\n                target_pose = pose_transform(tray_pose, self.target_offset)\n\n        else:\n            target_pose = self.tgt_init_conds[condition]\n        self._target.reset(pose=target_pose)\n        self._init_target_pose = target_pose\n\n        if self._is_env_instantiation_complete:\n            if self.active_subtask == 1:\n                robot_uid = self._robot.id\n                tray_index = self._robot._links['tray'].bodyPartIndex\n                object_uid = self._object.id\n                const_pos = [0.2, 0.0, 0.029+0.062]\n                self.pbc.createConstraint(parentBodyUniqueId=robot_uid,\n                                          parentLinkIndex=tray_index,\n                                          childBodyUniqueId=object_uid,\n                                          childLinkIndex=-1,\n                                          jointType=self.pbc.JOINT_FIXED,\n                                          jointAxis=(0., 0., 1.),\n                                          parentFramePosition=const_pos,\n                                          childFramePosition=(0., 0., 0.)\n                                          )\n\n        self._prev_tgt_tray_error = None\n        self._prev_obj_tray_error = None\n\n        # Visualization (if applicable)\n        self.enable_vis()\n\n        # Update Environment State and Observation\n        state = self._update_env_state()\n        observation = self._update_env_obs()\n\n        # Update max reward\n        if self._is_env_instantiation_complete:\n            self._max_rewards = np.array([0., 0., 0.])\n            # self._max_rewards = self._calc_max_reward()\n\n        return observation", "title": "" }, { "docid": "b7054a0e7596c99ef0336355e67c08cd", "score": "0.4887929", "text": "def clean_up(self):\n        # remote references\n        self.higher_layer_pruning = None\n        self.higher_layer_prediction = None\n        self.higher_layer_K = None\n        self.long_range_projection = None\n        self.lower_layer_new_hypo = None\n        self.lower_layer_evidence = None\n\n        # local attributes\n        # if self.params[\"self_supervised\"]:\n        self.bu_posterior = None \n        self.td_posterior = None \n        self.likelihood = None\n        # self.best_hypo = None\n        self.lower_layer_hypos = None\n        self.layer_evidence = None\n        self.layer_LH = None # otherwise updated dependent on word length won't work\n        self.layer_prediction = None\n        self.layer_new_hypo = None\n        self.layer_pruning = None\n        self.layer_long_range_projection = None", "title": "" }, { "docid": "fb0461447999c8e4d55b546e157bdc2d", "score": "0.48866773", "text": "def __del__(self):\n        if self.tensor:\n            _LIB.release_graph_tensor(ctypes.c_void_p(self.tensor))\n            self.tensor = None", "title": "" }, { "docid": "57bae6a9eb13ad2d66e3b4fb042d5768", "score": "0.4885694", "text": "def clear(self):\n        self._heap = []\n        self._total_positives = 0", "title": "" }, { "docid": "b5bc1273e525fbd3c377ea3f9d48c63f", "score": "0.4882694", "text": "def resetTarget(self):\n        if 
self.lastTargetedPrey:\n lastPrey = hsm.objIDToObject[self.lastTargetedPrey]\n lastPrey.isTargeted = False", "title": "" }, { "docid": "221f550281e9d6d846ade134ec3bf9f9", "score": "0.48797536", "text": "def reset(self):\n self._n_features = -1\n self._n_targets = -1\n self._size = 0\n self._next_insert = 0\n self._oldest = 0\n self._imask = None\n self._X = None\n self._y = None\n self._is_initialized = False\n\n return self", "title": "" }, { "docid": "8c7de5ee1eb5b45b11b02c2466097f77", "score": "0.4877992", "text": "def teardown(self):\n super(CodeProject, self).teardown()\n self.manager.garbage.append(self._output_dir)", "title": "" }, { "docid": "059c4b52ed3913049618d1f6262a507e", "score": "0.48763055", "text": "def prepare_tree(self) -> None:\n if not self.target_mol:\n raise ValueError(\"No target molecule set\")\n\n self.stock.reset_exclusion_list()\n if self.config.exclude_target_from_stock and self.target_mol in self.stock:\n self.stock.exclude(self.target_mol)\n self._logger.debug(\"Excluding the target compound from the stock\")\n\n self._logger.debug(\"Defining tree root: %s\" % self.target_smiles)\n self.tree = SearchTree(root_smiles=self.target_smiles, config=self.config)\n self.analysis = None\n self.routes = RouteCollection([])", "title": "" }, { "docid": "4d1b21234beac7521022b3ec437cb4b4", "score": "0.48729455", "text": "def clear(self):\n self.__hit_positions = []\n self.__num_terminated = 0", "title": "" }, { "docid": "18c973cf0c1b24308bd1475faa60bc4b", "score": "0.48720494", "text": "def clear(self):\n\n del self.positions[:]\n del self.X[:]", "title": "" }, { "docid": "d3be622d6c409ae5ae6fba27c1cd1fa3", "score": "0.4870433", "text": "def clear_trace_entities(self):\n if self._local is not None:\n self._local.clear()", "title": "" }, { "docid": "f5e055e0c794eeb3e1ec7145736be084", "score": "0.48690325", "text": "def cut_tree(self, game, treelist):\n self.engaged = True\n if len(treelist) != 0:\n location_tree = random.choice(treelist)\n start_x = self.get_x()\n start_y = self.get_y()\n dest_x = location_tree.get_x()\n dest_y = location_tree.get_y()\n x_vel = ((dest_x - start_x) / 200) * 2\n y_vel = ((dest_y - start_y) / 200) * 2\n \n while (abs(location_tree.get_x() - self.get_x()) > 1 and abs(location_tree.get_y() - self.get_y()) > 1):\n self.dx = x_vel\n self.dy = y_vel\n self.dx, self.dy = 0, 0\n\n location_tree.fell(game)\n treelist.remove(location_tree)\n \n while (abs(start_x - self.get_x()) > 1 and abs(start_y - self.get_y()) > 1):\n self.dx = -(x_vel)\n self.dy = -(y_vel)\n self.dx, self.dy = 0, 0\n self.x, self.y = -50, -50\n self.engaged = False", "title": "" }, { "docid": "e9dda65b2cb7c0db401f45563a9d7719", "score": "0.486716", "text": "def CleanUp(self):\n self._parent = None", "title": "" }, { "docid": "0f43136203175364118aa5b4f0f5faf1", "score": "0.4865883", "text": "def tree_delete(self, z):\n action_pos = super(AVLTree, self).tree_delete(z)\n self._rebalance(action_pos)", "title": "" }, { "docid": "e7d79795eee1d76635edda71266fcf74", "score": "0.48619634", "text": "def action_clear(self) -> None:\n tree = self.query_one(Tree)\n tree.clear()", "title": "" }, { "docid": "9d27aa0b8ca3d3ddcc334da28c7f29d3", "score": "0.48606405", "text": "def clear_dists(self):\n self.positions = []\n self.streams = []\n self.geometries = []", "title": "" }, { "docid": "b03840c9cc59132e2cbd508f6a41ce5f", "score": "0.48594007", "text": "def reset(self):\n self.grid.reset()\n self.agent.reset()\n if self.starter == None:\n c = int(self.grid.grid_size / 2)\n self.agent.pos = (c, 
c)\n else:\n self.agent.pos = self.starter \n\n self.t = 0\n self.rewards = []\n self.history = []\n self.record_step()\n\n return self.visible_state", "title": "" }, { "docid": "bd1606a4ee7bfec21b56864c4fd53bfc", "score": "0.48579985", "text": "def delete_target(self, byid=None, byposition=None):\n\n elem = _get_first_child_by_pos_or_id(self._node, \"target\", byid, byposition)\n self._node.remove(elem)", "title": "" } ]
d2d4c69573a6987380877809bb98ee11
Numeric value confirming this is the zone's DNSKEY
[ { "docid": "3e58e1dc66f48d867c5c3c91c0f42095", "score": "0.0", "text": "def flags(self):\n self._pull()\n return self._flags", "title": "" } ]
[ { "docid": "9233a0eda3bf8db5065f4038cc0725eb", "score": "0.5917154", "text": "def keytag(dnskey):\n if dnskey.algorithm == 1:\n a = ord(dnskey.key[-3]) << 8\n b = ord(dnskey.key[-2])\n return a + b\n else:\n header = struct.pack(\"!HBB\", dnskey.flags, dnskey.protocol,\n dnskey.algorithm)\n key = header + dnskey.key\n ac = 0\n for i, value in enumerate(ord(x) for x in key):\n if i % 2:\n ac += value\n else:\n ac += (value << 8)\n ac += (ac >> 16) & 0xffff\n return ac & 0xffff", "title": "" }, { "docid": "af86d3d2e08636a7ffe0840a9b896213", "score": "0.574273", "text": "def ttl(self) -> int:\r\n return self.data.get(\"TTL\")", "title": "" }, { "docid": "1fbcd510ebed28601c689849c474dc35", "score": "0.5689361", "text": "def getKey(self) -> long:\n ...", "title": "" }, { "docid": "b7fa9a8a6e14de5dd56ae2c3ba0e6ea9", "score": "0.5648747", "text": "def retrysecurezone():\n opts, zone = _setup_tools()\n Dnskey = models.Dnskey \n nameserver = named.Dns()\n dnskey_rrset = nameserver.lookup(zone, 'DNSKEY')\n keytags = [named.keytag(dnskey) for dnskey in dnskey_rrset]\n keys = Dnskey.objects.get_zone_keys(zone)\n num_keys = len(keys)\n if num_keys != 3:\n log.error(\"wrong number of keys.\")\n num_pre = num_zsk = num_ksk = 0\n for key in keys:\n if key.status == 'pre-active':\n num_pre += 1\n if key.type == 'ZSK':\n num_zsk += 1\n elif key.type == 'KSK':\n num_ksk += 1\n if num_pre != 2:\n mesg = \"wrong number of 'pre-active' keys. Need two, got %d\"\n mesg %= num_pre\n log.error(mesg)\n if num_ksk != 1:\n log.error(\"wrong number of KSK keys. Need one, got %d.\" % num_ksk)\n if num_zsk != 2:\n log.error(\"wrong number of ZSK keys. Need two, got %d\" % num_zsk)\n keys_to_add = []\n for key in keys:\n if not int(key.keytag) in keytags:\n keys_to_add.append(key)\n if not keys_to_add:\n log.error(\"All the keys are in the DNS already.\")\n\n return _common_securezone(keys_to_add, zone, nameserver)", "title": "" }, { "docid": "d862905a1af4f523ff232b8a93d82cb6", "score": "0.55396634", "text": "def _getOldDnsZone(self):\n\n with self.dbm.session() as session:\n try:\n result = GlobalParametersDbHandler().getParameter(\n session, 'DNSZone'\n )\n\n return result.value.lower() if result.value else None\n except ParameterNotFound:\n return None", "title": "" }, { "docid": "9ee947d2e0fe0fdfca50af3f4e54e1de", "score": "0.55210745", "text": "def dns_id(self):\n return self._id", "title": "" }, { "docid": "bd9277b3e851f7810ac80e121cfc379f", "score": "0.5457395", "text": "def zone_id(self) -> str:\n return self._zone_id", "title": "" }, { "docid": "bd7766284876a14b9e3343c23eceb58f", "score": "0.54504806", "text": "def get_key_serial_nr(self):\t\t\t\n\t\treturn self.smidr", "title": "" }, { "docid": "5dae9e6013eeadc0c436c0b1fc1e7549", "score": "0.5441539", "text": "def platform_reserved_dns_ip(self) -> Optional[str]:\n return pulumi.get(self, \"platform_reserved_dns_ip\")", "title": "" }, { "docid": "dd745cafcde6c54d31787313602fcb3f", "score": "0.5438075", "text": "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "title": "" }, { "docid": "dd745cafcde6c54d31787313602fcb3f", "score": "0.5438075", "text": "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "title": "" }, { "docid": "b2e2b6e64ca536d5142a5e9ca8756976", "score": "0.5417634", "text": "def ChildKey(self,\n index: int) -> 'Bip32':\n return self.__CkdPriv(index) if not self.m_is_public else self.__CkdPub(index)", "title": "" }, { "docid": "a4575b1c82645ef37cefa505b66e8c36", "score": "0.5386305", "text": "def 
ip_address_key(group: str, request: HttpRequest) -> str:\n return get_client_ip(request)", "title": "" }, { "docid": "5006f117ea2b5c557523c67a5e1e45cc", "score": "0.5345722", "text": "def keyid(self):\n pass", "title": "" }, { "docid": "53ee16c7a89361767985ccee2767d6d1", "score": "0.53334296", "text": "def __int__(self):\n return self._ip_dec", "title": "" }, { "docid": "5b63a43890a7176986c8bf42f71c3368", "score": "0.53295803", "text": "def ttl(self, key):\n return self.get_conn().ttl(key)", "title": "" }, { "docid": "f1ff48f4e9d2748e183e9f7c947bb7e9", "score": "0.52999026", "text": "def test_dns_answer(self, HostedZoneId: str, RecordName: str, RecordType: str, ResolverIP: str = None, EDNS0ClientSubnetIP: str = None, EDNS0ClientSubnetMask: str = None) -> Dict:\n pass", "title": "" }, { "docid": "e5fc8486680c564fecd4302b89eaffd5", "score": "0.529594", "text": "def verify_domain(host_name, session):\n c = session.client(\"route53\")\n host_name = \".\".join(host_name.split(\".\")[-2:]) + \".\"\n r = c.list_hosted_zones_by_name(DNSName=host_name)\n for i in r[\"HostedZones\"]:\n if i[\"Name\"] == host_name and i[\"Config\"][\"PrivateZone\"] == False:\n zone_id = i[\"Id\"].split(\"/\")[-1]\n return zone_id\n print(f\"Error: Public zone for domain {host_name[:-1]} not found\")\n exit(1)", "title": "" }, { "docid": "2632cc988173604f09c4ba0e239c89c2", "score": "0.528697", "text": "def dns_zone_partner_id(self) -> str:\n return pulumi.get(self, \"dns_zone_partner_id\")", "title": "" }, { "docid": "2dd5a05893d27e39e7e294dd57c8bdcf", "score": "0.52837574", "text": "def securezone():\n opts, zone = _setup_tools()\n Dnskey = models.Dnskey \n keys = Dnskey.objects.get_zone_keys(zone) \n if keys.count():\n _show_zone_keystatus(zone, verbose=False)\n log.error(\"\\n%s already has the above keys.\" % zone,\n \"psz retrysecurezone might work for this zone.\")\n\n nameserver = named.Dns()\n dnskey_rrset = nameserver.lookup(zone, 'DNSKEY')\n if dnskey_rrset:\n num_keys = len(dnskey_rrset)\n log.error(\"The zone %s already has %d DNSKEYs\" % (zone, num_keys))\n\n newkeydir = defaults['path_newkeydir']\n key_dir = os.path.join(defaults['path_zonedir'], zone, newkeydir)\n try:\n os.chdir(key_dir)\n except OSError, err:\n log.error(\"chdir failed: %s\" % err) \n\n keys_made = []\n try:\n zsk2 = Dnskey.from_dnssec_keygen(zone)\n except errors.PszKeygenError, err:\n log.error(\"keygen failed making ZSK2 for zone %s. %s\" % (zone, err))\n zsk2.save()\n keys_made.append(zsk2)\n\n zonedir = os.path.join(defaults['path_zonedir'], zone)\n try:\n zsk1 = Dnskey.from_dnssec_keygen(zone)\n except errors.PszKeygenError, err:\n _cleanup(keys_made)\n mesg = \"keygen failed making ZSK1 for zone %s. %s\" % (zone, err)\n log.error(mesg)\n zsk1.update('pre-active')\n keys_made.append(zsk1)\n\n try:\n zsk1.move(zonedir)\n except errors.PszError, err:\n _cleanup(keys_made)\n mesg = \"Failed to move ZSK1: %s.\" % err\n log.error(mesg)\n\n try:\n ksk = Dnskey.from_dnssec_keygen(zone, keytype='KSK')\n except errors.PszKeygenError, err:\n _cleanup(keys_made)\n log.error(\"keygen failed making KSK for zone %s. 
%s\" % (zone, err))\n ksk.update('pre-active')\n keys_made.append(ksk)\n\n try:\n ksk.move(zonedir)\n except errors.PszError, err:\n _cleanup(keys_made)\n log.error(\"Failed to move KSK: %s\" % err)\n\n return _common_securezone(keys_made, zone, nameserver)", "title": "" }, { "docid": "973396b9ddf0a2ae67bcacfe13f060b4", "score": "0.5282698", "text": "def serialnum(self):\n if self.pubkey is None:\n raise ValueError(\"Public key should be loaded for fetch serial number.\")\n return int(self.pub_cert[0][1])", "title": "" }, { "docid": "fa9100926047bdff95db246d2c4f4490", "score": "0.5252558", "text": "def key(self):\n return self._nd.key", "title": "" }, { "docid": "6cca7e92a4a13c0e23bd8d6d82c7f3e2", "score": "0.52407086", "text": "def get_geo_key(coords):\n return (coords * 1000).astype(int).tobytes()", "title": "" }, { "docid": "e497b2c08876a0051e6f4aa139f0601f", "score": "0.52246916", "text": "def public_key(self, d, compressed=True):\n P = self.point_multiply(d, self.G)\n if compressed:\n if not P.y % 2:\n return '02' + format(P.x, '02x')\n return '03' + format(P.x, '02x')\n return '04' + format(P.x, '02x') + format(P.y, '02')", "title": "" }, { "docid": "0e057f6eae74ca184472b5816b0b84ac", "score": "0.5223977", "text": "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "title": "" }, { "docid": "0e057f6eae74ca184472b5816b0b84ac", "score": "0.5223977", "text": "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "title": "" }, { "docid": "f030d15c9556e1d6a21a7480e04a394a", "score": "0.52058226", "text": "def zone(self) -> int:\n return self._zone", "title": "" }, { "docid": "348d2dbb486321ef2bee41362704d247", "score": "0.52055156", "text": "def key_id(self) -> str:\n return pulumi.get(self, \"key_id\")", "title": "" }, { "docid": "7cffc0526c9ad82b06754fe02ca69b25", "score": "0.52047956", "text": "def dns_label(self):\n return self._dns_label", "title": "" }, { "docid": "7cffc0526c9ad82b06754fe02ca69b25", "score": "0.52047956", "text": "def dns_label(self):\n return self._dns_label", "title": "" }, { "docid": "8219d671686e35b9f740aa5fe0c41a10", "score": "0.52010715", "text": "def _show_zone_keystatus(zone, verbose=False):\n if verbose:\n fmt = \"%s %s (%s key, %d bits) has been %s since\"\n else:\n fmt = \"%s %s (%s key, %d bits) is %s\"\n Dnskey = models.Dnskey \n zones = {}\n zonekeys = Dnskey.objects.get_zone_keys(zone)\n for key in zonekeys:\n zones.setdefault(key.zone, []).append(key)\n if not zones:\n s = \"%s either has no keys in the DNS or doesn't exist.\"\n print s % zone\n return\n zone_list = zones.keys()\n zone_list.sort()\n now = datetime.datetime.now()\n for zone in zone_list:\n zones[zone].sort()\n print '\\n', zone\n print '-' * len(zone)\n for key in zones[zone]:\n s = fmt % (key.type, key.keytag, key.algorithm,\n key.size, key.status)\n print s,\n if not verbose:\n print\n continue\n updated = key.updated\n age = now - updated\n days = age.days\n if days:\n print '%d days ago' % days\n else:\n print 'earlier today'", "title": "" }, { "docid": "7fa3c21946f879a0495ae9dbe537daf9", "score": "0.5171472", "text": "def zone_id(self) -> ZoneId:\n return self._zone_id", "title": "" }, { "docid": "9f6fec56e4e373dca5889a39d70ff0e9", "score": "0.51695985", "text": "def get_hosted_zone_count(self) -> Dict:\n pass", "title": "" }, { "docid": "c0ed6eb13aada70952d322a31c527390", "score": "0.5162474", "text": "def max_key(cls):\n return b''.join([b'Z' for _ in range(0, cls.KV_LEN)])", "title": "" }, { "docid": 
"fbec3b637f83384c7de32face58f91eb", "score": "0.51546985", "text": "def __len__(self):\n return self.server.zcard(self.key)", "title": "" }, { "docid": "2359aa02bded7b786288d6f8d1920eb1", "score": "0.5117375", "text": "def FormatIntegralLastKey(value):\r\n assert value < 1000000000000000, value\r\n return '%015d' % value", "title": "" }, { "docid": "a8622979ccdc3fc3b14c84cf89db5c91", "score": "0.5113922", "text": "def ttl(self, key):\n raise NotImplementedError", "title": "" }, { "docid": "0c6425a3cd079e2b39803ee3dea1e3de", "score": "0.51119363", "text": "def public_key(sk):\n return encodepoint(scalarmult_base(sk))", "title": "" }, { "docid": "b85f30a6c716fcc7112b9be3679d045c", "score": "0.5111039", "text": "def _find_zone_id_for_domain(self, domain: str) -> str:\n paginator = self.r53.get_paginator(\"list_hosted_zones\")\n zones = []\n target_labels = domain.rstrip(\".\").split(\".\")\n for page in paginator.paginate():\n for zone in page[\"HostedZones\"]:\n if zone[\"Config\"][\"PrivateZone\"]:\n continue\n\n candidate_labels = zone[\"Name\"].rstrip(\".\").split(\".\")\n if candidate_labels == target_labels[-len(candidate_labels):]:\n zones.append((zone[\"Name\"], zone[\"Id\"]))\n\n if not zones:\n raise errors.PluginError(\n \"Unable to find a Route53 hosted zone for {0}\".format(domain)\n )\n\n # Order the zones that are suffixes for our desired to domain by\n # length, this puts them in an order like:\n # [\"foo.bar.baz.com\", \"bar.baz.com\", \"baz.com\", \"com\"]\n # And then we choose the first one, which will be the most specific.\n zones.sort(key=lambda z: len(z[0]), reverse=True)\n return zones[0][1]", "title": "" }, { "docid": "efc8e4c3903fcfb7659f218fc410e6a2", "score": "0.51051337", "text": "def _get_ttl(self):\n return self.__ttl", "title": "" }, { "docid": "efc8e4c3903fcfb7659f218fc410e6a2", "score": "0.51051337", "text": "def _get_ttl(self):\n return self.__ttl", "title": "" }, { "docid": "6ca4b7715cf91fcbca4d42216e70c2f6", "score": "0.5089235", "text": "def key(self) -> int:\n return self._key", "title": "" }, { "docid": "aa8c70647e4dda0c6d3d11887a359bb1", "score": "0.5074811", "text": "def generate_key(num_digit):\n min_val = 10 ** (num_digit - 1)\n max_val = (10 ** num_digit) - 1\n otp = random.randint(min_val, max_val)\n return otp", "title": "" }, { "docid": "28b3e4db2e6acbeaf1f3f0c39d4cc46c", "score": "0.50681955", "text": "def get_address(self) -> str:\n return f'hx{sha3_256(self.public_key[1:]).digest()[-20:].hex()}'", "title": "" }, { "docid": "38332390c5bc477d77e4c9e047dfc5ce", "score": "0.5060432", "text": "def gbkey(value):\n return value.Id.split(':')[1]", "title": "" }, { "docid": "e62008c65b716a7f3e2ea3a3722ff51a", "score": "0.50549275", "text": "def get_next_key_value_adapter(self):\n ret = {}\n ret['mac_address'] = None\n ret['ip_address'] = None\n ret['num'] = None\n ret['dhcp_scope'] = None\n ret['name'] = 'nic0'\n key_value = self.keyvalue_set.filter(\n key__startswith='nic', key__icontains='mac_address')[0]\n m = re.search(r'nic\\.(\\d+)\\.mac_address\\.0', key_value.key)\n ret['num'] = int(m.group(1))\n key_value_set = self.keyvalue_set.filter(\n key__startswith='nic.%s' % ret['num'])\n if not key_value_set:\n for kv in key_value_set:\n m = re.search(r'nic\\.\\d+\\.(.*)\\.0', kv.key)\n if m:\n ret[m.group(1)] = str(kv.value)\n return ret\n else:\n return False", "title": "" }, { "docid": "07147c3d51f678708e5806d9970a6728", "score": "0.5039538", "text": "def get_dns_record(self):\n if self.type == \"dkim\":\n self.value = lib.get_dkim_record(\n 
self.domain.name, self.domain.dkim_key_selector)\n else:\n func = getattr(lib, \"get_{}_record\".format(self.type))\n self.value = func(self.domain.name)", "title": "" }, { "docid": "a05efceb5022257749d65def0c5925c9", "score": "0.5037979", "text": "def _get_zone_idx(line):\n\n last_word = line.strip().split()[-1]\n return int(last_word.split(\"_\")[-1])", "title": "" }, { "docid": "2199e5d23261e4ce64d943134db54671", "score": "0.50347483", "text": "def _common_securezone(keys, zone, nameserver):\n rs = \"\\nUse 'retrysecure' command to try again with existing keys.\"\n try:\n _add_keys_to_dns(keys, zone, nameserver)\n except errors.PszDnsError, err:\n log.error(\"Dns update error: (%s)\" % err, rs)\n except errors.PszDnsCountError, err:\n log.error(err, rs)\n \n for key in keys:\n if key.status == 'pre-active':\n key.update('active')\n else:\n key.update('published')\n\n mesg = \"%s secured.\" % zone\n print mesg \n models.LogMessage(zone=zone, message=mesg).save()\n log.log(mesg)\n return 0", "title": "" }, { "docid": "603d268e0d6dcd693102ac15e6ae3404", "score": "0.50280756", "text": "def get_zone_id(zone_name):\n if zone_name[-1] != '.':\n zone_name = zone_name + '.'\n hosted_zones = route53.list_hosted_zones()\n x = filter(lambda record: record['Name'] == zone_name, hosted_zones['HostedZones'])\n try:\n zone_id_long = x[0]['Id']\n zone_id = str.split(str(zone_id_long),'/')[2]\n return zone_id\n except:\n return None", "title": "" }, { "docid": "998f51e14912962965f2649f00da3d2e", "score": "0.5025834", "text": "def ttl(self, key):\n expire_time = self.key_time_map.get(key,{}).get('expire_time',None)\n if expire_time:\n key_ttl = expire_time - time.time()\n if key_ttl > 0:\n return key_ttl\n return None", "title": "" }, { "docid": "04323400131291d2e442a9682619edb3", "score": "0.50246805", "text": "def unique_id(self):\n return \"zone_\" + str(self._id)", "title": "" }, { "docid": "073b4a3dccc7f1282d48252d9d0e9c32", "score": "0.5022926", "text": "def dns_name(self) -> str:\n return pulumi.get(self, \"dns_name\")", "title": "" }, { "docid": "073b4a3dccc7f1282d48252d9d0e9c32", "score": "0.5022926", "text": "def dns_name(self) -> str:\n return pulumi.get(self, \"dns_name\")", "title": "" }, { "docid": "073b4a3dccc7f1282d48252d9d0e9c32", "score": "0.5022926", "text": "def dns_name(self) -> str:\n return pulumi.get(self, \"dns_name\")", "title": "" }, { "docid": "073b4a3dccc7f1282d48252d9d0e9c32", "score": "0.5022926", "text": "def dns_name(self) -> str:\n return pulumi.get(self, \"dns_name\")", "title": "" }, { "docid": "0140c52eeb3cfcdf615c7d65452ff481", "score": "0.5022621", "text": "def aaadnatip(self) :\n\t\ttry :\n\t\t\treturn self._aaadnatip\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "8aa0898ab7bd6a0fc62b8cb250e89b9e", "score": "0.5014974", "text": "def validate_key(self, key):\n\t\treturn key", "title": "" }, { "docid": "820bde7785dc4287f72370e6f56a03ad", "score": "0.5010696", "text": "def get_pubkey_from_pub(pubkey):\n pub_cert = pubkey[0][6][1]\n pub_der = der_decoder.decode(bit2string(pub_cert))\n return int(pub_der[0][0])", "title": "" }, { "docid": "b84fea6b42fd9a258530b2e76e7e4e59", "score": "0.500453", "text": "def nonce_key(self, nonce):\n return sha256(str(self.entity['ID'].value) + nonce).hexdigest()", "title": "" }, { "docid": "8ac8f7ac02e3d37c384e24014addde5b", "score": "0.5003538", "text": "def _gen_key(self, data):\n nowbits = math.modf(time())\n return '%s%s' % (self._encode_num(int(nowbits[1])),\n self._encode_num(int(nowbits[0]*100000)))", 
"title": "" }, { "docid": "8b57bcc45e15a6d07a52ea6e92c599d2", "score": "0.5002354", "text": "def _calculate_part(self, key):\r\n # pyflakes complains about variable reuse if both of these lines use 'c'\r\n number = int(''.join(c for c in key if c.isdigit()))\r\n spaces = len([c2 for c2 in key if c2.isspace()])\r\n try:\r\n key_number = number // spaces\r\n except (ValueError, ZeroDivisionError):\r\n raise ValueError\r\n return struct.pack(\">I\", key_number)", "title": "" }, { "docid": "8b57bcc45e15a6d07a52ea6e92c599d2", "score": "0.5002354", "text": "def _calculate_part(self, key):\r\n # pyflakes complains about variable reuse if both of these lines use 'c'\r\n number = int(''.join(c for c in key if c.isdigit()))\r\n spaces = len([c2 for c2 in key if c2.isspace()])\r\n try:\r\n key_number = number // spaces\r\n except (ValueError, ZeroDivisionError):\r\n raise ValueError\r\n return struct.pack(\">I\", key_number)", "title": "" }, { "docid": "dbe613284a59ea9dc13c68ffe5b840e1", "score": "0.50011873", "text": "def key(self):\n return self.mip_key(self.mip)", "title": "" }, { "docid": "758d0d704d0fa40d19de5b02088b3a24", "score": "0.49939564", "text": "def _get_zone_id(client: botocore.client.Route53, domain: str) -> str:\n logger = get_logger(__name__)\n assert domain.endswith(\".\")\n\n # Filter out sub-domains; leaves domains intact\n fsd = \".\".join(domain.split(\".\")[-3:])\n\n # Find zone from Route 53 api\n zones = client.list_hosted_zones()\n zone_id = None\n for z in zones[\"HostedZones\"]:\n if fsd == z[\"Name\"]:\n zone_id = z[\"Id\"]\n\n if zone_id is None:\n msg = \"Could not find hosted zone for fully specified domain\"\n logger.error(msg, domain=fsd, zones=zones)\n logger.error(pformat(zones))\n raise Route53Error(msg)\n\n logger.info(\"Got HostedZoneId\", zone_id=zone_id)\n return zone_id", "title": "" }, { "docid": "6ada8c58ca548dd212b0af4f53ba8558", "score": "0.49906525", "text": "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "title": "" }, { "docid": "37125871727d924a810290857a5084be", "score": "0.49822593", "text": "def private_dns_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_dns_name\")", "title": "" }, { "docid": "8dea923cf0a58144ee2b4c26ca2f20a2", "score": "0.49822053", "text": "def as_int(self, key):\r\n return int(self[key])", "title": "" }, { "docid": "b056741b6a2a65fe6e08443d289f3fe2", "score": "0.49747562", "text": "def ReadHostKey(self):\n hk = self.Read(4)\n return hk", "title": "" }, { "docid": "8313c70e95a6ad728cf790815c801d85", "score": "0.4973687", "text": "def get_zone_label(zone_idx):\n return cemo.const.ZONE[zone_idx]", "title": "" }, { "docid": "0c405793b0b9d672a741e57da711f12d", "score": "0.4961278", "text": "def _get_dhcp_value(key):\n lookup = \"dhcp-option-force={}\".format(key)\n with open(DNSMASQ_NEUTRON) as stream:\n for line in stream:\n if not line.startswith(lookup):\n continue\n _, _, option_value = line.strip().partition(\"=\")\n _, _, value = option_value.partition(\",\")\n return value.strip()", "title": "" }, { "docid": "e9e8fdc91248fd35597d6b9c4c51c9cf", "score": "0.4957906", "text": "def zone_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone_id\")", "title": "" }, { "docid": "e9e8fdc91248fd35597d6b9c4c51c9cf", "score": "0.4957906", "text": "def zone_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone_id\")", "title": "" }, { "docid": "e9e8fdc91248fd35597d6b9c4c51c9cf", "score": "0.4957906", "text": "def zone_id(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone_id\")", "title": "" }, { "docid": "406b2de394fde6db4d7856ab59981a8a", "score": "0.49575812", "text": "def access_key_id(self) -> Any:\n return pulumi.get(self, \"access_key_id\")", "title": "" }, { "docid": "1c62334738abb96548267a7a901bc927", "score": "0.49510738", "text": "def _zone_index(self, zone: IUZone) -> str:\n # pylint: disable=no-self-use\n if zone is not None:\n return str(zone.index + 1)\n return \"0\"", "title": "" }, { "docid": "87185c1c6037c6520966f2889bed885c", "score": "0.49460456", "text": "def test_get_address_from_public_key():\n fet_crypto = FetchAICrypto()\n address = FetchAICrypto().get_address_from_public_key(fet_crypto.public_key)\n assert str(address) == str(fet_crypto.address), \"The address must be the same.\"", "title": "" }, { "docid": "87185c1c6037c6520966f2889bed885c", "score": "0.49460456", "text": "def test_get_address_from_public_key():\n fet_crypto = FetchAICrypto()\n address = FetchAICrypto().get_address_from_public_key(fet_crypto.public_key)\n assert str(address) == str(fet_crypto.address), \"The address must be the same.\"", "title": "" }, { "docid": "d48d02ac03091ed60c0b8b65569915a4", "score": "0.49436355", "text": "def translate_to_key_code(key: CommonKey) -> int:\n return _translate_to_key_code(key)", "title": "" }, { "docid": "a2a2f2730c580ace0abcf14c147bf167", "score": "0.49429187", "text": "def ttl(self) -> int:\n return self._ttl", "title": "" }, { "docid": "c2e9d5e7521ec9ca24a3ebf53290392c", "score": "0.49402174", "text": "def Int24sn():\n return BytesInteger(3, signed=True, swapped=native)", "title": "" }, { "docid": "c330a98ec9e3acaaf481deb36f82c33a", "score": "0.49395323", "text": "def pkScriptLen(self) -> int:\n return int(codecs.encode(self._pkScriptLen, \"hex\"), 16)", "title": "" }, { "docid": "935122be925c57acb847be73a42a54ec", "score": "0.4939115", "text": "def get_invalid_clientid():\n return 0", "title": "" }, { "docid": "935122be925c57acb847be73a42a54ec", "score": "0.4939115", "text": "def get_invalid_clientid():\n return 0", "title": "" }, { "docid": "d18bd03f272efb497a74a07366967345", "score": "0.49355888", "text": "def getKey(self, capacity):\n return (ord(self._obj[0]) - ord(\"a\")) % capacity", "title": "" }, { "docid": "69f73a489fe96fbb527ce1018faacf5a", "score": "0.4932226", "text": "def pad(self):\n return self[\"pad\"]", "title": "" }, { "docid": "8b0b8ecf732fad333e0736eef00a487e", "score": "0.49251783", "text": "def GetKey(self, version_number):\r\n return self.dict[str(version_number)]", "title": "" }, { "docid": "b2289004b60ddffc0cc1c65599c949ed", "score": "0.49245566", "text": "def external_key(self):\n return self._external_key", "title": "" }, { "docid": "88c8a395827c7f6230f8256584a581d5", "score": "0.49217552", "text": "def address(self):\n if self._address:\n pass\n elif 'address' in self.keystore:\n self._address = self.keystore['address'].decode('hex')\n elif not self.locked:\n self._address = keys.privtoaddr(self.privkey)\n else:\n return None\n return self._address", "title": "" }, { "docid": "de65b6385d998601da1707c0b4dab3d1", "score": "0.49212047", "text": "def get_code(self):\n # This is just a naive approach. 
Exposes the server IP in plain text.\n\n # ip consists of 4 bytes or 32 bits\n # connection message is 16 bytes or 128 bits\n # port is 2 bytes or 16 bits\n priv_ip = list(map(lambda x: int(x), self.addr[0].split('.')))\n ip = list(map(lambda x: int(x), self.public_ip.split('.')))\n logging.info(f\"Private IP is {priv_ip}, Public IP is {ip}.\")\n if priv_ip != ip:\n logging.warning(f\"You must forward private port {self.addr[1]} to public port {self.public_port}.\")\n return convert_62_f((self.connection_key << 16) + \n (self.public_port << 152) + \n (ip[0] << 8) + \n (ip[1] << 144) + \n (ip[2]) + \n (ip[3] << 168))", "title": "" }, { "docid": "f32060d66cb6d10c5efd9c6346bbf2db", "score": "0.4919947", "text": "def key_id(self):\n return self.__key_id", "title": "" }, { "docid": "4a7cc5341e216f12f6f74ab6028df7c9", "score": "0.49106213", "text": "def pubkey(self):\n if not self.locked:\n return privtopub(self.privkey)\n else:\n return None", "title": "" }, { "docid": "bb060d7b0b2920b3b9aabc7619796942", "score": "0.49076554", "text": "def make_recovery_key(numeric_recovery_key):\n rc = numeric_recovery_key\n recovery_key = rc[0:6] + '-' + rc[6:12] + '-' + rc[12:18] + '-' + rc[18:24] + '-' + rc[24:30] + '-' + rc[30:36] +\\\n '-' + rc[36:42] + '-' + rc[42:48]\n return recovery_key", "title": "" }, { "docid": "1f6bb7bbcb94ea58b8d9d619e3b5455c", "score": "0.4902473", "text": "def ttl(self) -> int:\n return pulumi.get(self, \"ttl\")", "title": "" }, { "docid": "ae327d0db5991519444a764911d3da9b", "score": "0.4901226", "text": "def _deadzone(value, deadzone):\n if abs(value) < deadzone:\n return 0\n else:\n return value", "title": "" }, { "docid": "78d25ef6d76851fb37f1baf59987c4c0", "score": "0.49008858", "text": "def public_key(self) -> Optional[str]:\n return self.zil_key and self.zil_key.keypair_str.public", "title": "" }, { "docid": "12e30bbfd590edcdec7a2d55e3fcb094", "score": "0.49004403", "text": "def SessionKeyResp(nonce):\n status = \"113 Nonce \"+ str(nonce)\n return status", "title": "" }, { "docid": "9d20c49e8b7c9a989c55d455697015d6", "score": "0.48979113", "text": "def read_key(self, code: int) -> float:\n return self._lib.wooting_analog_read_analog(code)", "title": "" }, { "docid": "eac800649a0539d588e8557d7f3c610b", "score": "0.48920786", "text": "def lacppartnerkey(self) :\n\t\ttry :\n\t\t\treturn self._lacppartnerkey\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "b3956b25ebf0cb66a4ce4a85cf4b374d", "score": "0.4888326", "text": "def get_dec(self):\n return str(self._ip_dec)", "title": "" }, { "docid": "896926daeb6e903bd138a639e07fea60", "score": "0.48875818", "text": "def get_zone_id(self, zone_no):\n if 0 <= zone_no < len(self.coordinate3d_combined):\n return self.coordinate3d_combined[zone_no]['id']\n else:\n return ''", "title": "" } ]
dde64f202a48bd01e8833eb1d28427af
Creates or updates a remediation at resource group scope.
[ { "docid": "b11475b6785c9d970d4dfdbbabb455e9", "score": "0.7138369", "text": "def create_or_update_at_resource_group(\n self,\n resource_group_name, # type: str\n remediation_name, # type: str\n parameters, # type: \"_models.Remediation\"\n **kwargs # type: Any\n ):\n # type: (...) -> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2019-07-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_or_update_at_resource_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(parameters, 'Remediation')\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if response.status_code == 200:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if response.status_code == 201:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" } ]
[ { "docid": "af2222181999b70c89a1243fa3ad1d4d", "score": "0.6849127", "text": "def create_or_update_at_management_group(\n self,\n management_group_id, # type: str\n remediation_name, # type: str\n parameters, # type: \"_models.Remediation\"\n **kwargs # type: Any\n ):\n # type: (...) -> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n management_groups_namespace = \"Microsoft.Management\"\n api_version = \"2019-07-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_or_update_at_management_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'managementGroupsNamespace': self._serialize.url(\"management_groups_namespace\", management_groups_namespace, 'str'),\n 'managementGroupId': self._serialize.url(\"management_group_id\", management_group_id, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(parameters, 'Remediation')\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if response.status_code == 200:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if response.status_code == 201:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "3c40f85516bb0ed38d1a283333c39db9", "score": "0.58502185", "text": "def create_or_update_at_subscription(\n self,\n remediation_name, # type: str\n parameters, # type: \"_models.Remediation\"\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2019-07-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_or_update_at_subscription.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(parameters, 'Remediation')\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if response.status_code == 200:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if response.status_code == 201:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "10ba5353739c1050666ec04cf62c5d68", "score": "0.5808498", "text": "def create_or_update_at_resource(\n self,\n resource_id, # type: str\n remediation_name, # type: str\n parameters, # type: \"_models.Remediation\"\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2019-07-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_or_update_at_resource.metadata['url'] # type: ignore\n path_format_arguments = {\n 'resourceId': self._serialize.url(\"resource_id\", resource_id, 'str', skip_quote=True),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(parameters, 'Remediation')\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if response.status_code == 200:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if response.status_code == 201:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "bd207aeb8e888fb045287900e298a5b9", "score": "0.5615905", "text": "def remediation(self, remediation):\n self._remediation = remediation", "title": "" }, { "docid": "bd207aeb8e888fb045287900e298a5b9", "score": "0.5615905", "text": "def remediation(self, remediation):\n self._remediation = remediation", "title": "" }, { "docid": "0849a2886504b67f01684650dc419c17", "score": "0.5520816", "text": "def cancel_at_resource_group(\n self,\n resource_group_name, # type: str\n remediation_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2019-07-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.cancel_at_resource_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.post(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "59d423dac448d5898d88d2204f1df160", "score": "0.53263164", "text": "def cancel_at_management_group(\n self,\n management_group_id, # type: str\n remediation_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n management_groups_namespace = \"Microsoft.Management\"\n api_version = \"2019-07-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.cancel_at_management_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'managementGroupsNamespace': self._serialize.url(\"management_groups_namespace\", management_groups_namespace, 'str'),\n 'managementGroupId': self._serialize.url(\"management_group_id\", management_group_id, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.post(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "fc67ae25f08f1175121b0b011af4dfb8", "score": "0.5248496", "text": "def get_at_resource_group(\n self,\n resource_group_name, # type: str\n remediation_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2019-07-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_at_resource_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "e19c9f51f44e7b2b6d367dfb2de4de76", "score": "0.52227175", "text": "def post_group(self, group, overwrite = True):\n if overwrite or not self.exists(group.id):\n if not group.signatures: group.signatures = [self.signature]\n if not group.writers: group.writers = [self.signature]\n response = requests.post(self.groups_url, json=group.to_json(), headers=self.headers)\n response = self.__handle_response(response)\n\n return Group.from_json(response.json())", "title": "" }, { "docid": "e196fdef6b3571a38ff810af651d3af1", "score": "0.51654935", "text": "async def __update_mediation_record(\n self, provided_mediation_invitation: str\n ) -> MediationInviteRecord:\n default_invite = await self.__retrieve()\n\n if default_invite != provided_mediation_invitation:\n default_invite = await self.store(\n MediationInviteRecord.unused(provided_mediation_invitation)\n )\n\n return default_invite", "title": "" }, { "docid": "b29fd4afe78848bcebc8d349bd21ebe6", "score": "0.511356", "text": "def perform_create(self, serializer):\n data = serializer.validated_data\n\n slack = SlackSdk()\n response_data = slack.post_moderation(\n text=data['content'])\n\n message_id = response_data.get('ts')\n\n moderation = Moderation.objects.create(\n content_key=data['content_key'],\n content=data['content'],\n content_author_id=data['content_author_id'],\n status='#modinbox',\n status_reason='moderate',\n message_id=message_id\n )\n serializer.moderation = moderation\n\n ModerationAction.objects.create(moderation=moderation,\n action='moderate')", "title": "" }, { "docid": "a908716ed331c8f37a0474c70fa739ae", "score": "0.5021546", "text": "def post(self, request, aggregate_id=None):\n try:\n img = None\n img_url = None\n icon_img = None\n icon_img_url = None\n typespec = None\n\n 
# Authenticate the user\n auth_user = auth_manager.do_auth(request)\n creator = MediaUser.objects.get(username=auth_user.username)\n\n if aggregate_id is not None:\n # Updates an existing Amenity\n amenity = MediaAggregate.objects.get(id=aggregate_id)\n if amenity.owner == creator:\n do_action = True if 'action' in request.query_params\\\n else False\n if do_action:\n action = request.query_params['action']\n return amentiycontroller.handle_operations(\n amenity,\n action,\n request.query_params,\n request.data)\n # do the normal update\n # (Note:Sonu) Add the handle_update operation into\n # controller.\n return self.handle_update(request, amenity)\n else:\n raise UserNotAuthorizedException()\n # Create a new aggregator instance\n # Lookup the type and assign the instance\n typespec = MediaAggregateType.objects.get(\n typename=request.data['type'])\n # Store icon image\n if 'icon_content' in request.data:\n icon_img_ser = JpegImageContentSerializer(\n data=request.data['icon_content'])\n if icon_img_ser.is_valid(raise_exception=True):\n icon_img = icon_img_ser.save()\n icon_img_url = icon_img.get_absolute_url()\n # Store image\n if 'image_content' in request.data:\n img_ser = JpegImageContentSerializer(\n data=request.data['image_content'])\n if img_ser.is_valid(raise_exception=True):\n img = img_ser.save()\n img_url = img.get_absolute_url()\n # Serialize the aggregator object\n serializer = MediaAggregateSerializer(\n data=request.data)\n if serializer.is_valid(raise_exception=True):\n srcobj = serializer.save(created_time=datetime.now(),\n image_content=img,\n image_url=img_url,\n icon_content=icon_img,\n icon_image_url=icon_img_url)\n srcobj.update(owner=creator, typespec=typespec)\n # (Note:Sonu)\n # Create a default source and attach it to Aggregate,\n # This default source will host all the in-house campaigns\n # for the aggregate by default.\n # Additional sources can be handled by the aggregate\n # owner\n # Most of the properties are derived from the MediaAggregate\n # property.\n inhouse_source =\\\n self.factory.create_instance(\n name=srcobj.name,\n display_name=srcobj.display_name,\n caption=\"inhouse source\",\n type=typespec.typename,\n tags=typespec.category,\n source_internet_settings=json.dumps(\n srcobj.internet_settings),\n category=typespec.category,\n point=srcobj.location)\n srcobj.update(inhouse_source=inhouse_source)\n srcobj.save()\n return JSONResponse(serializer.data,\n status=HTTP_201_CREATED)\n except DoesNotExist as e:\n print e\n return JSONResponse(str(e),\n status=HTTP_404_NOT_FOUND)\n except UserNotAuthorizedException as e:\n print e\n return JSONResponse(str(e),\n status=HTTP_401_UNAUTHORIZED)\n except Exception as e:\n print e\n return JSONResponse(str(e),\n status=HTTP_500_INTERNAL_SERVER_ERROR)", "title": "" }, { "docid": "f4819b6cf6c1b47ae825404e68766571", "score": "0.5015683", "text": "def add(group, persister=None):\n persister.exec_stmt(Group.INSERT_GROUP,\n {\"params\": (group.group_id, group.description, group.status)}\n )", "title": "" }, { "docid": "1b083473ab275a31941dab0ec8eca0a9", "score": "0.5005575", "text": "def assign_perm_to_group(instance,name,permissioin):\n\tlogger = logging.getLogger(__name__)\n\n\ttry:\n\t\tgroup = Group.objects.get(name=name)\n\t\tassign_perm(permissioin, group, instance)\n\texcept Exception as e:\n\t\tlogger.exception(e)\n\n\treturn instance", "title": "" }, { "docid": "bb2718ab785fcef45283358cd7b23606", "score": "0.49702054", "text": "def delete_at_resource_group(\n self,\n resource_group_name, # type: str\n 
remediation_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) -> Optional[\"_models.Remediation\"]\n cls = kwargs.pop('cls', None) # type: ClsType[Optional[\"_models.Remediation\"]]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2019-07-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.delete_at_resource_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.delete(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = None\n if response.status_code == 200:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "e7dfe145785de127cb132df34584f343", "score": "0.49675414", "text": "def create(self, request, *args, **kwargs):\n try:\n with transaction.atomic():\n res_json = {}\n\n request_data = request.data\n\n if request_data['type'] == SO_TYPE_REMEDIATION_NOTICE:\n if not len(request_data['remediation_actions']):\n # Type is remediation action but no remediation actions defined\n raise serializers.ValidationError(['You must define at least one remediation action.'])\n\n # offence and offender\n request_data['offence_id'] = request_data.get('current_offence', {}).get('id', None) # This raises an error when empty string is passed\n request_data['offender_id'] = request_data.get('current_offender', {})\n if request_data['offender_id'] in ({}, ''):\n request_data['offender_id'] = None\n else:\n request_data['offender_id'] = request_data['offender_id'].get('id', None)\n\n # workflow\n workflow_type = request_data.get('workflow_type', '')\n\n # allocated group\n regionDistrictId = request_data['district_id'] if request_data['district_id'] else request_data['region_id']\n groups = self.get_compliance_permission_groups(regionDistrictId, workflow_type)\n if groups.count() == 1:\n group = groups.first()\n elif groups.count() > 1:\n group = groups.first()\n request_data['allocated_group_id'] = group.id\n\n # Count number of files uploaded\n num_of_documents = 0\n temporary_document_collection_id = request.data.get('temporary_document_collection_id')\n if temporary_document_collection_id:\n temp_doc_collection, created = TemporaryDocumentCollection.objects.get_or_create(\n 
id=temporary_document_collection_id)\n if temp_doc_collection:\n num_of_documents = temp_doc_collection.documents.count()\n # request_data['num_of_documents_attached'] = num_of_documents # Pass number of files attached for validation\n # You can access this data by self.initial_data['num_of_documents_attached'] in validate(self, data) method\n\n # Save sanction outcome (offence, offender, alleged_offences)\n if hasattr(request_data, 'id') and request_data['id']:\n instance = SanctionOutcome.objects.get(id=request_data['id'])\n serializer = SaveSanctionOutcomeSerializer(instance, data=request_data, partial=True, context={'num_of_documents_attached': num_of_documents})\n else:\n serializer = SaveSanctionOutcomeSerializer(data=request_data, partial=True, context={'num_of_documents_attached': num_of_documents})\n serializer.is_valid(raise_exception=True)\n instance = serializer.save()\n\n # Action log for creation\n instance.log_user_action(SanctionOutcomeUserAction.ACTION_CREATE.format(instance.lodgement_number), request)\n\n # Link temp uploaded files to the sanction outcome\n if num_of_documents:\n for doc in temp_doc_collection.documents.all():\n save_default_document_obj(instance, doc)\n temp_doc_collection.delete()\n\n # Create relations between this sanction outcome and the alleged offence(s)\n count_alleged_offences = 0\n for ao_id in request_data['alleged_offence_ids_included']:\n # alleged_offence = AllegedOffence.objects.get(id=ao_id)\n # alleged_commited_offence = AllegedCommittedOffence.objects.create(sanction_outcome=instance, alleged_offence=alleged_offence, included=True)\n\n data = {'alleged_offence_id': ao_id, 'sanction_outcome_id': instance.id}\n serializer = AllegedCommittedOffenceCreateSerializer(data=data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n serializer.save()\n count_alleged_offences += 1\n\n # Validate if alleged offences are selected\n if count_alleged_offences == 0:\n if instance.type == SO_TYPE_INFRINGEMENT_NOTICE:\n raise serializers.ValidationError(['You must select an alleged committed offence.'])\n else:\n raise serializers.ValidationError(['You must select at least one alleged committed offence.'])\n\n # Validate if an offender is selected\n if not instance.offender:\n if not instance.is_parking_offence: #\n raise serializers.ValidationError(['An offender must be selected.'])\n\n # Validate if an offender or a registration number is set at least\n if instance.is_parking_offence:\n if not instance.registration_number and not instance.offender:\n raise serializers.ValidationError(['Either offender or registration number is required.'])\n\n for id in request_data['alleged_offence_ids_excluded']:\n try:\n alleged_offence = AllegedOffence.objects.get(id=id)\n alleged_commited_offence = AllegedCommittedOffence.objects.create(sanction_outcome=instance, alleged_offence=alleged_offence, included=False)\n except:\n pass # Should not reach here\n\n # Handle workflow\n if workflow_type == SanctionOutcome.WORKFLOW_SEND_TO_MANAGER:\n instance.send_to_manager(request)\n elif not workflow_type:\n instance.save()\n\n # Save remediation action, and link to the sanction outcome\n for dict in request_data['remediation_actions']:\n dict['sanction_outcome_id'] = instance.id\n dict['action'] = dict['action_text']\n remediation_action = SaveRemediationActionSerializer(data=dict)\n if remediation_action.is_valid(raise_exception=True):\n remediation_action.save()\n\n # Log CallEmail action\n if request_data.get('call_email_id'):\n call_email = 
CallEmail.objects.get(id=request_data.get('call_email_id'))\n                    call_email.log_user_action(\n                            CallEmailUserAction.ACTION_SANCTION_OUTCOME.format(\n                            instance.lodgement_number), \n                            request)\n\n                # Log Inspection action\n                if request_data.get('inspection_id'):\n                    inspection = Inspection.objects.get(id=request_data.get('inspection_id'))\n                    inspection.log_user_action(\n                            InspectionUserAction.ACTION_SANCTION_OUTCOME.format(\n                            instance.lodgement_number), \n                            request)\n\n                # Create/Retrieve comms log entry\n                comms_log_id = request.data.get('comms_log_id')\n                if comms_log_id and comms_log_id != 'null':\n                    workflow_entry = instance.comms_logs.get(id=comms_log_id)\n                else:\n                    workflow_entry = self.add_comms_log(request, instance, workflow=True)\n\n                if workflow_type == SanctionOutcome.WORKFLOW_SEND_TO_MANAGER:\n                    # email_data = prepare_mail(request, instance, workflow_entry, send_mail)\n                    compliance_group = CompliancePermissionGroup.objects.get(id=request.data.get('allocated_group_id'))\n                    to_address = [user.email for user in compliance_group.members.all()]\n                    cc = [request.user.email,]\n                    bcc = None\n                    email_data = send_to_manager_email(to_address, instance, workflow_entry, request, cc, bcc)\n\n                    # Log email communication\n                    serializer = SanctionOutcomeCommsLogEntrySerializer(instance=workflow_entry, data=email_data, partial=True)\n                    serializer.is_valid(raise_exception=True)\n                    serializer.save()\n\n                # Return\n                return HttpResponse(res_json, content_type='application/json')\n\n        except serializers.ValidationError:\n            print(traceback.print_exc())\n            raise\n        except ValidationError as e:\n            print(traceback.print_exc())\n            if hasattr(e, 'error_dict'):\n                raise serializers.ValidationError(repr(e.error_dict))\n            else:\n                # raise serializers.ValidationError(repr(e[0].encode('utf-8')))\n                raise serializers.ValidationError(repr(e[0]))\n        except Exception as e:\n            print(traceback.print_exc())\n            raise serializers.ValidationError(str(e))", "title": "" }, { "docid": "5bfd58c78be0467fb009d522c75d5f7e", "score": "0.49632418", "text": "def test_add_group(self):\n\t\tdraft = ReviewRequestDraft.create(self.review_request)\n\t\tdraft.summary = \"Test Summary\"\n\t\tdraft.target_groups.add(self.group)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1)\n\t\tself.review_request.publish(self.user)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1, total_incoming=1, group_incoming=1, starred_public=1)", "title": "" }, { "docid": "bc09b77e73d4bdd5c53f821fe3c4960d", "score": "0.49266875", "text": "def get_at_management_group(\n        self,\n        management_group_id,  # type: str\n        remediation_name,  # type: str\n        **kwargs  # type: Any\n    ):\n        # type: (...) 
-> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n management_groups_namespace = \"Microsoft.Management\"\n api_version = \"2019-07-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_at_management_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'managementGroupsNamespace': self._serialize.url(\"management_groups_namespace\", management_groups_namespace, 'str'),\n 'managementGroupId': self._serialize.url(\"management_group_id\", management_group_id, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "186b2eb67ee602073c8d4162348d6cd0", "score": "0.48800674", "text": "def create(persister=None):\n persister.exec_stmt(Group.CREATE_GROUP)\n persister.exec_stmt(Group.CREATE_GROUP_REPLICATION)", "title": "" }, { "docid": "63223c90278f9792583a9e94406a056f", "score": "0.4878813", "text": "def handle_update(self, request, amenity):\n try:\n img = None\n img_url = None\n icon_img = None\n icon_img_url = None\n\n # Update existing aggregator instance\n # Store icon image\n if 'icon_content' in request.data:\n _req = {\"image\": request.data['icon_content'],\n \"image_type\": \"png\"}\n icon_img_ser = JpegImageContentSerializer(\n data=_req)\n if icon_img_ser.is_valid(raise_exception=True):\n icon_img = icon_img_ser.save()\n icon_img_url = icon_img.get_absolute_url()\n\n # Store image\n if 'image_content' in request.data:\n _req = {\"image\": request.data['image_content'],\n \"image_type\": \"png\"}\n img_ser = JpegImageContentSerializer(\n data=_req)\n if img_ser.is_valid(raise_exception=True):\n img = img_ser.save()\n img_url = img.get_absolute_url()\n\n if 'typespec' in request.data:\n typesepc_ser = MediaAggregateTypeSerializer(\n data=request.data['typespec'], partial=True)\n if typesepc_ser.is_valid(raise_exception=True):\n typespec = typesepc_ser.save()\n\n # Serialize the aggregator object\n serializer = MediaAggregateSerializer(\n data=request.data, partial=True)\n # Check if serializer is valid\n if serializer.is_valid(raise_exception=True):\n srcobj = serializer.update(amenity, serializer.validated_data)\n srcobj.update(image_content=img,\n icon_content=icon_img,\n image_url=img_url,\n icon_image_url=icon_img_url)\n return JSONResponse(serializer.validated_data,\n 
status=HTTP_200_OK)\n except UserNotAuthorizedException as e:\n print e\n return JSONResponse(str(e),\n status=HTTP_401_UNAUTHORIZED)\n except Exception as e:\n print e\n return JSONResponse(str(e),\n status=HTTP_500_INTERNAL_SERVER_ERROR)", "title": "" }, { "docid": "75ffc190ef9399512ef062e481363384", "score": "0.48339", "text": "def record(running_app, minimal_record):\n s = current_rdm_records.records_service\n draft = s.create(system_identity, minimal_record)\n return s.publish(system_identity, draft.id)", "title": "" }, { "docid": "6375c60662c8effb795e22a2f408c7ce", "score": "0.4804024", "text": "def create(self, name):\n self.options['group_name'] = name \n self.options['action'] = 'group.create'\n return self.call(self.options)", "title": "" }, { "docid": "6f6354a1e4afad238bd4850d346f4f87", "score": "0.47695532", "text": "def __create_resource_group(args):\n\n resource_client = __create_resource_management_client()\n resource_client.resource_groups.create_or_update(\n args.resource_group_name,\n {\"location\": \"westus\"}\n ).result()", "title": "" }, { "docid": "7f9169d1c958001caa58b91c72e37878", "score": "0.47620702", "text": "def update_group(self, GroupName: str = None, GroupARN: str = None, FilterExpression: str = None) -> Dict:\n pass", "title": "" }, { "docid": "ebcd877ffcbb201650c9b033047c0eed", "score": "0.47543424", "text": "def post(self, request, user_id, format=None):\n data = request.data\n right_choices = ['moderator', 'admin']\n action_choices = ['promote', 'demote']\n errors = {}\n\n # validation\n if not data['right'] or not data['right'] in right_choices:\n errors['right'] = ('this field is required and must be one of '\n + 'the following options'\n + ', '.join(right_choices))\n if not data['action'] or not data['action'] in action_choices:\n errors['action'] = ('this field is required and must be one of '\n + 'the following options'\n + ', '.join(action_choices))\n if not User.objects.filter(id=user_id).exists():\n errors['id'] = 'a user with the id #' + user_id + ' does not exist'\n if errors:\n return Response(errors, status=status.HTTP_400_BAD_REQUEST)\n\n # actual behaviour\n user = User.objects.get(id=user_id)\n group = Group.objects.get(name=data['right'])\n action = data['action']\n if action == 'promote':\n user.groups.add(group)\n elif action == 'demote':\n user.groups.remove(group)\n return Response(serializers.UserSerializer(user).data)", "title": "" }, { "docid": "3b77f790354cdda37b0e2e14e619b923", "score": "0.4750311", "text": "def add_group(group):", "title": "" }, { "docid": "724247784f624851f40cc031931fca86", "score": "0.4738862", "text": "def create(self, group):\n self.request.mongo_connection.shinken.contactgroups.insert(\n group.as_dict()\n )", "title": "" }, { "docid": "8522d1ca7b8249b47d75ceb0e9efda1d", "score": "0.47312427", "text": "def resource_group_set(name: str, location: str) -> ResourceGroup:\n command: List[str] = ['az', 'group', 'create', f'--name={name}', f'--location={location}']\n sh.print_command(command)\n process = sh.run_subprocess(command)\n # sh.log_subprocess(LOG, process, debug=ARGS.debug)\n if process.returncode != 0:\n return ResourceGroup()\n # resource_group = ResourceGroup(process.stdout)\n resource_group: ResourceGroup = json_to_dataclass(process.stdout, ResourceGroup)\n resource_group.changed = True\n # LOG.debug(\"resource_group: {resource_group}\")\n return resource_group", "title": "" }, { "docid": "ee2699f2d7f93d60235bc655e09b8ef2", "score": "0.46945423", "text": "def add_research_group(self, obj):\n cursor = 
self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO research_group(name, abbreviation, logo_location, description_id, address, '\n 'telephone_number, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s)',\n (obj.name, obj.abbreviation, obj.logo_location, obj.description_id, obj.address,\n obj.telephone_number, obj.is_active))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "title": "" }, { "docid": "7496d8f009ee9d9ebe992011d4584abc", "score": "0.46903872", "text": "def modify_group(self, group, effective_path):\n try:\n permissions = self.isi_sdk.NamespaceAcl(\n authoritative='mode',\n group=group)\n self.namespace_api.set_acl(namespace_path=effective_path,\n acl=True,\n namespace_acl=permissions)\n except Exception as e:\n error_msg = self.determine_error(error_obj=e)\n error_message = 'Failed to modify group ' \\\n 'due to error {0}'.format(str(error_msg))\n LOG.error(error_message)\n self.module.fail_json(msg=error_message)", "title": "" }, { "docid": "832972cbec027f7309d896bbc0c9db17", "score": "0.46691564", "text": "async def store(\n self, mediation_invite: MediationInviteRecord\n ) -> MediationInviteRecord:\n current_invite_record = await self.__retrieve_record(self.MEDIATION_INVITE_ID)\n\n if current_invite_record is None:\n await self.__storage.add_record(\n StorageRecord(\n type=self.INVITE_RECORD_CATEGORY,\n id=self.MEDIATION_INVITE_ID,\n value=mediation_invite.to_json(),\n )\n )\n else:\n await self.__storage.update_record(\n current_invite_record,\n mediation_invite.to_json(),\n tags=current_invite_record.tags,\n )\n\n return mediation_invite", "title": "" }, { "docid": "24f559a9562627c9b6f0d791ba9763a3", "score": "0.46667236", "text": "def __try_create(group_data):\n logging.info(f'trying to create group {group_data.get(\"displayName\")}')\n make_request(f'{GRAPH_URL}{RESOURCE_PATH}', 'POST', group_data)\n logging.info(f'group {group_data.get(\"displayName\")} created successfully')", "title": "" }, { "docid": "1fc1608e716e3a5deb4be68ed24d0672", "score": "0.46619722", "text": "async def post_group(\n request: Request,\n group: GroupCreate,\n) -> dict[str, Optional[int]]:\n app = request.app\n try:\n data = await app.db.groups.insert(group)\n return {\"id\": data[\"id\"]}\n except DatabaseIntegrityException as exc:\n raise HTTPException(status_code=400, detail=exc.detail) from exc", "title": "" }, { "docid": "0c6614c850d2d71cccc4efd34eec0660", "score": "0.4658458", "text": "def put(self):\n r = request.get_json()\n rs = Con.set_member_togroup(r['username'], r['groupname'])\n\n return jsonify({'result': rs})", "title": "" }, { "docid": "82e3ef63b6e9813fce1ae8d32fe9a4d1", "score": "0.46552604", "text": "def __add_group(self, tx, name):\n tx.run(\"MERGE (a:Group { name: $name})\", name=name)", "title": "" }, { "docid": "5502aed07204d74929eefba8b7ae8a53", "score": "0.46440285", "text": "def create_group():\n body = request.get_json(force=True)\n group_id = body.get('groupId')\n # check in redis if group_id already exists\n success = chat_service.create_conversation(group_id)\n return_code = 201 if success else 409\n return jsonify({'success': success}), return_code", "title": "" }, { "docid": "3c43a533cd22b094a2af03490a37ece6", "score": "0.46324277", "text": "def api_node_group_assignments_write(request):\n\n au = get_authenticated_user(request)\n\n try:\n payload = request.json_body\n\n node_id = payload['node_id']\n node_group_id = payload['node_group_id']\n\n log.debug('Checking for node_group_assignment node_id={0},node_group_id={1}'.format(node_id, 
node_group_id))\n\n try:\n nga = DBSession.query(NodeGroupAssignment)\n nga = nga.filter(NodeGroupAssignment.node_id==node_id)\n nga = nga.filter(NodeGroupAssignment.node_group_id==node_group_id)\n nga = nga.one()\n log.info('node_group_assignment already exists')\n return Response(content_type='application/json', status_int=409)\n except NoResultFound:\n try:\n log.debug('Creating new node_group_assignment for node_id={0},node_group_id={1}'.format(node_id, node_group_id))\n utcnow = datetime.utcnow()\n\n nga = NodeGroupAssignment(node_id=node_id,\n node_group_id=node_group_id,\n updated_by=au['user_id'],\n created=utcnow,\n updated=utcnow)\n\n DBSession.add(nga)\n DBSession.flush()\n except Exception as e:\n log.error('Error creating new node_group_assignment node_id={0},node_group_id={1},exception={2}'.format(node_id, node_group_id, e))\n raise\n\n except Exception as e:\n log.error('Error writing to node_group_assignment API={0},exception={1}'.format(request.url, e))\n return Response(str(e), content_type='application/json', status_int=500)", "title": "" }, { "docid": "2c9f2dc74c43a861b9c3a3d4b16a8c40", "score": "0.4621976", "text": "def update(self):\n self.create()", "title": "" }, { "docid": "aa5db294e78b2ea089acd03aa2bc1ab1", "score": "0.4599321", "text": "def multizone_new_media_status(self, group_uuid: str, media_status: MediaStatus):", "title": "" }, { "docid": "326e21173914d97ee451f34443768fe2", "score": "0.45985007", "text": "def added_to_multizone(self, group_uuid: str):", "title": "" }, { "docid": "ceab99b05372454ed056c0aad758ebc7", "score": "0.45918852", "text": "def update_research_group(self, group_name, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE research_group '\n 'SET name = %s, abbreviation = %s, logo_location = %s, description_id = %s, '\n 'address = %s, telephone_number = %s, is_active = %s '\n 'WHERE name=%s',\n (obj.name, obj.abbreviation, obj.logo_location, obj.description_id, obj.address,\n obj.telephone_number, obj.is_active, group_name))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "title": "" }, { "docid": "88e6dbc25530a053bc87c3eeda8493be", "score": "0.4584425", "text": "def test_update_group(self):\n pass", "title": "" }, { "docid": "8496c5cf9657ea455b9a863fc1742ae0", "score": "0.45763978", "text": "def save_record_set(self, resource_record_set):\n LOG.info(\"[+] save_record_set: %s\" % str(resource_record_set))\n\n recode_type = resource_record_set.get('Type')\n\n filter_opt = {\n 'HostedZoneId': resource_record_set.get('HostedZoneId'), # uniq Hosted-Zone\n 'Name': resource_record_set.get('Name'),\n 'Type': recode_type,\n }\n\n # CNAME(s)\n if recode_type.upper() == 'CNAME' and 'SetIdentifier' in resource_record_set:\n filter_opt['SetIdentifier'] = resource_record_set.get('SetIdentifier')\n\n return_val = self.mgo_collection.find_one(filter_opt)\n\n # If exists, return a document.\n # If not exists, return None\n\n # If no document, insert a new document\n if not return_val:\n LOG.info(\"insert a new document\")\n self.mgo_collection.insert_one(resource_record_set)\n return\n\n # If a document, check if the document needs to be updated.\n return_val.pop('_id')\n\n # return_val : previous document from mongodb\n # resource_record_set : current document from aws\n self.check_and_alarm(return_val, resource_record_set)\n\n # always update the document\n update_val = {\n \"$set\": resource_record_set\n }\n\n self.mgo_collection.update(filter_opt, update_val)", "title": "" }, { "docid": 
"08933b3a87b4004970da1dcfae9367f4", "score": "0.45757326", "text": "def perform_create(self, serializer):\n queryset = self.request.user.wantToWatchMediaItem.\\\n all().filter(mdbID=self.request.data['mdbID'])\n if queryset.exists():\n raise ValidationError('Elementet finnes allerede i listen.')\n serializer.save(owner=self.request.user)", "title": "" }, { "docid": "39bcf8eba1f8962796b2cbb5fdf9832e", "score": "0.4573148", "text": "def updateOrCreateRecordsForSurveyGroup(request, *args, **kwargs):\n\n from soc.modules.gsoc.logic.models.grading_record import logic as grading_record_logic\n from soc.modules.gsoc.logic.models.grading_survey_group import logic as survey_group_logic\n from soc.modules.gsoc.logic.models.student_project import logic as student_project_logic\n\n post_dict = request.POST\n\n group_key = post_dict.get('group_key')\n\n if not group_key:\n # invalid task data, log and return OK\n return error_handler.logErrorAndReturnOK(\n 'Invalid updateRecordForSurveyGroup data: %s' % post_dict)\n\n # get the GradingSurveyGroup for the given keyname\n survey_group_entity = survey_group_logic.getFromKeyName(group_key)\n\n if not survey_group_entity:\n # invalid GradingSurveyGroup specified, log and return OK\n return error_handler.logErrorAndReturnOK(\n 'Invalid GradingSurveyGroup specified: %s' % group_key)\n\n # check and retrieve the project_key that has been done last\n if 'project_key' in post_dict:\n project_start_key = post_dict['project_key']\n else:\n project_start_key = None\n\n # get all valid StudentProjects from starting key\n fields = {'program': survey_group_entity.scope,\n 'status': ['accepted', 'failed', 'completed']}\n\n if project_start_key:\n # retrieve the last project that was done\n project_start = student_project_logic.getFromKeyName(project_start_key)\n\n if not project_start:\n # invalid starting project key specified, log and return OK\n return error_handler.logErrorAndReturnOK(\n 'Invalid Student Project Key specified: %s' %(project_start_key))\n\n fields['__key__ >'] = project_start.key()\n\n # get the first batch_size number of StudentProjects\n project_entities = student_project_logic.getForFields(fields,\n limit=DEF_BATCH_SIZE)\n\n # update/create and batch put the new GradingRecords\n grading_record_logic.updateOrCreateRecordsFor(survey_group_entity,\n project_entities)\n\n if len(project_entities) == DEF_BATCH_SIZE:\n # spawn new task starting from the last\n new_project_start = project_entities[DEF_BATCH_SIZE-1].key().id_or_name()\n\n # pass along these params as POST to the new task\n task_params = {'group_key': group_key,\n 'project_key': new_project_start}\n task_url = '/tasks/grading_survey_group/update_records'\n\n new_task = taskqueue.Task(params=task_params, url=task_url)\n new_task.add()\n else:\n # task completed, update timestamp for last update complete\n fields = {'last_update_complete': datetime.datetime.now()}\n survey_group_logic.updateEntityProperties(survey_group_entity, fields)\n\n # task completed, return OK\n return http.HttpResponse('OK')", "title": "" }, { "docid": "a370a7023ec40f28d864f4b2ee676fe5", "score": "0.4558871", "text": "def create(self):\n path = '/projects/%s/groups/' % (self.client.project,)\n info = self.client._connection.api_request(\n method='POST', path=path, data=self._to_dict())\n self._set_properties_from_dict(info)", "title": "" }, { "docid": "e4f53d7405ec7b0b8560c94955725704", "score": "0.45582944", "text": "def manage_saveNotifyGroup(self, notify_group, REQUEST):\r\n 
self.manage_addNotifyableGroup(notify_group)\r\n\r\n msg = \"%s created.\"%NOTIFYABLEGROUP_METATYPE\r\n url = self._getManagementFormURL(msg)\r\n REQUEST.RESPONSE.redirect(url)", "title": "" }, { "docid": "e8c595eedbe6b6b1020367d936fed52e", "score": "0.45543867", "text": "def add_group(self, name):\n with self.driver.session() as session:\n session.write_transaction(self.__add_group, name)", "title": "" }, { "docid": "c2d7776877e60c8b9f498f40a73bf92c", "score": "0.4551829", "text": "def set_to_group(self, group: Group) -> bool:\n addr = self._bridge._url + f\"/groups/{group.gid}/action\"\n payload = f\"{{\\\"scene\\\": \\\"{self._sid}\\\"}}\"\n return self.api_put(addr, payload)", "title": "" }, { "docid": "03ae76d7b67c333241d9910b0d5ff123", "score": "0.45477793", "text": "def update_share(self, group_id, resource_id, **kwargs):\n properties = {}\n\n for attr in kwargs.keys():\n properties[self._underscore_to_camelcase(attr)] = kwargs[attr]\n\n data = {\n \"properties\": properties\n }\n\n response = self._perform_request(\n url='/um/groups/%s/shares/%s' % (group_id, resource_id),\n method='PUT',\n data=json.dumps(data))\n\n return response", "title": "" }, { "docid": "6558a5c006032ab2d099cd9b101e6949", "score": "0.45417726", "text": "def update_groups(data):\n from .api.events import EventAPI\n\n provider = data.get('Provider')\n identifiers = data.get('Object').get('Identifier')\n event = []\n source_identifier = identifiers.pop()\n for identifier in identifiers:\n payload = {\n \"RelationshipType\": {\n \"Name\": \"IsRelatedTo\",\n \"SubTypeSchema\": \"DataCite\",\n \"SubType\": \"IsIdenticalTo\"\n },\n \"Target\": {\n \"Identifier\": identifier,\n \"Type\": {\n \"Name\": \"unknown\"\n }\n },\n \"LinkProvider\": [\n {\n \"Name\": provider\n }\n ],\n \"Source\": {\n \"Identifier\": source_identifier,\n \"Type\": {\n \"Name\": \"unknown\"\n }\n },\n \"LinkPublicationDate\": str(datetime.now())\n }\n event.append(payload)\n try:\n EventAPI.handle_event(event, no_index=True, delayed=False)\n except ValueError:\n pass\n\n try:\n group = get_group_from_id(\n identifiers[0]['ID'], identifiers[0]['IDScheme'])\n if group:\n group.data.update(data.get('Object'))\n db.session.commit()\n except Exception:\n pass", "title": "" }, { "docid": "8d6b67ab3949a983b29a0c6f876ac75a", "score": "0.454132", "text": "def modify_dbinstance_resource_group(\n self,\n request: gpdb_20160503_models.ModifyDBInstanceResourceGroupRequest,\n ) -> gpdb_20160503_models.ModifyDBInstanceResourceGroupResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_resource_group_with_options(request, runtime)", "title": "" }, { "docid": "e941c32c51fd8ae16f066195bfe09e72", "score": "0.45361036", "text": "def update(self):\n return self.connection._update_group('UpdateAutoScalingGroup', self)", "title": "" }, { "docid": "7dfe33ebe54a236766ff51f49874f7b4", "score": "0.4534104", "text": "def update(self, instance, validated_data):\n instance.moderator = self.context['request'].user\n instance.approval_at = timezone.now()\n return super().update(instance, validated_data)", "title": "" }, { "docid": "a9ca129b1aed0e6b58313bd084e888e4", "score": "0.45311558", "text": "def Create(iam,groupname: str,tag='/'):\n\t\t\t\treturn iam.resource.Group(groupname).create(Path=AWS.preptag(tag))", "title": "" }, { "docid": "7306f933c072a60b14747df25a28d54a", "score": "0.45270988", "text": "def test_set_group_if_reply_overwrites_groups_in_replies():\n fetcher = _fake_fetcher({'group': 'parent_group'})\n annotation = {\n 'group': 
'this should be overwritten',\n 'references': ['parent_id']\n }\n\n transform.set_group_if_reply(annotation, fetcher=fetcher)\n\n assert annotation['group'] == \"parent_group\"", "title": "" }, { "docid": "c6b0fec5fdf2707f1242caceb7e1a117", "score": "0.45116875", "text": "def test_set_group_if_reply_adds_group_to_replies():\n fetcher = _fake_fetcher({'group': 'parent_group'})\n annotation = {'references': ['parent_id']}\n\n transform.set_group_if_reply(annotation, fetcher=fetcher)\n\n assert annotation['group'] == \"parent_group\"", "title": "" }, { "docid": "7fd8fa7d3b9a15cd3eb289843582b4e5", "score": "0.45096436", "text": "def to_modify(pool_details, use_rmcache, use_rfcache, new_name, media_type):\n pool_name = pool_details['name']\n pool_use_rfcache = pool_details['useRfcache']\n pool_use_rmcache = pool_details['useRmcache']\n pool_media_type = pool_details['mediaType']\n modify_params = {}\n\n if new_name is not None and pool_name != new_name:\n modify_params['new_name'] = new_name\n if use_rfcache is not None and pool_use_rfcache != use_rfcache:\n modify_params['use_rfcache'] = use_rfcache\n if use_rmcache is not None and pool_use_rmcache != use_rmcache:\n modify_params['use_rmcache'] = use_rmcache\n if media_type is not None and media_type != pool_media_type:\n modify_params['media_type'] = media_type\n return modify_params", "title": "" }, { "docid": "677ed41b795c3e1c1a31e62d1d258438", "score": "0.45027736", "text": "def save_groupinvitation(self, group_invite):\n\n with GroupInvitationMapper() as mapper:\n mapper.update(group_invite)", "title": "" }, { "docid": "f238af84ad68503b5331c379667c45cd", "score": "0.45025045", "text": "def ad_group_set(name: str) -> AdGroup:\n command: List[str] = ['az', 'ad', 'group', 'create',\n f'--display-name={name}', f'--mail-nickname={name}']\n sh.print_command(command)\n process = sh.run_subprocess(command)\n # sh.log_subprocess(LOG, process, debug=ARGS.debug)\n if process.returncode != 0:\n return AdGroup()\n ad_group: AdGroup = json_to_dataclass(process.stdout, AdGroup)\n ad_group.changed = True\n # LOG.debug(f'ad_group: {ad_group}')\n return ad_group", "title": "" }, { "docid": "7eb49d58813396575b23a4a8b6aea402", "score": "0.4502351", "text": "def delete_at_management_group(\n self,\n management_group_id, # type: str\n remediation_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> Optional[\"_models.Remediation\"]\n cls = kwargs.pop('cls', None) # type: ClsType[Optional[\"_models.Remediation\"]]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n management_groups_namespace = \"Microsoft.Management\"\n api_version = \"2019-07-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.delete_at_management_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'managementGroupsNamespace': self._serialize.url(\"management_groups_namespace\", management_groups_namespace, 'str'),\n 'managementGroupId': self._serialize.url(\"management_group_id\", management_group_id, 'str'),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.delete(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = None\n if response.status_code == 200:\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "229d1fce9f10def35c5ea1a9874e5426", "score": "0.44997305", "text": "def post(self, project_group):\n\n created_group = project_groups.project_group_create(\n project_group.as_dict())\n\n return wmodels.ProjectGroup.from_db_model(created_group)", "title": "" }, { "docid": "1ac883b646092f6ecb5a8d24585d1aad", "score": "0.44954446", "text": "def post_group(self, invite_id):\n group_id = self._data().get('unique_id')\n\n command = commands.AddGroupAttendeesCommand(\n invite_unique_id=invite_id,\n group_unique_id=group_id,\n user=self.user\n )\n return command.execute()", "title": "" }, { "docid": "c64d5a9fe755aefd4a077fe27912c8fe", "score": "0.44708467", "text": "def add_share(self, group_id, resource_id, **kwargs):\n properties = {}\n\n for attr in kwargs.keys():\n properties[self._underscore_to_camelcase(attr)] = kwargs[attr]\n\n data = {\n \"properties\": properties\n }\n\n response = self._perform_request(\n url='/um/groups/%s/shares/%s' % (group_id, resource_id),\n method='POST',\n data=json.dumps(data))\n\n return response", "title": "" }, { "docid": "d80a0a510b0b04b09060c1885a906fb9", "score": "0.4466036", "text": "def post(self, request, *args, **kwargs):\n\n serializer = UpdateGroupFileRepositoryRightSerializer(data=request.data, context=self.get_serializer_context())\n\n if not serializer.is_valid():\n\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST\n )\n\n group_file_repository_right = serializer.validated_data['group_file_repository_right']\n group_file_repository_right.read = serializer.validated_data['read']\n group_file_repository_right.write = 
serializer.validated_data['write']\n group_file_repository_right.grant = serializer.validated_data['grant']\n group_file_repository_right.save()\n\n return Response(status=status.HTTP_200_OK)", "title": "" }, { "docid": "c250800a12034c2fc1712ace388d908d", "score": "0.446277", "text": "def replace_contributor(self, old, new):\n if not self.is_member(old):\n return False\n\n # Remove unclaimed record for the group\n if self._id in old.unclaimed_records:\n del old.unclaimed_records[self._id]\n old.save()\n\n # For the manager and member Django group attached to the OSFGroup,\n # add the new user to the group, and remove the old. This\n # will give the new user the appropriate permissions to the OSFGroup\n for group_name in self.groups.keys():\n if self.get_group(group_name).user_set.filter(id=old.id).exists():\n self.get_group(group_name).user_set.remove(old)\n self.get_group(group_name).user_set.add(new)\n\n self.update_search()\n return True", "title": "" }, { "docid": "b7332889bf3478f96a06900dd7a89d8e", "score": "0.44563234", "text": "def test_replace_group(self):\n\n replacement_data = dict(\n ug_id=100,\n new_field='test_new_field'\n )\n\n # Replace non-existing user will insert a new group in Database\n replaced = self.mu_group.replace_group(USER_GROUP['ug_name'], replacement_data)\n self.assertTrue(replaced.success)\n\n # Verify that group was inserted\n selected = self.mu_group.get_group_by_name(USER_GROUP['ug_name'])\n self.assertTrue(selected.success)\n self.assertEqual(selected.documents['new_field'], 'test_new_field')\n\n # Replace existing use with USER_GROUP data\n replaced = self.mu_group.replace_group(USER_GROUP['ug_name'], USER_GROUP)\n self.assertTrue(replaced.success)\n # Verify username has changed\n self.assertIsNone(replaced.documents.get('new_field'))", "title": "" }, { "docid": "5ce02f5a973d31ca058f455cdfb72de3", "score": "0.44555146", "text": "def set(isamAppliance, name, group_name, type='embedded_ldap', check_mode=False, force=False):\n new_group = True\n ret_obj = ibmsecurity.isam.base.management_authorization.role.get(isamAppliance, name)\n\n if (ret_obj['data']['groups'] == None):\n ret_obj['data']['groups'] = []\n else:\n for grp in ret_obj['data']['groups']:\n if grp['name'] == group_name:\n if grp['type'] == type:\n if force is False:\n return isamAppliance.create_return_object()\n new_group = False\n else: # Replace group with new type\n ret_obj['data']['groups'].remove(grp)\n break\n\n if new_group is True:\n ret_obj['data']['groups'].append({'name': group_name, 'type': type})\n\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put(\n \"Add group to management authorization role\",\n \"/authorization/roles/{0}/v1\".format(name), ret_obj['data'])", "title": "" }, { "docid": "19b45d436f130aaf20763e5baa9007c4", "score": "0.44455317", "text": "def test_update_group(self):\n\n update_dict = dict(\n ug_name='test_diff_name',\n ug_id=100\n )\n\n # Update non-existing document\n updated = self.mu_group.update_group(group_name=USER_GROUP['ug_name'], data=update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 0)\n\n # Inserting group into Database\n inserted = self.mu_group.insert_group(USER_GROUP)\n self.assertTrue(inserted.success)\n\n # Update existing group\n updated = self.mu_group.update_group(group_name=USER_GROUP['ug_name'], data=update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 1)\n # Verify that data was update\n selected = 
self.mu_group.get_group_by_name(update_dict['ug_name'])\n        self.assertTrue(selected.success)\n        self.assertEqual(selected.documents['ug_id'], update_dict['ug_id'])", "title": "" }, { "docid": "eecee4504245a1c51970547b8d19e143", "score": "0.4444343", "text": "def test_is_mutable_by_with_change_group_perm(self):\n        user = self.create_user(perms=[\n            ('reviews', 'change_group'),\n        ])\n        group = self.create_review_group()\n\n        self.assertTrue(group.is_mutable_by(user))", "title": "" }, { "docid": "36ecd4a12f554834e61c942104ae6aa1", "score": "0.44314274", "text": "def perform_create(self, serializer):\n        queryset = self.request.user.haveWatchedMediaItem.\\\n            all().filter(mdbID=self.request.data['mdbID'])\n        if queryset.exists():\n            raise ValidationError('The item already exists in the list.')\n        serializer.save(owner=self.request.user)", "title": "" }, { "docid": "cef7ec97a9ec73675dfd624b721a24e0", "score": "0.4423399", "text": "def test_remove_group_and_fail_publish(self):\n\t\tself.test_add_group()\n\t\tdraft = ReviewRequestDraft.create(self.review_request)\n\t\tdraft.target_groups.remove(self.group)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1, total_incoming=1, group_incoming=1, starred_public=1)\n\t\tself.spy_on(ReviewRequestDraft.publish, owner=ReviewRequestDraft, call_fake=self._raise_publish_error)\n\t\twith self.assertRaises(NotModifiedError):\n\t\t\tself.review_request.publish(self.user)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1, total_incoming=1, group_incoming=1, starred_public=1)", "title": "" }, { "docid": "ca42a09b4a11dde8f369abc163b81bd2", "score": "0.4422585", "text": "def group_membership_onaccept(form):\n\n        if isinstance(form, Row):\n            try:\n                record_id = form.id\n            except AttributeError:\n                record_id = None\n        else:\n            record_id = get_form_record_id(form)\n        if not record_id:\n            return\n\n        db = current.db\n        mtable = current.s3db.org_group_membership\n\n        record = db(mtable.id == record_id).select(mtable.id,\n                                                   mtable.group_id,\n                                                   mtable.organisation_id,\n                                                   mtable.deleted,\n                                                   mtable.deleted_fk,\n                                                   limitby = (0, 1),\n                                                   ).first()\n        if not record:\n            return\n\n        organisation_id = record.organisation_id\n        group_id = record.group_id\n        if organisation_id and group_id and not record.deleted:\n            query = (mtable.organisation_id == organisation_id) & \\\n                    (mtable.group_id == group_id) & \\\n                    (mtable.id != record.id) & \\\n                    (mtable.deleted != True)\n            deleted_fk = {\"organisation_id\": organisation_id,\n                          \"group_id\": group_id,\n                          }\n            db(query).update(deleted = True,\n                             organisation_id = None,\n                             group_id = None,\n                             deleted_fk = json.dumps(deleted_fk),\n                             )\n\n        org_update_affiliations(\"org_group_membership\", record)", "title": "" }, { "docid": "d48888101171fdf011886af950be811c", "score": "0.4416711", "text": "def create(self, group):\n        self.request.mongo_connection.shinken.hostgroups.insert(\n            group.as_dict()\n        )", "title": "" }, { "docid": "8b68342a6e42a368fe284c38ab9e66a5", "score": "0.44156888", "text": "def store_group(group_data):\n    group_id = group_data.get('id')\n    if not Group.objects.filter(id=group_id).exists():\n        group = Group()\n        group.id = group_data.get('id')\n        group.name = group_data.get('name')\n        group.description = group_data.get('description')\n        group.updated_time = group_data.get('updated_time')\n        group.privacy = group_data.get('privacy')\n        group.save()\n        if 'owner' in group_data:\n            group.owner = store_user(group_data.get('owner').get('id'), group_data.get('owner').get('name'), group)\n        group.save()\n        logger.info('Saved group: %s', group)\n    else:\n        group = 
Group.objects.filter(id=group_id)[0]\n group.name = group_data.get('name')\n group.description = group_data.get('description')\n group.updated_time = group_data.get('updated_time')\n group.privacy = group_data.get('privacy')\n if 'owner' in group_data:\n group.owner = store_user(group_data.get('owner').get('id'), group_data.get('owner').get('name'), group)\n group.save()\n logger.info('Update group updated_time: %s', group.id)\n return group", "title": "" }, { "docid": "13345edf32c50c2c6e9f26e225a50116", "score": "0.4412525", "text": "def group_id(self, group_id, persister=None):\n persister.exec_stmt(Shards.UPDATE_SHARD,\n {\"params\":(group_id, self.__shard_id)})\n self.__group_id = group_id", "title": "" }, { "docid": "11e1892d0a31a850305a65730bfa2953", "score": "0.44080812", "text": "def addMedToPlan(medicationName, dose, timeSlot):\n if not medicationName or not dose or not timeSlot:\n return question(render_template(\"ask_for_repeat\"))\n else:\n userId = context.System.device.deviceId\n userData = getUserData(userId)\n sanitizer = AlexaInputSanitizer()\n medicationName = sanitizer.sanitizeInputs(medicationName)\n timeSlot = sanitizer.sanitizeInputs(timeSlot)\n medicationData = userData.getMedication(timeSlot, medicationName)\n if not medicationData:\n medicationData = {\n \"name\": medicationName,\n \"taken\": None,\n \"dose\": dose\n }\n else:\n medicationData['dose'] += dose\n userData.updateMedication(timeSlot, medicationName, medicationData)\n statementText = render_template('dosage_added', dosenumber=dose, dosestring=getDoseString(dose), medicationname=medicationName, timeslot=timeSlot)\n return statement(statementText)", "title": "" }, { "docid": "c9bccb9d0d8ab56a5cdd3e5d0590aa91", "score": "0.4407952", "text": "def test_post_tg_g_role_admin(self, g_rname):\n with factories.single_commit():\n self.setup_helper.setup_workflow((g_rname,))\n\n g_person = self.setup_helper.get_person(g_rname,\n ac_roles.workflow.ADMIN_NAME)\n self.api_helper.set_user(g_person)\n\n workflow = all_models.Workflow.query.one()\n\n data = workflow_api.get_task_group_post_dict(workflow, g_person)\n response = self.api_helper.post(all_models.TaskGroup, data)\n self.assertEqual(response.status_code, 201)", "title": "" }, { "docid": "27fe5c097fb4c81f62eeac349e9b66fc", "score": "0.44068104", "text": "def __try_update(group_data):\n group_id = group_data['id'] if 'id' in group_data else None\n\n if not group_id:\n raise Exception(\"Couldn't find id for group\")\n\n logging.info(f'trying to update group {group_data.get(\"displayName\")}')\n make_request(f'{GRAPH_URL}{RESOURCE_PATH}{group_id}', 'PATCH', group_data)\n logging.info(f'group {group_data.get(\"displayName\")} updated successfully')", "title": "" }, { "docid": "ca534c7dddd2ea5f87053a70c1133292", "score": "0.4403035", "text": "def Update(name, **kwargs):\n if len(kwargs) == 0:\n return\n sqlBuilder = \"UPDATE ren_group SET \"\n kBuilder = \"\"\n for k in kwargs:\n kBuilder += \"%s = %s, \" % (k, kwargs[k])\n kBuilder = kBuilder[0:len(kBuilder) - 2]\n sqlBuilder += kBuilder\n sqlBuilder += \" WHERE name = '%s'\" % name\n GroupModel._persistDAO.ExecuteSQL(sqlBuilder, needRet=False)", "title": "" }, { "docid": "51bddc8e90d8502225b60fc0c4fcebfd", "score": "0.43958947", "text": "def cancel_at_resource(\n self,\n resource_id, # type: str\n remediation_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"_models.Remediation\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Remediation\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2019-07-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.cancel_at_resource.metadata['url'] # type: ignore\n path_format_arguments = {\n 'resourceId': self._serialize.url(\"resource_id\", resource_id, 'str', skip_quote=True),\n 'remediationName': self._serialize.url(\"remediation_name\", remediation_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.post(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('Remediation', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" }, { "docid": "582778371b7a506f95fb5da1fc4bc498", "score": "0.43926808", "text": "def put(self, request, *args, **kwargs):\n\n serializer = CreateGroupFileRepositoryRightSerializer(data=request.data, context=self.get_serializer_context())\n\n if not serializer.is_valid():\n\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST\n )\n\n group_file_repository_right_id = Group_File_Repository_Right.objects.create(\n group_id=serializer.validated_data['group_id'],\n file_repository_id=serializer.validated_data['file_repository_id'],\n read=serializer.validated_data['read'],\n write=serializer.validated_data['write'],\n grant=serializer.validated_data['grant']\n )\n\n return Response({'group_file_repository_right_id': group_file_repository_right_id.id}, status=status.HTTP_201_CREATED)", "title": "" }, { "docid": "fbf6f95a5ec2112e67e6aa07655e4635", "score": "0.43893638", "text": "def perform_update(self, serializer):\n\n user = serializer.save()\n _update_group_memberships(user)", "title": "" }, { "docid": "c133f94e41cfa048a1882072d2cecf6a", "score": "0.43881246", "text": "def UpdateGroup(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "title": "" }, { "docid": "fb0763dcf961ead45a4c0c325c105a0a", "score": "0.43809637", "text": "def test_remove_group(self):\n\t\tself.test_add_group()\n\t\tdraft = ReviewRequestDraft.create(self.review_request)\n\t\tdraft.target_groups.remove(self.group)\n\t\tdraft.target_people = [self.user]\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1, total_incoming=1, direct_incoming=0, group_incoming=1, starred_public=1)\n\t\tself.review_request.publish(self.user)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1, direct_incoming=1, total_incoming=1, starred_public=1)", "title": 
"" }, { "docid": "6f55f37140974e44210836b9d639ccf4", "score": "0.4380298", "text": "def update_resource_mgr(self, _id, data):\n response, body = self.http_client.json_request(\n url=\"v2/resource_mgrs/%s\" % _id,\n method=\"PUT\",\n body=data)\n return response, body", "title": "" }, { "docid": "43b2d91ad2e6dc8f58e5ac7f817fa38c", "score": "0.43741304", "text": "def upsert_group(datadict, ckanapi, debug=False):\n print(\"[upsert_group] Upserting organisation {0}, id {1}\".format(\n datadict[\"title\"], datadict[\"name\"]))\n if debug:\n print(\"[upsert_group] Input:\\n{0}\".format(str(datadict)))\n\n try:\n org = ckanapi.action.group_show(id=datadict[\"name\"])\n print(\"[upsert_group] Group exists, updating...\")\n org = ckanapi.action.group_update(id=datadict[\"name\"], **datadict)\n print(\"[upsert_group] Updated {0}\".format(datadict[\"title\"]))\n\n except:\n print(\"[upsert_group] Group not found, inserting...\")\n org = ckanapi.action.group_create(**datadict)\n print(\"[upsert_group] Inserted {0}\".format(datadict[\"title\"]))\n if org:\n return org", "title": "" }, { "docid": "9f4737255a1319b8eddc886a7f5b98bd", "score": "0.43716148", "text": "def grant_role(request):\n\n # if has_permission(request.user, \"Manage sharing\"):\n name = request.GET.get('name')\n role = Role.objects.get(name=request.GET.get('role'))\n resource = Resource.objects.get(global_id=request.GET.get('global_id'))\n\n try:\n principal = User.objects.get(username=name)\n except ObjectDoesNotExist, e:\n principal = Group.objects.get(name=name)\n\n # TODO ADD GLOBAL ROLE ACCORDING TO RESOURCE NAME!!!\n try:\n # look for a group with the dataset name\n group_name = get_resource_global_group_name(resource, role)\n group = Group.objects.get(name=group_name)\n group.user_set.add(principal)\n group.save()\n\n except ObjectDoesNotExist, e:\n # global_role, created = Role.objects.get_or_create(name=\"%s_%s\" % (resource.globa_id, role.name))\n # add_role(principal, global_role)\n pass\n\n # grant local role to the user\n add_local_role(resource, principal, role)\n\n # change request state if exists\n try:\n resource_request = ResourceRequest.objects.get(requestor=principal, resource=resource)\n if is_request_pending(resource_request):\n do_transition(resource_request, request_accept_transition, request.user)\n\n # alert requestor\n alert_user_by_email(\n mail_from='VPH-Share Webmaster <[email protected]>',\n mail_to='%s %s <%s>' % (principal.first_name, principal.last_name, principal.email),\n subject='[VPH-Share] Your request for sharing has been accepted',\n mail_template='request_for_sharing_accepted',\n dictionary={\n 'message': request.GET.get('requestmessage', ''),\n 'resource': resource,\n 'requestor': principal\n }\n )\n\n except ObjectDoesNotExist, e:\n pass\n except Exception, e:\n pass\n\n response_body = json.dumps({\"status\": \"OK\", \"message\": \"Role granted correctly\", \"alertclass\": \"alert-success\"})\n response = HttpResponse(content=response_body, content_type='application/json')\n return response", "title": "" }, { "docid": "4784ec9b74330f5aebb08103563713a6", "score": "0.43692994", "text": "def updated(self, group, **payload):\n pass", "title": "" }, { "docid": "da2772a7bfc95fe2f493b3245ec2f58e", "score": "0.43685713", "text": "def modify_dbinstance_resource_group_with_options(\n self,\n request: gpdb_20160503_models.ModifyDBInstanceResourceGroupRequest,\n runtime: util_models.RuntimeOptions,\n ) -> gpdb_20160503_models.ModifyDBInstanceResourceGroupResponse:\n UtilClient.validate_model(request)\n 
query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.new_resource_group_id):\n query['NewResourceGroupId'] = request.new_resource_group_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceResourceGroup',\n version='2016-05-03',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n gpdb_20160503_models.ModifyDBInstanceResourceGroupResponse(),\n self.call_api(params, req, runtime)\n )", "title": "" }, { "docid": "2a8ae64dfdd51d546fcd6766084fc97e", "score": "0.43674698", "text": "def put(self):\n parser = restful.reqparse.RequestParser()\n parser.add_argument(\"name\", type=str, required=True)\n args = parser.parse_args()\n\n user = auth(session, required=True)\n\n if not user.can(\"modify_usergroup\"):\n return {}, 403\n\n group = UserGroup(name=args.name)\n\n db.session.add(group)\n db.session.commit()\n\n return group.jsonify()", "title": "" }, { "docid": "9ff3d05de6e49c6e839d2295d732a49c", "score": "0.43648785", "text": "def create_gras(GRAs, restore_purged, restore_deleted, verbose):\n if verbose:\n print(\"\\n ####### GroupRoleAssociation #######\")\n for gra in GRAs:\n # check if this gra already exists\n gra_e = sa_session.query(GroupRoleAssociation).filter(Role.name == \\\n gra['role__name']).filter(Group.name == gra['group__name']).count()\n if gra_e == 0:\n try:\n the_group = sa_session.query(Group).filter_by(name=\\\n gra['group__name']).one()\n except (MultipleResultsFound, NoResultFound) as e:\n if verbose:\n print(\"You have an error when trying to retrieving\"+\\\n \" the group of this GroupRoleAssociation (%s)\" %(e))\n continue\n try:\n the_role = sa_session.query(Role).filter_by(name=\\\n gra['role__name']).one()\n except (MultipleResultsFound, NoResultFound) as e:\n if verbose:\n print(\"You have an error when trying to retrieving \"+\\\n \"the role of this GroupRoleAssociation (%s)\" %(e))\n continue\n new_gra = GroupRoleAssociation(the_group, the_role)\n sa_session.add(new_gra)\n sa_session.flush()\n the_group = \"\"\n the_role = \"\"\n else:\n if verbose:\n print(\"This GroupRoleAssociation already exists \"+\\\n \"group(%s),role(%s) !\" %(gra['group__name'], \\\n gra['role__name']))", "title": "" }, { "docid": "038b2ef9d688e511d2edc22624a91c7f", "score": "0.43643647", "text": "def patch(self, request, *args, **kwargs):\n\t\tinstance = self.get_object()\n\t\tif(instance.admin.pk == request.user.pk):\n\t\t\tserializer = self.get_serializer(instance)\n\t\t\tsuper(ManageGroup, self).patch(request, args, kwargs)\n\t\t\tdata = serializer.data\n\t\t\tresponse = {\"status_code\": status.HTTP_200_OK,\n\t\t\t \"message\": \"Successfully updated\",\n\t\t\t\t\t\t\"result\": data}\n\t\t\treturn Response(response)\n\t\telse:\n\t\t\tresponse = 
{\"status_code\": status.HTTP_200_OK,\n\t\t\t \"message\": \"You are not admin of the group\"\n\t\t\t\t\t\t}\n\t\t\treturn Response(response)", "title": "" }, { "docid": "67b11be46db0e173730bd287162495c5", "score": "0.43633956", "text": "async def modify_dbinstance_resource_group_async(\n self,\n request: gpdb_20160503_models.ModifyDBInstanceResourceGroupRequest,\n ) -> gpdb_20160503_models.ModifyDBInstanceResourceGroupResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_resource_group_with_options_async(request, runtime)", "title": "" }, { "docid": "8bfdeb7030ecee716a9d09f8ed7d21ae", "score": "0.43610483", "text": "def add_resource_mgr(self, data):\n response, body = self.http_client.json_request(\n url=\"v2/resource_mgrs/\",\n method=\"POST\",\n body=data)\n return response, body", "title": "" }, { "docid": "d8300344eeefe6a9c34736e6364dc46a", "score": "0.4360581", "text": "def update_group(self, group_id, group):\n raise exception.NotImplemented() # pragma: no cover", "title": "" }, { "docid": "b3ac3293fe5310e3ef0a62743b12c41f", "score": "0.435043", "text": "def declare_round_endowment(self, resource, productivity, product, command='default_resource', group='all'):\n productivity = str(productivity)\n if command not in self._resource_commands:\n self._resource_commands[command] = []\n\n if command in self._resource_command_group:\n if self._resource_command_group[command] != group:\n raise SystemExit('Different groups assigned to the same command')\n else:\n self._resource_command_group[command] = group\n self._resource_commands[command].append([resource, productivity, product])", "title": "" }, { "docid": "7ebe507cd6f9e2b9c84b1132b29d46c7", "score": "0.43464014", "text": "def add_moderator(self, moderator_name, channel_name):\n channel = self.get_channel(channel_name)\n with request_name(\"/r/[channel_name]/about/moderators/?raw_json=1\"):\n mod_doesnt_exist = moderator_name not in channel.moderator()\n if mod_doesnt_exist:\n with request_name(\"/r/[channel_name]/api/friend/?raw_json=1\"):\n channel.moderator.add(moderator_name)\n api = Api(FakeUser(moderator_name))\n with request_name(\"/r/[channel_name]/api/accept_moderator_invite?raw_json=1\"):\n api.accept_invite(channel_name)\n return Redditor(self.reddit, name=moderator_name)", "title": "" }, { "docid": "8390e6b935a5b2e16bdfc91bbdc9fccd", "score": "0.43414533", "text": "def patch(self, request, *args, **kwargs):\n\n\t\tserializer = GroupSerializerOnlyUsers(data=request.data)\n\t\tserializer.is_valid(raise_exception=True)\n\t\tgroup_pk = int(self.kwargs[\"group_pk\"])\n\t\tgroup = Group.objects.get(id=group_pk)\n\n\t\tif serializer.is_valid():\n\n\t\t\tgroup.users.add( serializer.data[\"users\"][0] )\n\t\t\t#group.users.add(request.user)\n\t\t\t\"\"\"\n\t\t\tProbleme deja vu mais non résolu\n\t\t\tla récupération d'un tableau marche depuis insomnia\n\t\t\tmais pas deouis le front\n\t\t\t---\n\t\t\tusers = serializer.validated_data['users']\n\t\t\tfor user in users:\n\t\t\t\tif not group.users.filter(id=user.id).exists():\n\t\t\t\t\tgroup.users.add(user) \"\"\"\n\n\t\t\tresponse = {\"status_code\": status.HTTP_200_OK,\n\t\t\t\t\t\t\"message\": \"Successfully updated\"}\n\n\t\t\treturn Response(response)", "title": "" }, { "docid": "9c145fb26f2095f77802660065e5fbc3", "score": "0.43335027", "text": "def update(self, resource):", "title": "" } ]
3c04d8af50b3919f9e97e929008fb5ff
Returns the noise covariance for the motion model. Assumes a diagonal structure for now.
[ { "docid": "b2de8502e03abc6778161866151c328b", "score": "0.55802447", "text": "def get_motion_covar(self, dt):\n return self.motion_covar", "title": "" } ]
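The query and positive passage above describe a motion-model noise covariance with a diagonal structure; the positive simply returns a stored attribute. For illustration only, here is a sketch of what such a diagonal process-noise matrix typically looks like; the (x, y, yaw) state layout, the noise magnitudes, and the dt scaling are assumptions, not taken from the passage.

```python
# Illustrative diagonal motion-noise covariance. The state layout and the
# standard deviations are assumptions made for this sketch only.
import numpy as np

def get_motion_covar(dt: float, sigma_xy: float = 0.1, sigma_yaw: float = 0.01) -> np.ndarray:
    """Diagonal process-noise covariance for an (x, y, yaw) state, scaled by dt."""
    variances = np.array([sigma_xy**2, sigma_xy**2, sigma_yaw**2])
    return np.diag(variances) * dt
```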
[ { "docid": "52217a99a1ba06b728716177e5ef9ae1", "score": "0.72257763", "text": "def getNoiseCov2D(self):\n noise_cov_mat = np.array([\n [self.forward_velocity_noise_std ** 2., 0., 0.],\n [0., self.forward_velocity_noise_std ** 2., 0.],\n [0., 0., self.yaw_rate_noise_std ** 2.]\n ])\n return noise_cov_mat", "title": "" }, { "docid": "a55df85719601c0ab20f8586093e4ef9", "score": "0.7050796", "text": "def covariances(self):\n # ngauss x 2 x 2\n # this has no derivatives, since the radii are fixed.\n return (self.radii**2)[:, None, None] * np.eye(2)", "title": "" }, { "docid": "a55df85719601c0ab20f8586093e4ef9", "score": "0.7050796", "text": "def covariances(self):\n # ngauss x 2 x 2\n # this has no derivatives, since the radii are fixed.\n return (self.radii**2)[:, None, None] * np.eye(2)", "title": "" }, { "docid": "f05a83adbc3b206a7df8be30f70c3ae8", "score": "0.68033445", "text": "def covariances(self):\n # ngauss x 2 x 2\n # this has no derivatives, since the radii are fixed.\n return np.zeros([1, 2, 2])", "title": "" }, { "docid": "21606fcba4e975b90b305553b61ad700", "score": "0.6796326", "text": "def conditional_cov(self):\n return np.squeeze(self.Sigmas[\"Sigma_c\"])", "title": "" }, { "docid": "22b7a400676ab86b28d8ee9944dc9e70", "score": "0.6767795", "text": "def covariance_matrix(self):\n return self._cov_matrix", "title": "" }, { "docid": "84b18623e9c25566069077db20b5f5b5", "score": "0.6514761", "text": "def covariance_matrix(x):\n return np.cov(x)", "title": "" }, { "docid": "54249978dd447267ec1105d617a4b2a0", "score": "0.6502479", "text": "def MVN_Denoise(Y, mvn_model, noise_std):\r\n\r\n # Weiner formula for denosing the images\r\n return weiner_formula(Y, mvn_model.cov, mvn_model.mean, noise_std)", "title": "" }, { "docid": "e06d53c5fcd29934501f6fb9b798a267", "score": "0.6494553", "text": "def _covariance_matrix(self):\n self.covariance = np.cov(self.deviation, rowvar=0)", "title": "" }, { "docid": "85e6d6d8f242fd4b289adb3bb5dcc214", "score": "0.646031", "text": "def covariance(self) -> float:\n return self.__covariance", "title": "" }, { "docid": "f828b8424b0bb2b0821ea48d6cfd5b50", "score": "0.6426304", "text": "def covariance(x, y):\n\n _data_check(x)\n _data_check(y)\n m_x = mean(x)\n m_y = mean(y)\n dev_x = [i - m_x for i in x]\n dev_y = [i - m_y for i in y]\n\n return round((_dot(dev_x, dev_y) / len(x)), 2)", "title": "" }, { "docid": "f392ac1f0711609c7ad197a4e310889a", "score": "0.64231825", "text": "def __cov(self, X):\n mean = np.mean(X, axis=1, keepdims=True)\n m = X - mean\n\n return (m @ m.T) / (X.shape[1] - 1)", "title": "" }, { "docid": "e070be400a267831f66ec00ba219a0b3", "score": "0.6379443", "text": "def noise_estimation(self):\n return estimate_sigma(\n self.noisy_image,\n multichannel=True,\n average_sigmas=True\n )", "title": "" }, { "docid": "a1a422dcaf490f7356260b4716d5c7f2", "score": "0.6351703", "text": "def get_cov(self):\n\n if self._cov is not None:\n return self._cov\n\n names = ['ra', 'dec', 'parallax', 'pmra', 'pmdec']\n\n C = np.zeros((6,6))\n\n # pre-load the diagonal\n for i,name in enumerate(names):\n full_name = \"{}_error\".format(name)\n C[i,i] = self._data[full_name]**2\n\n for i,name1 in enumerate(names):\n for j,name2 in enumerate(names):\n if j <= i:\n continue\n full_name = \"{}_{}_corr\".format(name1, name2)\n C[i,j] = self._data[full_name] * np.sqrt(C[i,i]*C[j,j])\n C[j,i] = self._data[full_name] * np.sqrt(C[i,i]*C[j,j])\n\n if self._rv_err is not None:\n C[5,5] = self._rv_err**2\n\n self._cov = C\n return self._cov", "title": "" }, { 
"docid": "3811729529468e254dbbc3544b134d6a", "score": "0.6337999", "text": "def var_cov_matrix(self):\n return self._var_cov", "title": "" }, { "docid": "eaeb7c38b982d0f2a992c376b7fe78d3", "score": "0.62638545", "text": "def covariance(self, mean=None, smooth=None, **kwargs):\n pass", "title": "" }, { "docid": "eaeb7c38b982d0f2a992c376b7fe78d3", "score": "0.62638545", "text": "def covariance(self, mean=None, smooth=None, **kwargs):\n pass", "title": "" }, { "docid": "44f6d80a986e834f56c34429314a4ead", "score": "0.62554723", "text": "def cov(self, model_X, model_Y=None):\n raise NotImplementedError()", "title": "" }, { "docid": "8aadd024ab134f8117fa2dc2d6f44b4b", "score": "0.62308145", "text": "def transition_covariance(self):\n\n beta, sigma = self.beta, self.sigma\n return sm.simplify(\n sm.integrate(sm.exp(-beta * (t - tau)) @ sigma @ sigma.T @ sm.exp(-beta * (t - tau)).T, (tau, 0, t)))", "title": "" }, { "docid": "7b17bd8edb8be63dd1e1fe807e6dec75", "score": "0.62270766", "text": "def get_noise_freq_domain_CovarMatrix( comatrix , df , inittime , parityN , seed='none' , N_previous_draws=0 ) :\n if len( comatrix.shape ) != 3 :\n raise InputError , 'Input Covariance matrices must be a 3-D numpy array!'\n if comatrix.shape[0] != comatrix.shape[1] :\n raise InputError , 'Covariance matrix must be square at each frequency!'\n\n Nts , Nf = comatrix.shape[0] , comatrix.shape[2]\n\n if parityN == 'Odd' :\n N = 2 * Nf + 1\n elif parityN == 'Even' :\n N = 2 * ( Nf + 1 ) \n else :\n raise InputError , \"parityN must be either 'Odd' or 'Even'!\"\n stime = 1 / ( N*df )\n t = inittime + stime * np.arange( N )\n\n if seed == 'none' :\n print 'Not setting the seed for np.random.standard_normal()'\n pass\n elif seed == 'random' :\n np.random.seed( None )\n else :\n np.random.seed( int( seed ) )\n print N_previous_draws\n np.random.standard_normal( N_previous_draws ) ;\n\n zs = np.array( [ ( np.random.standard_normal((Nf,)) + 1j * np.random.standard_normal((Nf,)) ) / np.sqrt(2)\n for i in range( Nts ) ] )\n\n ntilde_p = np.zeros( ( Nts , Nf ) , dtype=complex )\n for k in range( Nf ) :\n C = comatrix[ :,:,k ]\n if not np.allclose( C , np.conj( np.transpose( C ) ) ) :\n print \"Covariance matrix NOT Hermitian! Unphysical.\" \n w , V = sp_linalg.eigh( C )\n for m in range( w.shape[0] ) :\n w[m] = np.real( w[m] )\n if np.abs(w[m]) / np.max(w) < 1e-10 :\n w[m] = 0\n if w[m] < 0 :\n print 'Negative eigenvalue! 
Simulating unpysical signal...'\n\n ntilde_p[ :,k ] = np.conj( np.sqrt( N / (2*stime) ) * np.dot( V , np.dot( np.sqrt( np.diag( w ) ) , zs[ :,k ] ) ) )\n \n zerofill = np.zeros( ( Nts , 1 ) )\n if N % 2 == 0 :\n ntilde = np.concatenate( ( zerofill , ntilde_p , zerofill , np.conj(np.fliplr(ntilde_p)) ) , axis = 1 )\n else :\n ntilde = np.concatenate( ( zerofill , ntilde_p , np.conj(np.fliplr(ntilde_p)) ) , axis = 1 )\n n = np.real( sp.ifft( ntilde , axis = 1 ) )\n return t , n", "title": "" }, { "docid": "9d736a69beea6cdc506bfb901236055f", "score": "0.6216853", "text": "def covariance_theta(self) -> np.ndarray:\n return utils.solve_covariance(self.theta.get_moments())", "title": "" }, { "docid": "8856c059db1cc46876a6f18a1e865981", "score": "0.62103933", "text": "def gen_covariance(self):\n h = self._tp_corr_length\n pres_prof = (self.pressure_profile)\n\n return np.exp(-1.0 * np.abs(np.log(pres_prof[:, None] /\n pres_prof[None, :])) / h)", "title": "" }, { "docid": "c4f81d786a08d3fc8d4534a935de5cf4", "score": "0.62067014", "text": "def cov(self, data, rowvar=False):\n x = data.detach().clone()\n if x.dim() > 2:\n raise ValueError('data has more than 2 dimensions')\n if x.dim() < 2:\n x = x.view(1, -1)\n if not rowvar and x.size(0) != 1:\n x = x.t()\n fact = 1.0 / (x.size(1) - 1)\n x -= torch.mean(x, dim=1, keepdim=True)\n return fact * x.matmul(x.t()).squeeze()", "title": "" }, { "docid": "2c42586c74e0ff573392aad9c1929cdd", "score": "0.62018263", "text": "def get_noise(self):\n return np.diagonal(self.P)", "title": "" }, { "docid": "e9d37927f91c2da9f257bc920fb17828", "score": "0.61688215", "text": "def covariance(self):\n assert self.solved > 0, \"calcResiduals requires the equations to be solved\"\n if self.solved == 1:\n self.N = linalg.inv(self.N)\n self.solved = 2\n return self.N", "title": "" }, { "docid": "7ab130439ef016b4606161c6630ac4f4", "score": "0.61498225", "text": "def covariance(jac):\r\n\r\n # Do Moore-Penrose inverse discarding zero singular values.\r\n U, s, VT = np.linalg.svd(jac, full_matrices=False)\r\n threshold = np.finfo(float).eps * max(jac.shape) * s[0]\r\n s = s[s > threshold]\r\n VT = VT[:s.size]\r\n cov = np.dot(VT.T / s ** 2, VT)\r\n\r\n # Alternative method found, but assumes the residuals are small\r\n #cov = np.linalg.inv(np.dot(jac.T, jac))\r\n\r\n return cov", "title": "" }, { "docid": "abcbf6d53bbe1895bb94024dd3de5a40", "score": "0.61456764", "text": "def covariance(x, y):\n n_samples, horizon = x.shape\n mean_x = x.mean(dim=1, keepdim=True)\n mean_y = y.mean(dim=1, keepdim=True)\n xm = x - mean_x # (n_samples, horizon)\n ym = y - mean_y # (n_samples, horizon)\n\n cov = (xm * ym).sum(dim=1) / horizon\n\n return cov", "title": "" }, { "docid": "10dc78accda1fb52654c5db50c32c2a4", "score": "0.61323124", "text": "def _sigma_noise(self):\n # get the CARMA(p,q) model variance of the time series\n var = self._samples['var']\n\n # get the roots of the AR(p) characteristic polynomial\n ar_roots = self._samples['ar_roots']\n\n # get the moving average coefficients\n ma_coefs = self._samples['ma_coefs']\n\n # calculate the variance of a CAR(p) process, assuming sigma = 1.0\n sigma1_variance = np.zeros_like(var) + 0j\n for k in xrange(self.p):\n denom = -2.0 * ar_roots[:, k].real + 0j\n for l in xrange(self.p):\n if l != k:\n denom *= (ar_roots[:, l] - ar_roots[:, k]) * (np.conjugate(ar_roots[:, l]) + ar_roots[:, k])\n\n ma_sum1 = np.zeros_like(ar_roots[:, 0])\n ma_sum2 = ma_sum1.copy()\n for l in xrange(ma_coefs.shape[1]):\n ma_sum1 += ma_coefs[:, l] * ar_roots[:, k] 
** l\n ma_sum2 += ma_coefs[:, l] * (-1.0 * ar_roots[:, k]) ** l\n numer = ma_sum1 * ma_sum2\n sigma1_variance += numer / denom\n\n sigsqr = var / sigma1_variance.real\n\n # add the white noise sigmas to the MCMC samples\n self._samples['sigma'] = np.sqrt(sigsqr)", "title": "" }, { "docid": "e4f4ec7481029d7970a39ca1cf689232", "score": "0.6070059", "text": "def _calc_covariance_matrix(self,theta,x,y):\r\n f_vals = np.empty(y.shape)\r\n for i,ti in enumerate(theta):\r\n f_vals[:,i] = self.shape.f(ti,x)\r\n r = y - f_vals\r\n r = np.ma.masked_array(r, np.isnan(r))\r\n C = np.ma.cov(r,rowvar=0)\r\n return C", "title": "" }, { "docid": "5db2d533ce4edde3585b083d33196208", "score": "0.606362", "text": "def cov(self, x, mu):\n diff = X - mu\n cov = np.dot(diff.T, diff) / X.shape[0]\n return cov + np.eye(X.shape[1]) * 0.001", "title": "" }, { "docid": "0a3e1046ef03009f4ff1a25f92b18d02", "score": "0.6039782", "text": "def cov_estimator(TS):\n return np.cov( TS.T)", "title": "" }, { "docid": "97492a1bdd9f39e119bd31490af8ed89", "score": "0.59962165", "text": "def getCovMatrix(self):\n return self._covMatrix", "title": "" }, { "docid": "f3b6d982849bbfb60e5a61161ba229a9", "score": "0.5975663", "text": "def cov(self):\n return self._cov", "title": "" }, { "docid": "5cfd39d0e92a89dbab99c0feb432d514", "score": "0.59414965", "text": "def get_covar(self, date):\n logger.debug('Getting covariance on %s', date)\n cov = self.cov.loc[date]\n cov = cov.set_index('code')\n return cov", "title": "" }, { "docid": "0020c427ab97adcf12b0d85d4e841e74", "score": "0.5935267", "text": "def y_var_noise_model(self):\n # square the sigma samples\n sigma_squared = self.parameters[self.noise_parameter_index] ** 2\n # weighted average\n return np.average(sigma_squared, weights=self.particle_weights)", "title": "" }, { "docid": "a6c8b5bd04f41c899f6642b3adb202c2", "score": "0.59229183", "text": "def Covariance(st):\n trN,trE,trZ= SelectTracesNEZ(st)\n \n #Covariance matrix\n size= len(trN.data)\n c11= np.dot(trN.data, trN.data)/size\n c12= np.dot(trN.data, trE.data)/size\n c13= np.dot(trN.data, trZ.data)/size\n c21 = np.dot(trE.data, trN.data)/size\n c22 = np.dot(trE.data, trE.data)/size\n c23 = np.dot(trE.data, trZ.data)/size\n c31 = np.dot(trZ.data, trN.data)/size\n c32 = np.dot(trZ.data, trE.data)/size\n c33 = np.dot(trZ.data, trZ.data)/size\n Cov = np.array([[c11,c12,c13],[c21,c22,c23],[c31,c32,c33]])\n return Cov", "title": "" }, { "docid": "d9a818c0d27fa48fdd03e587323753d6", "score": "0.5919239", "text": "def covariance(self, x_cond, n_samples=None):\n assert x_cond.ndim == 2 and x_cond.shape[1] == self.ndim_x\n\n covs = self._std(x_cond)\n return covs.reshape((covs.shape[0],self.ndim_y, self.ndim_y))", "title": "" }, { "docid": "c3dff0464d7353808c477fa860bd3381", "score": "0.58876956", "text": "def cov_random(self, D, Sinv=None):\n if Sinv is not None:\n self.compute_P(Sinv)\n t = N.dot(self.Z, D)\n return D - N.dot(N.dot(t.T, self.P), t)", "title": "" }, { "docid": "079d680c046bdad8d83c31c7d41c6111", "score": "0.58760965", "text": "def getInitialCovMat2D(self):\n\n initial_cov_mat = np.array(\n [\n [self.xy_obs_noise_std ** 2.0, 0.0, 0.0],\n [0.0, self.xy_obs_noise_std ** 2.0, 0.0],\n [0.0, 0.0, self.initial_yaw_std ** 2.0],\n ]\n )\n return initial_cov_mat", "title": "" }, { "docid": "40962eb0cf57d910eecd831a29b46a25", "score": "0.58655775", "text": "def MVN_Denoise(Y, mvn_model, noise_std):\n\n return weiner_filter(Y, mu=mvn_model.mean, Sigma=mvn_model.cov, noise_std=noise_std)", "title": "" }, { "docid": 
"129922134ddbadbfcad45b67c15afbd4", "score": "0.5860362", "text": "def measure_cov(self):\n\n import fimage\n\n f=self.file()\n\n sigma_vals = self.sigma_vals()\n ellip_vals = self.ellip_vals()\n data = self.struct(sigma_vals.size*ellip_vals.size)\n\n ii=0\n for i in xrange(sigma_vals.size):\n sigma=sigma_vals[i]\n for j in xrange(ellip_vals.size):\n ellip = ellip_vals[j]\n\n Irr,Irc,Icc = util.ellip2mom(2*sigma**2, e=ellip, theta=0.0)\n\n dim = int( numpy.ceil(2.*self.sigfac*sigma ) )\n if (dim % 2) == 0:\n dim += 1\n dims=[dim,dim]\n cen=[(dim-1)/2]*2\n print(\"sigma:\",sigma,\"ellip:\",ellip,\"dims:\",dims)\n\n im=fimage.model_image(self.model,dims,cen,[Irr,Irc,Icc],nsub=8)\n res = admom(im, cen[0], cen[1], guess=sigma, nsub=4)\n\n data['sigma_index'][ii] = i\n data['ellip_index'][ii] = j\n data['Irr_input'][ii] = Irr\n data['Irc_input'][ii] = Irc\n data['Icc_input'][ii] = Icc\n data['Irr_meas'][ii] = res['Irr']\n data['Irc_meas'][ii] = res['Irc']\n data['Icc_meas'][ii] = res['Icc']\n\n ii+=1\n\n hdr={'model':self.model}\n eu.io.write(f, data, delim=' ', verbose=True, clobber=True, header=hdr)", "title": "" }, { "docid": "8f2303f738c4e9d42b4c9441b93d9126", "score": "0.5851395", "text": "def ICA_Denoise(Y, ica_model, noise_std):\n\n s_noisy = ica_model.P.T @ Y\n denoise_Y = np.zeros(Y.shape)\n\n d, k = ica_model.means.shape\n\n for i in range(d):\n for j in range(k):\n denoise_Y[[i], :] += ica_model.mix[i, j] * weiner_filter(Y=s_noisy[[i], :],\n mu=ica_model.means[i, j].reshape([1, 1]),\n Sigma=ica_model.vars[i, j].reshape([1, 1]),\n noise_std=noise_std)\n\n return ica_model.P @ denoise_Y", "title": "" }, { "docid": "6835a7e195b3441e7603af1ae895645c", "score": "0.5831767", "text": "def covariance_matrix(X, Y=None):\n if Y is None:\n Y = X\n n_samples = np.shape(X)[0]\n covariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\n return np.array(covariance_matrix, dtype=float)", "title": "" }, { "docid": "0de244b6a386d880f5457e73c8875309", "score": "0.5821699", "text": "def cov_altogether_noise(models, stitch, diagonal_model_covariance=False):\n x_obs = []\n f_obs = []\n for model in models:\n x_obs.append(model['x'])\n f_obs.append(model['f'])\n x_obs = np.concatenate(x_obs)\n f_obs = np.concatenate(f_obs)\n\n ### compute the big uncertainty estimate between all the models\n Nobs = len(x_obs)\n\n ### add in \"theory model noise\" as diagonal components based on variance of means at each pressure\n if stitch:\n x_stitch = []\n f_stitch = []\n for s in stitch:\n x_stitch.append(s['x'])\n f_stitch.append(s['f'])\n x_stitch = np.concatenate(x_stitch)\n f_stitch = np.concatenate(f_stitch)\n\n num_stitch = len(x_stitch)\n covs = np.zeros((Nobs+num_stitch,)*2, dtype=float) ### include space for the stitching conditions\n\n x_obs = np.concatenate((x_obs, x_stitch)) ### add in stitching data\n f_obs = np.concatenate((f_obs, f_stitch))\n start = Nobs\n for s in stitch:\n stop = start+len(s['x'])\n covs[start:stop,start:stop] = s['cov']\n start = stop\n else:\n covs = np.zeros((Nobs,Nobs), dtype=float)\n num_stitch = 0\n\n ### add block-diagonal components\n start = 0\n for model in models:\n stop = start+len(model['x']) ### the number of points in this model\n covs[start:stop,start:stop] = model['cov'] ### fill in block-diagonal component\n start = stop\n\n ### iterate through pressure samples and compute theory variance of each\n ### NOTE: the following iteration may not be the most efficient thing in the world, but it should get the job done...\n\n # compute means 
of means (average over models)\n x_set = np.array(sorted(set([x for model in models for x in model['x']])), dtype=float)\n n_set = len(x_set)\n mu_set = np.empty(n_set, dtype=float)\n for ind, x in enumerate(x_set): # iterate over all included x-points\n sample = []\n for model in models:\n i = x==model['x'] ### should be either 1 or 0 matches\n if np.any(i):\n sample.append(model['f'][i])\n\n mu_set[ind] = np.mean(sample)\n\n # compute the average of the covariances and the 2nd moment of the means\n cov_set = np.zeros((n_set, n_set), dtype=float)\n for ind, x in enumerate(x_set):\n for IND, X in enumerate(x_set[ind:]):\n IND += ind ### correct index for the big set\n\n if diagonal_model_covariance and (ind!=IND): ### only include diagonal components\n continue\n\n sample = []\n for model in models:\n i = x==model['x'] ### either 1 or 0 matches\n j = X==model['x']\n if np.any(i) and np.any(j): ### both abscissa are present in this model\n sample.append(model['f'][i]*model['f'][j] + model['cov'][i,j]) ### add both these things together for convenience\n\n if sample: ### there is something to add here, which is not guaranteed\n cov_set[ind,IND] = np.mean(sample) - mu_set[ind]*mu_set[IND] ### NOTE:\n if ind!=IND:\n cov_set[IND,ind] = cov_set[ind,IND] ### this is equivalent to the average (over models) of the covariance of each model\n ### plus the covariance between the mean of each model (with respect to the models)\n else:\n cov_set[ind,ind] = max(cov_set[ind,ind],0) ### minimum allowable\n\n cov_set = posdef(cov_set) ### regularize the result to make sure it's positive definite (for numerical stability)\n\n # map cov_set into the appropriate elements of model_covs\n model_covs = np.zeros_like(covs, dtype=float)\n start = 0\n ind_set = np.arange(n_set)\n truth_set = np.empty(n_set, dtype=bool)\n for model in models:\n\n # identify which abscissa from this model correspond to which indecies in x_set\n truth_set[:] = False\n truth_set[np.array([ind_set[x==x_set] for x in model['x']])] = True\n\n # map these indecies into model_covs. 
ASSUMES ABSCISSA ARE ORDERED WITHIN model_covs\n n = len(model['x'])\n model_covs[start:start+n,start:start+n] = cov_set[np.outer(truth_set,truth_set)].reshape((n,n))\n\n # bump starting index\n start += n\n\n ##############################################################################################\n ### FOR TESTING PURPOSES: want to visualize the covariance between models, etc\n ##############################################################################################\n# import matplotlib\n# matplotlib.use(\"Agg\")\n# from matplotlib import pyplot as plt\n#\n# fig = plt.figure(figsize=(10,4))\n# ax1 = plt.subplot(1,2,1)\n# cb1 = fig.colorbar(\n# ax1.imshow(np.tanh(covs/0.01), cmap='RdGy_r', origin='lower', aspect='equal', vmin=-1, vmax=+1),\n# orientation='vertical',\n# shrink=0.90,\n# )\n# cb1.set_label('tanh(covs/0.01)')\n#\n# ax2 = plt.subplot(1,2,2)\n# cb2 = fig.colorbar(\n# ax2.imshow(np.tanh(model_covs/0.01), cmap='RdGy_r', origin='lower', aspect='equal', vmin=-1, vmax=+1),\n# orientation='vertical',\n# shrink=0.90,\n# )\n# cb2.set_label('tanh(model covs/0.01)')\n#\n# fig.savefig('TEST.png')\n# plt.close(fig)\n# ##############################################################################################\n\n return x_obs, f_obs, covs, model_covs, num_stitch", "title": "" }, { "docid": "36ea0539c13ddf09d05e590c76679d17", "score": "0.5820795", "text": "def covar(x):\n mu = numpy.mean(x,axis=0)\n N = x.shape[0]\n y = x-mu\n sigma = numpy.dot(y.T,y)/(N-1)\n\n # Note, the maximum likelihood estimator is /N [not /(N-1)] as\n # above, but that works only for a multivariate normal.\n\n return sigma", "title": "" }, { "docid": "531de58c7a3d7f9b3be68bf43a16ad3d", "score": "0.5812992", "text": "def covariances(self):\n return self._covariances", "title": "" }, { "docid": "1d2dc204b755591a6ad3f93004430e2f", "score": "0.58056086", "text": "def cov_nb(self, x):\n cov = np.diag(np.var(x, axis=0))\n return cov + np.eye(X.shape[1]) * 0.001", "title": "" }, { "docid": "591de46801a2ffc701832dfdc6160817", "score": "0.5788552", "text": "def noise(self):\n noise = np.random.choice([-1,1])*(1. / (1. + self.curr_t + self.curr_ep))\n # print('noise:', noise)\n return noise", "title": "" }, { "docid": "1d7cb9e4c62372c9092d3627ed02f9e5", "score": "0.5788069", "text": "def cov(self):\n return self.__node_value[1:self.__n_features * self.__n_features + 1].reshape((self.__n_features, self.__n_features))", "title": "" }, { "docid": "06c39d9388b03803fa8081692b277ccd", "score": "0.5787068", "text": "def noise(self):\n noise = 1. / (1. 
+ self.curr_t + self.curr_ep)\n return noise", "title": "" }, { "docid": "6bc96e0b18689093f642b54917dda41c", "score": "0.57847613", "text": "def getNoise(self):\n return self.noise", "title": "" }, { "docid": "394ca88fe39b2fbff9f7c706e871d843", "score": "0.57741725", "text": "def calculate_covariance_matrix(X, Y=None):\n if Y is None:\n Y = X\n n_samples = np.shape(X)[0]\n covariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\n return np.array(covariance_matrix, dtype=float)", "title": "" }, { "docid": "268e30b5f89da8b0fd48ac160d3c27f6", "score": "0.5769279", "text": "def get_cov(self):\n return self._cov", "title": "" }, { "docid": "16a4cd010ac3258449d327a19d1dee79", "score": "0.5759923", "text": "def noise(self):\n return self._noise", "title": "" }, { "docid": "e6a89a8397a5ff32ea01ae649344ad50", "score": "0.5753217", "text": "def predict_covariance(self, X: np.ndarray, with_noise: bool=True) -> np.ndarray:\n X = map_to_feature_space(X,self.encoding)\n _, v = self.model.predict(X, full_cov=True, include_likelihood=with_noise)\n v = np.clip(v, 1e-10, np.inf)\n\n return v", "title": "" }, { "docid": "7bbe652a026d390da678a21b01452334", "score": "0.5751831", "text": "def getNoise(self):\n return self._noise", "title": "" }, { "docid": "fa396cb35c73d6bb4c91b34a2615bc84", "score": "0.57239354", "text": "def cov(x,y=None):\n if y:\n x = numpy.transpose(numpy.array([x,y], x.typecode()))\n mu = numpy.mean(x)\n sum_cov = 0.0\n for v in x:\n sum_cov = sum_cov+numpy.multiply.outer(v,v)\n return (sum_cov-len(x)*numpy.multiply.outer(mu,mu))/(len(x)-1)", "title": "" }, { "docid": "f97cf61e3a7082173fb0c4cd7c3e7749", "score": "0.5714841", "text": "def test_covariance_spectrum():\n bd = galsim.BaseDeviate(rseed)\n Sigma = {}\n for i in range(2):\n for j in range(2):\n if i > j: continue\n Sigma[(i, j)] = galsim.Gaussian(fwhm=1) # anything with a drawKImage will do...\n SEDs = [galsim.SED('1', 'nm', 'fphotons'), galsim.SED('wave', 'nm', 'fphotons')]\n covspec = galsim.CovarianceSpectrum(Sigma, SEDs)\n\n do_pickle(covspec)\n\n wcs = galsim.PixelScale(0.1)\n psf = galsim.Gaussian(fwhm=1)\n bp = galsim.Bandpass('1', 'nm', blue_limit=500.0, red_limit=600.0)\n do_pickle(covspec, lambda x: x.toNoise(bp, psf, wcs, rng=bd))\n\n covspec = covspec.transform(1.1, 0.2, 0.1, 0.9)\n do_pickle(covspec)\n do_pickle(covspec, lambda x: x.toNoise(bp, psf, wcs, rng=bd))", "title": "" }, { "docid": "9d717673a4bfaaf116f17a60954693c2", "score": "0.5705867", "text": "def _recalculate_covariance_matrix(self):\n new_K = np.zeros((self.t, self.t))\n for i in range(self.t):\n new_K[i] = self.k(self.thetas[i], self.thetas)\n \n new_K += self.obs_noise * np.identity(self.t)\n self.K = new_K\n self.K_inv = la.inv(new_K)", "title": "" }, { "docid": "ea095371023ac5f50107b9d8b6ba1c64", "score": "0.5686947", "text": "def GSM_Denoise(Y, gsm_model, noise_std):\r\n\r\n cov = np.copy(gsm_model.cov)\r\n k = len(cov)\r\n D, M = Y.shape\r\n\r\n # Create K eye matrix for each guassian\r\n I = np.full([k, * cov.shape[1:]], fill_value=np.eye(cov.shape[1], cov.shape[2]))\r\n noise_mat = noise_std * I\r\n\r\n c = calculte_posterior_probability(Y, gsm_model.mix, cov + noise_mat)\r\n\r\n # Multiply each weiner (that fits to the cov of the i'th guassian) with th coresponded coefficients\r\n result = np.array([c[:, i] * weiner_formula(Y=Y, cov=cov[i], mean=np.zeros([D]), noise_std=noise_std) for i in range(k)])\r\n return np.sum(result, axis=0)", "title": "" }, { "docid": "8da256d7166d5bff729c40720016c84f", "score": 
"0.565555", "text": "def covariance(X, Y):\n res = 0\n for x, y in zip(X, Y):\n res += (x - average(X)) * (y - average(Y))\n return res", "title": "" }, { "docid": "6208618ac8501d9db2e48357d17bd6a5", "score": "0.5654421", "text": "def calculate_covariance_matrix(X, Y=None):\n if Y is None:\n Y = X\n n_samples = np.shape(X)[0]\n covariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))\n\n return np.array(covariance_matrix, dtype=float)", "title": "" }, { "docid": "e7cba05a2c06cc8fa54a0b7b65df8875", "score": "0.5626653", "text": "def get_covariance_matrix(self, X):\n column_averages = np.mean(X, axis=0)\n demeaned_matrix = X - column_averages\n return np.dot(demeaned_matrix.T, demeaned_matrix) / (X.shape[0] - 1)", "title": "" }, { "docid": "18f007132e9a2719231944654cd03aad", "score": "0.5623084", "text": "def cov_fixed(self):\n return self.Sinv", "title": "" }, { "docid": "c1e70125e4f864fde46e108a526ac230", "score": "0.5619489", "text": "def get_posterior_cov(Sigma, Sigma_z):\n #get inverses (need the 1e-6 for stability)\n Sigma_i = np.linalg.pinv(Sigma + 1e-6 * np.mean(Sigma.diagonal()) * np.eye(len(Sigma)))\n Sigma_z_i = np.linalg.pinv(Sigma_z + 1e-6 * np.mean(Sigma_z.diagonal()) * np.eye(len(Sigma_z)))\n\n #posterior covariance of all Xs\n Sigma_xgz = np.linalg.pinv(Sigma_i + Sigma_z_i)\n\n return Sigma_xgz", "title": "" }, { "docid": "5966fa87dd0311e859fae5b579a90eea", "score": "0.56189275", "text": "def cov_matrix(_y, _x): \n if _x.shape[0] != _y.shape[0]:\n raise Exception(\"Shapes do not match\")\n\n # make sure we use matrix multiplication, not array multiplication\n _xm = np.matrix(np.mean(_x, axis=0).repeat(_x.shape[0], axis = 0).reshape(_x.shape))\n _ym = np.matrix(np.mean(_y, axis=0).repeat(_y.shape[0], axis = 0).reshape(_y.shape))\n\n return ((_x - _xm).T * (_y - _ym)) * 1 / _x.shape[0]", "title": "" }, { "docid": "7cb6cd9a1c9510d47e6292254e70bea8", "score": "0.56060684", "text": "def to_tlwhz_cov(self):\n ret = self.mean[:5].copy()\n ret[2] *= ret[3]\n ret[:2] -= ret[2:4] / 2\n\n xx = self.covariance[0][0]\n xz = self.covariance[0][4]\n zx = self.covariance[4][0]\n zz = self.covariance[4][4]\n return list(ret) + [xx, xz, zx, zz]", "title": "" }, { "docid": "7950b22ba2ddf32158d2ce463ddc425d", "score": "0.5594748", "text": "def _get_covars(self):\n if self.covariance_type == 'full':\n return self.covars_\n elif self.covariance_type == 'diag':\n return [np.diag(cov) for cov in self.covars_]", "title": "" }, { "docid": "162ff3b79fc6ac297731d38ca4a2750a", "score": "0.55926", "text": "def calculate_noise(self):\r\n dX = self.theta * (self.mu - self.X)\r\n dX += self.sigma * np.random.randn(self.n_action)\r\n self.X += dX\r\n return self.X", "title": "" }, { "docid": "bafd511e6e78e9990279445a3b2c0905", "score": "0.55786306", "text": "def cov_velocities(self, T):\n activations = self._rbfs_derivative_nd_sequence(T)\n return activations.T.dot(self.weight_cov).dot(activations)", "title": "" }, { "docid": "dba61e4aff2ad8802104a0b37d2b1ff6", "score": "0.55646914", "text": "def covariance_matrix(self,fm,svd=True):\n if svd:\n inverse_fm = self._invert_matrix_(fm)\n else:\n inverse_fm = np.matrix(fm).I\n return inverse_fm", "title": "" }, { "docid": "afe085bba5dfdb16e4dac5777d2f23a8", "score": "0.5557756", "text": "def _covar_in_mean(self):\n if self.covar is None:\n return None\n nchan = self.covar.shape[-1]\n nbin = self.flux.shape[0]\n inpix = numpy.ma.power(self.npix, -1.)\n covar = numpy.empty(nchan, dtype=sparse.csr_matrix)\n for i in range(nchan):\n j = 
self.covar.input_indx[i]\n _inpix = inpix[:,j,None]*inpix[None,:,j]\n covar[i] = sparse.triu(self.covar.toarray(channel=j) * _inpix).tocsr()\n return Covariance(covar, input_indx=self.covar.input_indx)", "title": "" }, { "docid": "50098cb156e133b1252766e104ae638e", "score": "0.5555965", "text": "def get_cov_matrix(data):\n num_examples,num_features = data.shape\n cov_matrix = np.dot(np.transpose(data),data) / (1.0 * num_examples)\n return cov_matrix", "title": "" }, { "docid": "8b37d4a4af3399a9351d44cc2abc220a", "score": "0.55473256", "text": "def noise(self) -> Sequence:\n\n return self._noise", "title": "" }, { "docid": "14007ffe10bec8d6f5b402540c89d40f", "score": "0.55459356", "text": "def _inverse_measurement_covar(self, measurement_model, **kwargs):\n if hasattr(measurement_model, 'inverse_covar'):\n inv_measurement_covar = measurement_model.inverse_covar(**kwargs)\n else:\n inv_measurement_covar = np.linalg.inv(measurement_model.covar(**kwargs))\n\n return inv_measurement_covar", "title": "" }, { "docid": "e8b5219ce06e3fe624edd9bbed5a5f2a", "score": "0.55454314", "text": "def _covar_dense(X, Y, symmetrize=False):\n if symmetrize and np.shape(X)[1]!=np.shape(Y)[1]:\n raise ValueError('Cannot compute symmetric covariance matrix for differently sized data')\n Craw = np.dot(X.T, Y)\n if symmetrize:\n return 0.5*(Craw + Craw.T)\n else:\n return Craw", "title": "" }, { "docid": "22400e2a316f7b94a2e03a2053a69f31", "score": "0.55423766", "text": "def get_noise_realization():\n df = data.fetch_processed()\n var_in = df['dt21'].values**2\n\n noise_realization = np.sqrt(var_in) * np.random.uniform(\n low=-1., high=1.,\n size=len(var_in))\n\n return noise_realization", "title": "" }, { "docid": "c98c93c3d628048c51a73dfd2c7d7d12", "score": "0.5538364", "text": "def find_cov(return_mat, weight_factor, builtin):\n\n if builtin:\n return np.cov(return_mat, aweights=weight_factor)\n\n diff_mat = return_mat - np.mean(return_mat, axis=1, keepdims=True)\n weight_mat = MomentGenerator.calc_weight_mat(return_mat, weight_factor)\n return np.dot(weight_mat * diff_mat * (return_mat.shape[1]/(return_mat.shape[1] - 1)), diff_mat.T)", "title": "" }, { "docid": "d9b5c1e6c3c42fe86938a9074bef8ade", "score": "0.55358696", "text": "def covariance(self, model_parameter_names, paramvals, fitinfo):\r\n\r\n if 'hess_inv' in fitinfo:\r\n cov = fitinfo['hess_inv']\r\n elif 'hess' in fitinfo:\r\n hess = fitinfo['hess']\r\n cov = np.linalg.inv(hess)\r\n #elif 'jac' in fitinfo:\r\n # jac = fitinfo['jac']\r\n # cov = covariance(jac)\r\n else:\r\n inc = [self.hessInc[p] for p in model_parameter_names]\r\n # TODO : Check if this is correct or replace it with other methods\r\n jac = np.expand_dims(sp.optimise.approx_fprime(paramvals, self.fitness, inc), axis=0)\r\n #cov = covariance(jac)\r\n cov = np.linalg.inv(np.dot(jac.T, jac))\r\n\r\n return cov", "title": "" }, { "docid": "dc10b95032e6ff285bafe050366599b9", "score": "0.55333495", "text": "def cov_trajectory(self, T):\n activations = self._rbfs_nd_sequence(T)\n return activations.T.dot(self.weight_cov).dot(activations)", "title": "" }, { "docid": "f259e75c0b02877d68ce2fbe0b3bfa32", "score": "0.55307794", "text": "def ICA_Denoise(Y, ica_model, noise_std):\r\n\r\n # TODO: YOUR CODE HERE\r", "title": "" }, { "docid": "eaf9e38e57f54a9fcf698c5c71f72ade", "score": "0.55211747", "text": "def GetMises(self,epsilon):\n Hydrostatic=(epsilon[0,0]+epsilon[1,1]+epsilon[2,2])\\\n /3.0\n Deviatoric=epsilon-Hydrostatic*np.eye(3,3)\n vMStrain=(2.0/3.0)*np.sqrt\\\n 
(np.tensordot(Deviatoric,Deviatoric,axes=2)) \n \n return vMStrain", "title": "" }, { "docid": "8ecaffdfabbacd0415a9e2bcbdb6faf4", "score": "0.5509221", "text": "def cov_parameters(m):\n m = np.atleast_2d(m)\n N = m.shape[0]\n\n z = np.linalg.cholesky(m)\n z.flat[::(N+1)] = np.log(z.flat[::(N+1)])\n\n return z[np.tril_indices(N)]", "title": "" }, { "docid": "091122a0f5d84be076fa87bcc925fdf3", "score": "0.550169", "text": "def CovarianceMatrix(self, Correlation):\n D = np.zeros((len(self.D),len(self.D)))\n for i in range(len(self.D)):\n for j in range(len(self.D)):\n if i==j :\n D[i,j] = np.sqrt(self.D[i].variance)\n else:\n D[i,j] = 0\n R = Correlation\n Si = np.matmul(D, R)\n S = np.matmul(Si, D)\n return S", "title": "" }, { "docid": "376e5966fd0d29e9e41eb9f55c95a723", "score": "0.5492497", "text": "def get_gnss_cov(self, z_gnss: GnssMeasurement) -> 'ndarray[3,3]':\r\n if self.use_gnss_accuracy and z_gnss.accuracy is not None:\r\n # play around with this part, the suggested way is not optimal\r\n gnss_cov = (z_gnss.accuracy/3)**2 * self.gnss_cov\r\n\r\n else:\r\n # dont change this part\r\n gnss_cov = self.gnss_cov\r\n return gnss_cov", "title": "" }, { "docid": "72e8624cdfbafa7a4c93cc792f5d453e", "score": "0.5470916", "text": "def cov_matrix(x):\n y = _cov_params_to_cholesky(x)\n\n return np.dot(y, y.T)", "title": "" }, { "docid": "1b1c40d5054516cf1e8cccdc24cdfd4e", "score": "0.54608333", "text": "def covariance_bits(C=C, mesh=mesh):\n C_obs = copy.copy(C)\n try:\n U, C_eval, Uo_Cxo = C_obs.observe(\n mesh, np.zeros(mesh.shape[0]), output_type='s')\n return U.T.copy('F'), C_eval, C_obs, Uo_Cxo\n except np.linalg.LinAlgError:\n return None", "title": "" }, { "docid": "fb537c04e300c53700e4834ececbb362", "score": "0.5458917", "text": "def noise(self, nx, ny):\n return self.gen.noise2d(nx, ny) / 2.0 + 0.5", "title": "" }, { "docid": "7700c6413ffc8d4a2b4c2d4b58188f34", "score": "0.54564476", "text": "def _covariance_beta(self):\n return self.XXinv()[:,:,np.newaxis] * self._var_Y()[np.newaxis,np.newaxis,:]", "title": "" }, { "docid": "967084e8097ad05052e7e1aa2eadf026", "score": "0.5454914", "text": "def compute_covariance_theoretical(self,\n num_samples=1000,\n ignore_cache=False):\n return self.random_network.compute_covariance_KL(\n sigma_x=self.sigma_x,\n sigma_y=self.sigma_y,\n sigma_baseline=self.sigma_baseline,\n T=self.T,\n beta=1.0,\n num_samples=num_samples,\n ignore_cache=ignore_cache)", "title": "" }, { "docid": "b773c7a3265a8f932921f324a32fb1a0", "score": "0.545079", "text": "def covariance_beta(self):\n if self.dim_Y==1:\n return self._covariance_beta()[:,:,0]\n else:\n return self._covariance_beta()", "title": "" }, { "docid": "077a6ce27c9e1baffbe57162a3100718", "score": "0.5448954", "text": "def _get_noise_scale_vec(self, cfg):\n noise_vec = torch.zeros_like(self.obs_buf[0])\n self.add_noise = self.cfg.noise.add_noise\n noise_scales = self.cfg.noise.noise_scales\n noise_level = self.cfg.noise.noise_level\n noise_vec[:3] = noise_scales.lin_vel * noise_level * self.obs_scales.lin_vel\n noise_vec[3:6] = noise_scales.ang_vel * noise_level * self.obs_scales.ang_vel\n noise_vec[6:9] = noise_scales.gravity * noise_level\n noise_vec[9:12] = 0. # commands\n noise_vec[12:24] = noise_scales.dof_pos * noise_level * self.obs_scales.dof_pos\n noise_vec[24:36] = noise_scales.dof_vel * noise_level * self.obs_scales.dof_vel\n noise_vec[36:48] = 0. 
# previous actions\n if self.cfg.terrain.measure_heights:\n noise_vec[48:235] = noise_scales.height_measurements* noise_level * self.obs_scales.height_measurements\n return noise_vec", "title": "" }, { "docid": "6448f90ab2a670c2786ef08f3eb4fd20", "score": "0.5422462", "text": "def _cov_to_dist(self, sigma):\n n = sigma.shape[0]\n ones = np.ones(n).reshape(n, 1)\n sigma_diag = np.diag(sigma).reshape(n, 1)\n d = ones.dot(sigma_diag.T) + sigma_diag.dot(ones.T) - (2. * sigma)\n\n return(d)", "title": "" }, { "docid": "3c2a28263a2df26d2a31785d599ee192", "score": "0.54216117", "text": "def get_cov_matrix(self, sample):\n print \"Calculating cov_matrix\",\n self.n_cov_events = 0.\n n_var = 4\n var_list = range(4)\n self.mean = numpy.array([0. for i in range(n_var)])\n self.cov = numpy.array([[0. for i in range(n_var)] for j in range(n_var)])\n for run, spill, event, psv, amplitude in self.retrieve(sample):\n self.n_cov_events += 1.\n if self.n_cov_events % 1000 == 0:\n print self.n_cov_events,\n sys.stdout.flush()\n for i in var_list:\n delta = (self.n_cov_events-1.)/self.n_cov_events\n self.mean[i] = self.mean[i]*delta + psv[i]/self.n_cov_events\n for j in range(i, n_var):\n self.cov[i][j] = self.cov[i][j]*delta + \\\n psv[i]*psv[j]/self.n_cov_events\n for i in var_list:\n for j in range(i, n_var):\n self.cov[i][j] -= self.mean[i]*self.mean[j]\n self.cov[j][i] = self.cov[i][j]\n print \"... found\", self.n_cov_events, \"events\"\n sys.stdout.flush()\n self.cov_inv = numpy.linalg.inv(self.cov)\n self.get_emittance()", "title": "" }, { "docid": "f629866911135060a544412dba4d2f38", "score": "0.5420268", "text": "def _cal_varcov(self, θ2_vec):\n θ2, ix_θ2_T, Z, LinvW, X1 = self.θ2, self.ix_θ2_T, self.Z, self.LinvW, self.X1\n\n θ2.T[ix_θ2_T] = θ2_vec\n\n # update δ\n δ = self._cal_δ(θ2)\n\n jacob = self._cal_jacobian(θ2, δ)\n\n θ1, ξ = self._cal_θ1_and_ξ(δ)\n\n Zres = Z * ξ.reshape(-1, 1)\n Ω = Zres.T @ Zres # covariance of the momconds\n\n G = (np.c_[X1, jacob].T @ Z).T # gradient of the momconds\n\n WG = cho_solve(LinvW, G)\n WΩ = cho_solve(LinvW, Ω)\n\n tmp = solve(G.T @ WG, G.T @ WΩ @ WG).T # G'WΩWG(G'WG)^(-1) part\n\n varcov = solve((G.T @ WG), tmp)\n\n return varcov", "title": "" }, { "docid": "cd077f4641bff857e85591bcd3865c36", "score": "0.54123884", "text": "def PER_cov(n_pts, len_scale_sq = None, period = None, var = 1):\n t_pts = np.arange(n_pts)[:,None]\n if len_scale_sq == None:\n len_scale_sq = n_pts / 2\n if period == None:\n period = n_pts / 2\n Sigma = var * np.exp(- 2*np.sin( np.pi * np.abs(t_pts - t_pts.T) / period)**2 / (len_scale_sq))\n return Sigma", "title": "" }, { "docid": "f5cfaca4de9769924e2c16eb4adc657c", "score": "0.5410382", "text": "def cov(self) -> Union[float, np.ndarray]:\n pass", "title": "" }, { "docid": "58e12c3559937bb891feef5b1e7bafca", "score": "0.5409086", "text": "def gen_diag_data_covar(Nsample, var):\n return np.asarray([np.diag(var)]*Nsample)", "title": "" }, { "docid": "c055e3525afc092ecefc8e2b4e8fbe99", "score": "0.54022706", "text": "def remove_from_cov_matrix(self, update_psv_array):\n if update_psv_array.shape[0] == 0:\n return\n #print \"Removing\", update_psv_array.shape[0], \"values from cov matrix\"\n n_var = 4\n var_list = range(4)\n # convert to not centred moments\n if update_psv_array.shape[0] >= int(round(self.n_cov_events)):\n for i in var_list:\n self.mean[i] = 0.\n for j in var_list:\n self.cov[i][j] = 0.\n return\n\n for i in var_list:\n for j in var_list:\n self.cov[i][j] += self.mean[i]*self.mean[j]\n\n for psv in update_psv_array:\n 
delta = self.n_cov_events/(self.n_cov_events-1) \n for i in var_list:\n self.mean[i] = self.mean[i]*delta - psv[i]/self.n_cov_events\n for j in range(i, n_var):\n self.cov[i][j] = self.cov[i][j]*delta - \\\n psv[i]*psv[j]/self.n_cov_events\n self.n_cov_events -= 1.\n\n for i in var_list:\n for j in range(i, n_var):\n self.cov[i][j] -= self.mean[i]*self.mean[j]\n self.cov[j][i] = self.cov[i][j]\n self.cov_inv = numpy.linalg.inv(self.cov)\n self.get_emittance()", "title": "" }, { "docid": "35ecea3a6cd2231289d97c45ede61424", "score": "0.53871065", "text": "def vitesse(covMat):\n u, s, vh = np.linalg.svd(covMat)\n return s[...,0]**0.5 + s[...,1]**0.5", "title": "" } ]
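Many of the negative passages in the row above hand-roll the same unbiased sample-covariance estimator (centre each variable, multiply by the transpose, divide by n-1). The following self-contained check verifies that the manual formula agrees with NumPy's built-in estimator; the data and seed are arbitrary.

```python
# Verify the hand-rolled covariance formula against np.cov. NumPy's defaults
# (rowvar=True, ddof=1) match the unbiased estimator computed below.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 100))            # 5 variables, 100 observations

Xc = X - X.mean(axis=1, keepdims=True)   # centre each variable
manual = Xc @ Xc.T / (X.shape[1] - 1)    # unbiased: divide by n - 1

assert np.allclose(manual, np.cov(X))
```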
5b24a3d925dee9eea90f6bb8e80ea452
Create an empty file at the specified path.
[ { "docid": "e6b3a69da0e27387cd0b89ed40baa79d", "score": "0.6326646", "text": "def create_file(self, path: str, filename: str):\n self._change_dir(path)\n self.ftp.storbinary(f'STOR {filename}', BytesIO())", "title": "" } ]
[ { "docid": "0105c4a95500b0b8ebc9f23dc501ec83", "score": "0.8275744", "text": "def make_empty_file(file_path: str):\r\n open(file_path, 'a').close()", "title": "" }, { "docid": "3f6c989939edb8c91e3aca2899ded9e8", "score": "0.7651341", "text": "def create_file(path):\n with open(path, \"w\") as test_file:\n test_file.write(\"test\")", "title": "" }, { "docid": "3f6c989939edb8c91e3aca2899ded9e8", "score": "0.7651341", "text": "def create_file(path):\n with open(path, \"w\") as test_file:\n test_file.write(\"test\")", "title": "" }, { "docid": "bfad4f84ac0ab7a2fcc42108e0b85c63", "score": "0.763044", "text": "def save_empty_file(path: str, file_name: str) -> str:\n\n file_path = os.path.join(path, file_name)\n open(file_path, \"a\").close()\n\n return file_path", "title": "" }, { "docid": "5d78bdb9e5195d6454f51bb75525134e", "score": "0.75787836", "text": "def make_blank_file(file_name, file_path):\n\n # if there isn't a file...\n if not os.path.exists(file_path + file_name):\n\n # put in the damn try statement to avoid race conditions and locking\n try:\n open(file_path + file_name, 'a').close()\n except OSError:\n raise Exception('There was an OSError when trying to make ' + location)", "title": "" }, { "docid": "87c2f3d15afde8fd759f3a9264b4daa5", "score": "0.7318868", "text": "def create(self):\n if not os.path.exists(self.path):\n with open(self.path, 'w') as fileobj:\n fileobj.write('')", "title": "" }, { "docid": "8bb4cc93bffb669c360a5b376142ac26", "score": "0.72233486", "text": "def file_generate(path, content):\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n with open(path, 'w') as target:\n target.write(content)", "title": "" }, { "docid": "99738a0eced9d9dbad365d73c42d5781", "score": "0.70984614", "text": "def write_file(\n path: Union[str, Path], content: Union[str, bytes], mode: str = 'w'\n):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, mode) as f:\n f.write(content)", "title": "" }, { "docid": "a8e33c9db672b4932fe618e386c8ba72", "score": "0.68925726", "text": "def _generate_file(file: Path, content: str) -> None:\n file.parent.mkdir(parents=True, exist_ok=True)\n with file.open(\"w+\", encoding=\"utf-8\") as file:\n file.write(content.strip() + \"\\n\")", "title": "" }, { "docid": "d6c8c390a7d9605d4abdd2acb3b75eae", "score": "0.6794918", "text": "def create(self):\n if os.path.isfile(self.path):\n if not os.path.exists(self.path):\n with open(self.path, 'w') as fileobj:\n fileobj.write('')\n else:\n os.makedirs(self.path)", "title": "" }, { "docid": "f8434c98f1fc44cdc2d1c4074f59d001", "score": "0.679289", "text": "def CreatePathFile(self, path_file):\n if not os.path.exists(os.path.dirname(path_file)):\n try:\n os.makedirs(os.path.dirname(path_file))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise", "title": "" }, { "docid": "9e25be24c7145c6c8e16fe3249fade9f", "score": "0.67280054", "text": "def createOutputFile(path):\n\n try:\n output_file = open(path, 'x') # create output file\n except OSError: # as error: # if already exists, remove and overwrite\n try:\n os.remove(path)\n except PermissionError:\n ctypes.windll.user32.MessageBoxW(0, f\"Close output file {path}!\\nCan't write data if file is open.\", \"Warning\", 0 | 0x30)\n # print(f\"\\033[1;31;40m Close {path}!\")\n quit()\n else:\n output_file.close()", "title": "" }, { "docid": "d568879dc7ab27f28748ad9da451cd07", "score": "0.67137647", "text": "def file_ensure_exists(path):\n if (not os.path.lexists(path)):\n fp = open_(path, \"w\")\n 
fp.close()", "title": "" }, { "docid": "ecd870a22ac065576adba151e5b144db", "score": "0.67112863", "text": "def create(self, path, mode):\n raise fuse.FuseOSError(errno.EROFS)", "title": "" }, { "docid": "de1befffdd66c38bb0661313c6d92c86", "score": "0.6664996", "text": "def make_file(path, contents='', overwrite=False):\n if overwrite or not os.path.exists(path):\n with open(path, 'w') as fh:\n fh.write(contents)\n return True\n\n return False", "title": "" }, { "docid": "8a051028e381a0f6d48c6db238946a09", "score": "0.6636268", "text": "def write_dummy_file(path_to_output_file):\n with open(path_to_output_file, 'w') as inF:\n inF.write(\"\")", "title": "" }, { "docid": "5d9955a24be85584899d60663343d710", "score": "0.6622351", "text": "def createFile(input, path):\n try:\n filename = input['filename']\n if not os.path.exists(filename):\n if not input['file-content']:\n with open(os.path.join(path, filename), 'w') as fp:\n pass\n else:\n content = input['file-content']\n with open(os.path.join(path, filename), 'w') as fp:\n fp.write(content)\n return \"File created successfully\", 200\n except FileExistsError:\n raise FileAlreadyExistsError\n except Exception:\n raise InvalidPathError", "title": "" }, { "docid": "ebf83e7dbe28b514e9d88939b27e62de", "score": "0.6606387", "text": "def create_file(path, content, name, force=False, mode=0o664, quiet=False):\n say = log.debug if quiet else echo\n action = 'created'\n if os.path.exists(path):\n if force:\n action = 'overwritten'\n else:\n raise GeneralError(\"File '{}' already exists.\".format(path))\n try:\n with open(path, 'w') as file_:\n file_.write(content)\n say(\"{} '{}' {}.\".format(name.capitalize(), path, action))\n except OSError as error:\n raise GeneralError(\"Failed to create {} '{}' ({})\".format(\n name, path, error))", "title": "" }, { "docid": "2e6c67b9f0d3873f78dbdb4fa7376c41", "score": "0.65368617", "text": "def ensure_file(path):\n ensure_directory_containing(path)\n open(path, 'a+').close() # touch the file", "title": "" }, { "docid": "2c2bcfa0b8fded0584d0c6954118142e", "score": "0.6445845", "text": "def create_file(path, contents, writemode=\"w\", perms=0o600):\n with open(path, writemode) as f:\n f.write(contents)\n os.chmod(path, perms)\n logging.info(f\"Created file {path} with permissions {oct(perms)}\")", "title": "" }, { "docid": "a34b1723dd24980daade32927abcc349", "score": "0.6429738", "text": "def create_file(self, path, opt=None):\n\n url = self._paths_url(path, 'create-file')\n return self._post(url, opt).json()", "title": "" }, { "docid": "c1283fd49e8419836552700529a538e5", "score": "0.6415089", "text": "def create(self, path, mode, fi=None):\n full_path = self._full_path(path)\n return os.open(full_path, os.O_WRONLY | os.O_CREAT, mode)", "title": "" }, { "docid": "c5cff58750e8dc126fcc3f7b045b6461", "score": "0.6385919", "text": "def _touch(path, overwrite=False):\n if overwrite or not os.path.exists(path):\n with open(path, 'w') as fd:\n fd.write('')", "title": "" }, { "docid": "fc971ba49cd8d71aec246a7e295d848f", "score": "0.6359612", "text": "def make_dummy_file(fn):\n import time\n mkdir_p(os.path.dirname(fn))\n ff=open(fn,'w')\n ff.write(\"DummyFile with Proper time stamp\")\n time.sleep(1) # 1 second\n ff.close()", "title": "" }, { "docid": "c8a95719a06d98da3dd943e1d615eb5a", "score": "0.63469476", "text": "def cleanFile(path):\n file=open(path,'w')\n file.write(\"\")\n file.close", "title": "" }, { "docid": "c3a636442e8e8a553c17588e909a1152", "score": "0.6333022", "text": "def mk_file(filename):\n if not 
os.path.exists(filename):\n open(filename, 'w').close() # noqa: WPS515", "title": "" }, { "docid": "5b065fc0a42a626b0b9a582c4b778d50", "score": "0.6330847", "text": "def create_file(self, path: str)-> bool:\n try:\n open(path, mode=\"x\")\n return True\n except FileExistsError:\n print(\"The file is created\")\n return False", "title": "" }, { "docid": "5d99834d14b5572fbb3c3763f48e342c", "score": "0.63076454", "text": "def _mkfile(f, message=None):\n assert not os.path.exists(f), \"File already exists: {}\".format(f)\n with open(f, 'w'):\n if message:\n print(\"{}: {}\".format(message, f))\n return f", "title": "" }, { "docid": "60461164f87d69232c7d72b6d4137df2", "score": "0.6296771", "text": "def touch(path):\n open(path, 'a').close()", "title": "" }, { "docid": "33d28b785177cf0bcbec56e83a93e6f3", "score": "0.6264807", "text": "def write_file(\n self, path: Union[Path, str], contents: bytes, mode: int = 0o644\n ) -> None:\n file_path = self.checkout_path / path\n file_path.parent.mkdir(parents=True, exist_ok=True)\n with file_path.open(\"wb\") as f:\n os.fchmod(f.fileno(), mode)\n f.write(contents)", "title": "" }, { "docid": "3f397d37cb4ada84a9eb898ef717a4e6", "score": "0.6233215", "text": "def create_file(file_name, size):\n with open(file_name, 'wb') as f:\n if size:\n f.seek(size - 1)\n f.write(b'\\x00')", "title": "" }, { "docid": "6f30aa65636e74171b9b7779796c2178", "score": "0.623254", "text": "def create_null_file(mode, closed):\n f = open(os.devnull, mode)\n if closed:\n f.close()\n return f", "title": "" }, { "docid": "316fd15ec68463c2f56db4c7be4be418", "score": "0.6222454", "text": "def _create_path_ignore_existing(self, path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as e:\n # File exists (17) is okay\n if e.errno != 17:\n raise\n self._fix_permission(path)", "title": "" }, { "docid": "99e3fe08cc874f0c2895bc8c5c51123f", "score": "0.62214166", "text": "def create_tmpfile(self) -> str:\n tmp_path = self.generate_tracking_path()\n Path(tmp_path).parent.absolute().mkdir(parents=True, exist_ok=True)\n\n # Create an empty file\n with open(tmp_path, 'w') as fp:\n pass\n\n return tmp_path", "title": "" }, { "docid": "bec4101f593b91979981958b3d936356", "score": "0.6190332", "text": "def create(self, name, content = None):\r\n\r\n path = self.get_path(name)\r\n\r\n with io.open(path, 'wb') as f:\r\n if content:\r\n if isinstance(content, bytes):\r\n f.writelines(content)\r\n else:\r\n for b in content:\r\n f.write(b)\r\n\r\n return path", "title": "" }, { "docid": "86b7a9e854cb5e84d2456ce9fea4a1e0", "score": "0.6181492", "text": "def __file_writer(path, file_name, context):\n if not os.path.exists(path):\n os.makedirs(path)\n with open(path+'/'+file_name, 'a') as opener:\n opener.write(context)", "title": "" }, { "docid": "4bcee3f5ffcb0f88c8047149c286406c", "score": "0.61759245", "text": "def createPathLocally(self, path):\n os.makedirs(os.path.dirname(path), mode = 0o777, exist_ok = True)", "title": "" }, { "docid": "91b4771345fb6866e726be2494e419d9", "score": "0.616449", "text": "def newfile(path):\n if (type(path) != str):\n raise TypeError(\"Expected 'path' to be string\")\n \n print(\"Attempting to open file: \" + str(path))\n try:\n file = open(path, \"x+b\")\n file.close()\n except Exception as i:\n print(\"ERROR: Could not open file to write to: \" + str(i))\n return -1\n\n openfile(path)\n goto(0)\n return 0", "title": "" }, { "docid": "29aa2a4af37d2d3afb63950833963486", "score": "0.6153647", "text": "def create_path(file_path: Text) -> 
None:\n\n parent_dir = os.path.dirname(os.path.abspath(file_path))\n if not os.path.exists(parent_dir):\n os.makedirs(parent_dir)", "title": "" }, { "docid": "eba8fb964dfae15804a1c2c9e7e0108c", "score": "0.6135784", "text": "def write_file(file_path,content=None,mode='w'):\n with open(file_path,mode) as f:\n f.write(content)", "title": "" }, { "docid": "fad33dcb345e6f391af5c3413ea446fa", "score": "0.6114738", "text": "def _create(self, path):\r\n if self._create_file_operation:\r\n path = os.path.join(self._base_path, path)\r\n folder = os.path.split(path)[0]\r\n if not os.path.exists(folder):\r\n os.makedirs(folder)\r\n ninjaide = IDE.get_service('ide')\r\n current_nfile = ninjaide.get_or_create_nfile(path)\r\n current_nfile.create()\r\n main_container = IDE.get_service('main_container')\r\n if main_container:\r\n main_container.open_file(path)\r\n else:\r\n if not os.path.exists(path):\r\n file_manager.create_folder(path)\r\n self.hide()", "title": "" }, { "docid": "dbff328f21f7e893c89c0c60e39d961f", "score": "0.60797495", "text": "def ensure_path(path: str):\n if path != '' and not os.path.exists(path):\n os.mkdir(path)", "title": "" }, { "docid": "ba685ad4a5d42895d6b127950951263f", "score": "0.6054938", "text": "def _ensure_file_path(self):\n storage_root = os.path.dirname(self.file_path)\n needs_storage_root = storage_root and not os.path.isdir(storage_root)\n if needs_storage_root: # pragma: no cover\n os.makedirs(storage_root)\n if not os.path.isfile(self.file_path):\n # create the file without group/world permissions\n with open(self.file_path, 'w'):\n pass\n user_read_write = 0o600\n os.chmod(self.file_path, user_read_write)", "title": "" }, { "docid": "aa902aaefff163026b1c299b680d76cf", "score": "0.6053032", "text": "def gen_path(path):\n\n if not os.path.exists(path): # pragma: no cover\n os.makedirs(path)", "title": "" }, { "docid": "89f3baad36ffa8bde0a2ebe6bbffc784", "score": "0.6046188", "text": "def __init__(self, path, mode, overwrite=False):\n self.path = path\n if mode == 'r':\n if not os.path.exists(path):\n raise FileNotFoundError('{} not found.'.format(path))\n elif mode == 'w':\n path_directory = os.path.dirname(path)\n if path_directory and not os.path.exists(path_directory):\n raise FileNotFoundError('Output file path does not exist! --> {}'.format(path_directory))\n if os.path.exists(path):\n if not overwrite:\n # Raise most specific subclass of FileExistsError (3.6) and IOError (2.7).\n raise FileExistsError('Output file already exists! 
--> {}'.format(path))\n            else:\n                os.remove(path)", "title": "" },
{ "docid": "e363f8ad98000a748d19160bcf1fbda6", "score": "0.6036646", "text": "def write_file(wt, path, text):\n    f = pathlib.Path(os.path.join(wt, path))\n    os.makedirs(f.parent, exist_ok=True)\n    with open(os.path.join(wt, path), 'w') as file:\n        file.write(text)\n    return f", "title": "" },
{ "docid": "8c31e8c919102fbd2305c583d12c4ead", "score": "0.601689", "text": "def safe_open_w(path):\n    mkdir_p(os.path.dirname(path))\n    return open(path, 'w')", "title": "" },
{ "docid": "57b6dad4e295c67565a4a0c195d54656", "score": "0.6013557", "text": "def touch(path: str):\n    basedir = os.path.dirname(path)\n    if not os.path.exists(basedir):\n        os.makedirs(basedir)\n    with open(path, 'a'):\n        os.utime(path, None)", "title": "" },
{ "docid": "d4cb5cb668c69dd12d3db6f11454fba2", "score": "0.6011867", "text": "def create_path(path):\n    dir = os.path.dirname(path)\n    if not os.path.exists(dir):\n        os.makedirs(dir)", "title": "" },
{ "docid": "7f461a6696c6ca957e2aaa3429f18130", "score": "0.59772563", "text": "def write_file(self, path, content):\n        tmp_fp, tmp_filename = tempfile.mkstemp()\n        os.write(tmp_fp, content)\n        os.close(tmp_fp)\n        self.move(tmp_filename, path)", "title": "" },
{ "docid": "a41d3e26d6393337bfbcc0e063068025", "score": "0.5976787", "text": "def create_path(path):\n    if not os.path.isdir(path):\n        os.makedirs(path, exist_ok=True)", "title": "" },
{ "docid": "fb74de23fe25a1d2e552f83d76adcf50", "score": "0.5972546", "text": "def _ensure_file_exists(self):\n\n        Path(self._file_path).touch()", "title": "" },
{ "docid": "7678c4e8867b0d05a380958b311f9d4e", "score": "0.5961828", "text": "def create_file(self, path, perms=None, handler=None):\n        self.java_obj.createFile(path, perms, AsyncHandler(handler))\n        return self", "title": "" },
{ "docid": "35d6e16ebb0a500a9a9e9f5346777abb", "score": "0.5955663", "text": "def eraseFileContents(path: str = \"\"):\n    if path == \"\": raise Exception(\"It looks like you forgot to give me the path\")\n    open(path, \"w\").close()", "title": "" },
{ "docid": "1ceb8d869cb94aad4be827c76505789c", "score": "0.59308577", "text": "def t_createfile(self, filepath, uesec_ctime=None):", "title": "" },
{ "docid": "171df7ba6920fcb124f9ded4f8a5b6c6", "score": "0.59275395", "text": "def creat_excl(path, mode=0o644):\n    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, mode)\n    return os.fdopen(fd, 'w')", "title": "" },
{ "docid": "b8ef53d1cdccdb1af04fa86311ca4226", "score": "0.5918701", "text": "def create(self, name,flags,mode,umask, ctx=None):\n\t\traise IOError(errno.EROFS, \"File.create is not implemented\")", "title": "" },
{ "docid": "115562ecf329a59e95dd46b004e676a4", "score": "0.5909201", "text": "def create_modified_file(self):\n        file_name = os.path.join(self.dir, str(uuid.uuid4()))\n        # create the file\n        with open(file_name, \"wb\") as file_handler:\n            file_handler.write(b\"\\0\")\n\n        st = os.stat(file_name)\n        access_time = st[ST_ATIME]\n        modified_time = st[ST_MTIME]\n\n        os.utime(file_name, (access_time, modified_time + (4 * 3600)))", "title": "" },
{ "docid": "d02eefd989ddc8c43bde035a376de795", "score": "0.588853", "text": "def new_file(name):\n    if os.path.exists(name): os.unlink(name)\n    return file(name, 'wt')", "title": "" },
{ "docid": "ef54d11f22a7c875f806fdd5046e9aa3", "score": "0.5880125", "text": "def mkfile(path, url):\n    # Get a
connection to ACR.\n proxy = CURRENT_ACR\n \n # Invoke the method.\n try:\n proxy.astrogrid.myspace.createFile(path)\n proxy.astrogrid.myspace.copyURLToContent(url, path)\n except:\n raise(MethodInvocationError())\n return", "title": "" }, { "docid": "2fa5ea25396be175ad2a994b10eefddf", "score": "0.5866333", "text": "def _ProduceFile(self, path):\n f = open(path, \"wb+\")\n desired_size = random.randint(self.min_file_size, self.max_file_size)\n if desired_size > 0:\n # generate pseudo-random bytes and write to file\n # (while trying to keep python 2/3 compatibility)\n if PY39: # if python > 3.9, getrandbytes is available and the best choice\n f.write(random.randbytes(desired_size))\n elif PY3: # if python 3, we can use the implementation of randbytes\n f.write(random.getrandbits(desired_size * 8).to_bytes(desired_size, 'little'))\n elif PY2: # if python 2, need to use an alternative for to_bytes\n f.write(hex(random.getrandbits(desired_size * 8)))\n f.close()\n self.files_produced += 1\n self.bytes_produced += desired_size", "title": "" }, { "docid": "7a7cda96de4035335b6fcfa9d3c652fd", "score": "0.58517593", "text": "def touch(path):\r\n\r\n with file(path, 'a'):\r\n os.utime(path, None)", "title": "" }, { "docid": "5e4959cd599401c1192cc7a79c506402", "score": "0.5850014", "text": "def save(self, file_path: str) -> None:\n Path(file_path).parent.mkdir(parents=True, exist_ok=True)\n export_to(file_path)", "title": "" }, { "docid": "5da92f2ba9bd372514eee061264bce98", "score": "0.58111423", "text": "def touch(self, path, truncate=True, **kwargs):\n if truncate or not self.exists(path):\n with self.open(path, \"wb\", **kwargs):\n pass\n else:\n raise NotImplementedError # update timestamp, if possible", "title": "" }, { "docid": "26d23b73466789ebecdcb40cfa494fb6", "score": "0.5807744", "text": "def create_file(self, name):\n\n return self._sysroot.create_file(name, component=self)", "title": "" }, { "docid": "e67b6917eaf0572cf92bf30ad2f484f7", "score": "0.5801281", "text": "def _ensure_empty_dir(self, path):\n self._executor.ensure_dir_exists(path, ensure_empty=True)", "title": "" }, { "docid": "f4b2c921a3835d995b81b9aebd9791cb", "score": "0.58006984", "text": "def write_file(path, content):\n try:\n _file = open(path, \"w+\")\n _file.write(content)\n _file.close()\n except EnvironmentError as e:\n print(\"File operation failed\\nError: %s\" % e)\n return \"\", -1\n return path, 0", "title": "" }, { "docid": "72684619daf9960d261fc066f53be806", "score": "0.5798606", "text": "def make_file(self, feed, path):\n data = self.create_book(feed)\n file = \"\".join(str(\"book of news\"))\n file_path = os.path.join(path, f\"{file}.epub\")\n epub.write_epub(f\"{file_path}\", data, {})\n if not os.path.exists(file_path):\n logger.error(\"Bad path\")", "title": "" }, { "docid": "c533b44d5c800a9e8039f28646ab361a", "score": "0.57896715", "text": "def test_make_scratch_file_1 (self):\n\t\t## Preparations:\n\t\tnew_file = 'foo.txt'\n\t\t## Main:\n\t\tf = scratchfile.make_scratch_file (new_file)\n\t\tassert (f.startswith (tempfile.tempdir))\n\t\tassert (f.endswith ('foo.txt'))\n\t\tassert (not os.path.exists (f))\n\t\toutfile = open (f, 'w')\n\t\toutfile.write (\"bar\")\n\t\toutfile.close()\n\t\tassert (os.path.exists (f))\n\t\tos.remove (f)", "title": "" }, { "docid": "e2d5ea66acb3e3d937aa92355f99532d", "score": "0.5787441", "text": "def mkdir_p(path):\n if not os.path.exists(path):\n os.makedirs(path)", "title": "" }, { "docid": "adeda7724f4ea5194ac847408653572a", "score": "0.57844174", "text": "def 
mkdir(path):\n    if not os.path.exists(path):\n        os.mkdir(path)", "title": "" },
{ "docid": "ae31dbb6afb9fc94e743c6fd1d1008a2", "score": "0.57814527", "text": "def write_file(path, data):\n    with open(path, 'w') as f:\n        f.write(data)", "title": "" },
{ "docid": "8aa864d955b2137fe26eb0b10a2b5a53", "score": "0.57689935", "text": "def create(self, header=None, clobber=False):\n        if not clobber and self.exists():\n            raise ValueError(\"File already exists, user clobber=True to overwrite\")\n\n        self.directory.build()\n        header = self.header if header is None else header\n        if isinstance(header, basestring):\n            with self.open(\"w\") as f:\n                f.write(header)\n        elif hasattr(header, \"__call__\"):\n            with self.open(\"w\") as f:\n                f.write(header()) \n        elif header is not None:\n            raise ValueError(\"header must be a string or a callable method got a %s object\"%type(header))\n        else: \n            with self.open(\"w\") as f:\n                f.write(\"\")", "title": "" },
{ "docid": "e84e36c458742fef9906e69c8fc5085e", "score": "0.5768141", "text": "def new(cls, path, force):\n\n        if folder_exists(path) and path != '.':\n\n            err_msg = f\"\"\"\n            [Error]: Could not create directory.\n            Path ({os.path.abspath(path)}) Already Exists.\n            Please make sure the directory is empty or use --force\n            to overwrite the files.\n            \"\"\"\n\n            if force:\n                print(f'Overwriting content inside {path}')\n                clear_directory(path)\n                print('Done !')\n                return cls._generate_boilerplate(path)\n\n            print_error_and_exit(err_msg)\n\n        else:\n            cls._generate_boilerplate(path)", "title": "" },
{ "docid": "a04fa533123c3aaedf58b65935dd5f48", "score": "0.57657677", "text": "def prepare_dir(path, empty=False):\n    if not os.path.exists(path):\n        create_dir(path)", "title": "" },
{ "docid": "8bcac5f8d51cd43677789ba57ea8ae30", "score": "0.5765557", "text": "def touch(path):\n    with open(path, 'a'):\n        os.utime(path, None)", "title": "" },
{ "docid": "49738880b27c2112695b220770f78980", "score": "0.57558644", "text": "def createPath(self, path):\n        if os.path.abspath('.') != os.path.abspath(path):\n            try:\n                os.makedirs(path)\n            except OSError:\n                print \"Error: Path already exists.\"\n                self._handleCollision(path)", "title": "" },
{ "docid": "29282119b760bfd177ba88dd0efdd2f2", "score": "0.5747097", "text": "def wipe(path):\n    file = open(path, 'w')\n    file.close()", "title": "" },
{ "docid": "a67ca3cf5b92801e16faac454d8efe5a", "score": "0.574565", "text": "def touch(path):\n\n    with open(path, 'a'):\n        os.utime(path, None)", "title": "" },
{ "docid": "42bdf3c1b53e48c5cd3b124bc95b25e6", "score": "0.57439554", "text": "def open(path):\n        if Writer.f is None:\n            Writer.f = open(path, 'w')", "title": "" },
{ "docid": "fdced5b38233330237bd78dd4c174103", "score": "0.57418644", "text": "def generateEmptyFile(self):\n        return self.__generate_empty_file", "title": "" },
{ "docid": "dad62ce4de09fd0664aaddc94991a752", "score": "0.57410675", "text": "def mkdir(self, path):", "title": "" },
{ "docid": "35d698fb9c1200d4dadacba117482592", "score": "0.5739132", "text": "def open_create(filename):\n    fd = os.open(filename, os.O_RDWR | os.O_CREAT | os.O_EXCL)\n    fp = os.fdopen(fd, 'wb')\n    return fp", "title": "" },
{ "docid": "2910630a97b5f5dd0fc8c17a023bc4ea", "score": "0.57374585", "text": "def create_sample_text_file(self):\n        path = os.path.join(self.temp_dir, self.sample_name)\n        with open(path, 'w') as f:\n            f.write(\"sample data\")\n        return path", "title": "" },
{ "docid": "bf08e5d441dd6430e901a781b64cc532",
"score": "0.57317907", "text": "def create(self, path, mode):\n return self.mknod(path, mode, 0)", "title": "" }, { "docid": "179550c15764e98d2a7fd985568a11dd", "score": "0.5729543", "text": "def create_files_dir(path):\n if not os.path.isdir(path):\n os.mkdir(path)", "title": "" }, { "docid": "20678bb9198aae9bb3dffe286ee481d4", "score": "0.57234", "text": "def make_dir(path: Path\n ) -> None:\n if not os.path.exists(path):\n os.mkdir(path)", "title": "" }, { "docid": "24b47cc05c0e5796122729f2759d256a", "score": "0.5719493", "text": "def create_simple_file(file_name):\n with open(file_name, 'w') as text_file:\n # Encode some output data so it can serve double duty\n text_file.write(\"1122,0\\n\")\n text_file.write(\"945,1\\n\")\n text_file.write(\"created by automated software for testing\\n\")", "title": "" }, { "docid": "865c8b9235ab2350078e5529fbf209aa", "score": "0.57179636", "text": "def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "title": "" }, { "docid": "db2c15f573db37f8168c630e60a3265c", "score": "0.5710188", "text": "def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:\n del mode # Unused\n if self.exists():\n if exist_ok:\n return\n else:\n raise FileExistsError(f'{self} already exists.')\n self.write_text('')", "title": "" }, { "docid": "c7d375673ebec0043f0349b3da496167", "score": "0.5706608", "text": "def open_output_file(name, mode=\"w\"):\n if os.path.dirname(name) != \"\" and \\\n not os.path.exists(os.path.dirname(name)):\n os.makedirs(os.path.dirname(name))\n return open(name, mode)", "title": "" }, { "docid": "abaa4358268c8fda781db9453510cd82", "score": "0.56926244", "text": "def touch(self, path):\n with open(path, mode='a', encoding='utf-8'):\n os.utime(path, None)", "title": "" }, { "docid": "e99c6826907a34a8374a857b829cf27d", "score": "0.5683868", "text": "def create_file_if_missing(filename, content):\n if exists(filename):\n return False\n with open(filename, 'w') as f:\n f.write(content)\n return True", "title": "" }, { "docid": "c11e108265a28686724d851a943bdf87", "score": "0.5676414", "text": "def mkdir_p(path): # pragma: no cover\n import os\n\n os.makedirs(path, exist_ok=True)", "title": "" }, { "docid": "6cdea7c44e03fcf6c857a848f84b9327", "score": "0.5654827", "text": "def create_named_file(self):\n file_name = os.path.join(self.dir, 'named_file.jpg')\n with open(file_name, \"wb\") as _:\n pass", "title": "" }, { "docid": "85829bb3107ac4a4a7b48437ad73c7bf", "score": "0.565221", "text": "def setGenerateEmptyFile(self):\n self.__generate_empty_file = True", "title": "" }, { "docid": "8cd18c3023ce603a08f355139e8b8568", "score": "0.56513876", "text": "def write_file(\n file_path: Union[Text, Path],\n content: Any,\n encoding: Text = DEFAULT_ENCODING,\n mode: Text = \"w\",\n) -> None:\n create_path(file_path)\n\n with open(file_path, mode, encoding=encoding if \"b\" not in mode else None) as file:\n file.write(content)", "title": "" }, { "docid": "277296558a28461b5b67eb22951347e4", "score": "0.56391", "text": "def touch(fp):\n fh = open(fp,'a')\n fh.close()", "title": "" } ]
5c16a482f7a757991ddf0cdaf4bb15a4
Sets the specified host's ability to accept new instances.
[ { "docid": "6b19995ff5d8a1e937b499be3b6fa6b8", "score": "0.57665277", "text": "def set_host_enabled(self, host, enabled):\n raise NotImplementedError()", "title": "" } ]
[ { "docid": "4beedd366207c71c155936dc1228c681", "score": "0.5856326", "text": "def addBan(self, hostmask, expiration=0):\r\n assert not conf.supybot.protocols.irc.strictRfc() or \\\r\n ircutils.isUserHostmask(hostmask), 'got %s' % hostmask\r\n self.bans[hostmask] = int(expiration)", "title": "" }, { "docid": "7d37e16886efd12d3e8824e70fe9369e", "score": "0.5850635", "text": "def do_guests(self):\n self._conf.ALLOW_GUESTS = not self._conf.ALLOW_GUESTS\n self._responder('Allow Guests: %s' % self._conf.ALLOW_GUESTS)", "title": "" }, { "docid": "0d1b1626dc3c2ba0ebeea10746d891c2", "score": "0.58361995", "text": "def set_host(new_host):\n\n gazu.set_host(new_host)", "title": "" }, { "docid": "025d71a99125d05f7a1f9a0bf5bf42e0", "score": "0.5642275", "text": "def add_host(self, host):\r\n self.host_list.append(host)\r\n self.check_host(host)", "title": "" }, { "docid": "518ef943a7043aaabb4768aebf885022", "score": "0.55530375", "text": "def test_vps_access():\n rules.assertPermits('5.5.5.5', adminhost, 'ssh')", "title": "" }, { "docid": "fdc62f85230668e6db4350e6a1ab5f09", "score": "0.55167353", "text": "def allow_host_ports(self) -> bool:\n return self.__allow_host_ports", "title": "" }, { "docid": "2ffbda84e0b1e8c467ae07e2f72bb237", "score": "0.5504246", "text": "def set_host(self, a_host):\n self.host = a_host", "title": "" }, { "docid": "2571493ab8cf5299f4a0f47aa1f0d82c", "score": "0.5460188", "text": "def set_privileges(self):\n self.privileges = ['aaaaa']", "title": "" }, { "docid": "bab4083473c434ceee69821671ccfec7", "score": "0.5380136", "text": "def put_instance_public_ports(portInfos=None, instanceName=None):\n pass", "title": "" }, { "docid": "b4984db3ce0abe34b19a1bed916069a5", "score": "0.53762037", "text": "def set_permission(self, username, virtual_host, configure_regex='.*',\n write_regex='.*', read_regex='.*'):\n virtual_host = quote(virtual_host, '')\n permission_payload = json.dumps({\n \"configure\": configure_regex,\n \"read\": read_regex,\n \"write\": write_regex\n })\n return self.http_client.put(API_USER_VIRTUAL_HOST_PERMISSIONS %\n (\n virtual_host,\n username\n ),\n payload=permission_payload)", "title": "" }, { "docid": "5c61ff4b4cbe2c88ab4b5fd403bbabe5", "score": "0.5294786", "text": "def add_host(self, host):\n self.test_hosts[host.name] = host", "title": "" }, { "docid": "935065bd1ea0410fa567c5d3f97ea066", "score": "0.5284111", "text": "def test_admin_access():\n rules.assertPermits(adminhost, workers + puppetmaster, 'ssh')", "title": "" }, { "docid": "24eb204210532ed0608105df60c0e3f4", "score": "0.5278607", "text": "def add_host(self, hostname):\n self._hostnames[hostname] = 1", "title": "" }, { "docid": "4600a4a39a9ed977a4b2433ba9cc1f0e", "score": "0.525653", "text": "def set_host(self, mc3_host):\n self._host = mc3_host", "title": "" }, { "docid": "fd61609ab8885ecad7e5cf51fd364182", "score": "0.5249113", "text": "def modify_hosts(AutoPlacement=None, HostIds=None, HostRecovery=None, InstanceType=None, InstanceFamily=None):\n pass", "title": "" }, { "docid": "7979013b77e7773d9bc6ffdc0d0f2199", "score": "0.5243496", "text": "def host(self, host):\n\n self._host = host", "title": "" }, { "docid": "7979013b77e7773d9bc6ffdc0d0f2199", "score": "0.5243496", "text": "def host(self, host):\n\n self._host = host", "title": "" }, { "docid": "7979013b77e7773d9bc6ffdc0d0f2199", "score": "0.5243496", "text": "def host(self, host):\n\n self._host = host", "title": "" }, { "docid": "7979013b77e7773d9bc6ffdc0d0f2199", "score": "0.5243496", "text": "def host(self, host):\n\n self._host 
= host", "title": "" }, { "docid": "3608643ed0efb0f0256ee6addb5f4dd3", "score": "0.52237034", "text": "def allow_access(self, server, share_name, access_type, access_level,\n access_to):\n if access_type != 'ip':\n reason = _('Only ip access type allowed.')\n raise exception.InvalidShareAccess(reason=reason)\n if access_level != const.ACCESS_LEVEL_RW:\n raise exception.InvalidShareAccessLevel(level=access_level)\n\n hosts = self._get_allow_hosts(server, share_name)\n if access_to in hosts:\n raise exception.ShareAccessExists(\n access_type=access_type, access=access_to)\n hosts.append(access_to)\n self._set_allow_hosts(server, hosts, share_name)", "title": "" }, { "docid": "2507800808d61436d88a0594bceff905", "score": "0.52205443", "text": "def set_permission(host, user, conf, write, read):\n rabbitmq_ctl(\"set_permissions\", \"-p\", host, user, conf, write, read)", "title": "" }, { "docid": "a11524a9ad963c50a87ad238c55a9581", "score": "0.5209604", "text": "def set_host_enabled(self, enabled):\n raise NotImplementedError()", "title": "" }, { "docid": "a4fa1be4c8a63a7d207c9c815dc2ddb7", "score": "0.5182553", "text": "def host(self, value: HostManager):\n if self._host:\n raise RuntimeError(\"HostManager already set!\")\n self._host = value", "title": "" }, { "docid": "38d372e81516ce3c2450ecded02c7f75", "score": "0.5141687", "text": "def allow_access(self, local_path, share_name, access_type, access):\n if access_type != 'ip':\n reason = 'only ip access type allowed'\n raise exception.InvalidShareAccess(reason)\n parser = ConfigParser.ConfigParser()\n parser.read(self.config)\n\n hosts = parser.get(share_name, 'hosts allow')\n if access in hosts.split():\n raise exception.ShareAccessExists(access_type=access_type,\n access=access)\n hosts += ' %s' % (access,)\n parser.set(share_name, 'hosts allow', hosts)\n self._update_config(parser)", "title": "" }, { "docid": "c7be01a46bc8e130735720a774a39457", "score": "0.5133434", "text": "def set_host(host_name, set_password=\"True\"):\n env.hosts = [host_format_full(host_name)]\n if bool(distutils.util.strtobool(set_password)):\n set_env_passwords(decrypt_get_passwords())", "title": "" }, { "docid": "964a18e57c752e9f50999a692b484b62", "score": "0.512186", "text": "def do_guest_nicks(self):\n self._conf.ALLOW_GUESTS_NICKS = not self._conf.ALLOW_GUESTS_NICKS\n self._responder('Allow Guest Nicks: %s' %\n self._conf.ALLOW_GUESTS_NICKS)", "title": "" }, { "docid": "840aab574455947a8f46ad844935aadc", "score": "0.50619423", "text": "def allow_dedicated(self, allow_dedicated):\n\n self._allow_dedicated = allow_dedicated", "title": "" }, { "docid": "6e9e501885c420df6d523077816cd8f0", "score": "0.5057353", "text": "def allow_access(self, local_path, share_name, access_type, access):\n if access_type != 'ip':\n reason = _('only ip access type allowed')\n raise exception.InvalidShareAccess(reason=reason)\n\n hosts = self._get_allow_hosts(share_name)\n if access in hosts:\n raise exception.ShareAccessExists(access_type=access_type,\n access=access)\n hosts.append(access)\n self._set_allow_hosts(hosts, share_name)", "title": "" }, { "docid": "714e3196e6f208b973f7bb56afa9a220", "score": "0.50569904", "text": "def add_host_server(self, hackathon, args):\n\n host_server = DockerHostServer(vm_name=args.vm_name,\n public_dns=args.public_dns,\n public_ip=args.public_ip,\n public_docker_api_port=args.public_docker_api_port,\n private_ip=args.private_ip,\n private_docker_api_port=args.private_docker_api_port,\n container_count=0,\n 
container_max_count=args.container_max_count,\n                                       is_auto=False,\n                                       disabled=args.get(\"disabled\", False),\n                                       hackathon=hackathon)\n\n        if self.docker.ping(host_server):\n            host_server.state = DockerHostServerStatus.DOCKER_READY\n        else:\n            host_server.state = DockerHostServerStatus.UNAVAILABLE\n\n        host_server.save()\n        return host_server.dic()", "title": "" },
{ "docid": "e6e83928aabfc4d49def93b99d8d7df3", "score": "0.5041215", "text": "def do_allowrep(self, line):\n        self._split_args(line, 0, 0)\n        self._command_processor.get_session().get_replication_policy(\n        ).set_replication_allowed(True)\n        self._print_info_if_verbose('Set replication policy to allow replication')", "title": "" },
{ "docid": "793f0a3a401167e412c814a54513dc88", "score": "0.5041042", "text": "def allow_connections_from(self, other: aws.ec2.SecurityGroup) -> None:\n        self.swarm.allow_connections_from(other, Ec2Port(\"tcp\", 9080))", "title": "" },
{ "docid": "8c609911c21e073fac4bfcec13db3bb9", "score": "0.5038368", "text": "def create(hosts_to_reserve, username=None):\r\n    hosts = models.Host.smart_get_bulk(hosts_to_reserve)\r\n    if not hosts:\r\n        raise Exception(\"At least one host must be specified\")\r\n    # check if this user can access specified hosts\r\n    user = get_user(username)\r\n    models.AclGroup.check_for_acl_violation_hosts(hosts, user.login)\r\n    user_acl, created = models.AclGroup.objects.get_or_create(name=user.login)\r\n    if created:\r\n        user_acl.users = [user]\r\n        user_acl.save()\r\n    for host in hosts:\r\n        # remove host from other acls\r\n        user_acl.hosts.clear()\r\n        host.aclgroup_set.add(user_acl)\r\n    # and add to reservation acl\r\n    user_acl.hosts.add(*hosts)\r\n    user_acl.on_host_membership_change()", "title": "" },
{ "docid": "9e63b86d0d26941df1d007023e78e819", "score": "0.5011117", "text": "def set(ip, hosts):\n    hosts_file = Hosts(path=HOSTS_FILE)\n    new_entry = HostsEntry(entry_type='ipv4', address=ip, names=hosts)\n    hosts_file.add([new_entry])\n    hosts_file.write()", "title": "" },
{ "docid": "58ee9c7dc14f8e9e1311a6736dac4bcd", "score": "0.5010411", "text": "def host_compliance_count(self, host_compliance_count):\n\n        self._host_compliance_count = host_compliance_count", "title": "" },
{ "docid": "7f593c8d483bae79e8ec01f4ada86ff8", "score": "0.49946234", "text": "def set(ctx, name):\n    config = ctx.obj['config']\n    host_names = [x[\"name\"] for x in config.get(\"hosts\")]\n    if name not in host_names:\n        logger.error(\"%s is not a defined host\", name)\n        return False\n    config.set(\"active_host\", name)\n    logger.info(\"Set active host to: %s\", name)", "title": "" },
{ "docid": "430148b98c4cad1c9aa1ca2a1cea161f", "score": "0.49937284", "text": "def __allow_ingress(self):\n        try:\n            cluster_vpc_id = self.__redshift.describe_clusters(\n
ClusterIdentifier=self.dwh_config.get('DWH_CLUSTER_IDENTIFIER'))['Clusters'][0]['VpcId']\n\n vpc = self.__ec2.Vpc(id=cluster_vpc_id)\n default_sg = list(vpc.security_groups.all())[0]\n\n default_sg.authorize_ingress(\n GroupName='default',\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(self.dwh_config.get('DWH_PORT')),\n ToPort=int(self.dwh_config.get('DWH_PORT'))\n )\n except ClientError as ce:\n if ce.response['Error']['Code'] == 'InvalidPermission.Duplicate':\n print(\"The rule for the TCP ingress for the provided peer already exists.\")\n except Exception as e:\n print(e)\n raise", "title": "" }, { "docid": "38251a76c6311e801455cd5966256ef1", "score": "0.49823552", "text": "def new_host(self):\r\n d = new_host_dialog(self.master)\r\n address, username, password = d.result\r\n self.__host = Host(address, username, password, self.__local.db)\r\n self.__vms = self.__host.vms\r\n self.__vdis = {}\r\n for vm in self.__vms:\r\n self.__vdis[vm.name] = vm.vdis\r\n print(\"VMS: %s\" % self.__vms)\r\n self.populate_page()", "title": "" }, { "docid": "1b60f01d0be5820f382d34927ef3020d", "score": "0.49789384", "text": "def addIgnore(self, hostmask, expiration=0):\r\n assert ircutils.isUserHostmask(hostmask), 'got %s' % hostmask\r\n self.ignores[hostmask] = int(expiration)", "title": "" }, { "docid": "a8dc35ce7cd707616177ae75d286c89f", "score": "0.49600124", "text": "def addHostmask(self, hostmask):\r\n assert ircutils.isUserHostmask(hostmask), 'got %s' % hostmask\r\n if len(unWildcardHostmask(hostmask)) < 3:\r\n raise ValueError('Hostmask must contain at least 3 non-wildcard characters.')\r\n self.hostmasks.add(hostmask)", "title": "" }, { "docid": "4aa77c7d59b3d89cc7a4f6a83711664d", "score": "0.49578714", "text": "def assign_ports(self):\n\n if self.ports_assigned:\n return\n\n from environment import reserve_ports # pylint: disable=g-import-not-at-top\n import utils # pylint: disable=g-import-not-at-top\n\n self.zk_port_base = reserve_ports(3)\n self.hostname = utils.hostname\n self.zk_ports = ':'.join(str(self.zk_port_base + i) for i in range(3))\n self.addr = 'localhost:%d' % (self.zk_port_base + 2)\n self.ports_assigned = True", "title": "" }, { "docid": "549fc6bbe3e6248d2fe5897efdee06f0", "score": "0.4956696", "text": "def change_host(self, host, port):\n pass", "title": "" }, { "docid": "e16cdfcccbd7fbe8ae8390ad273a411a", "score": "0.4954343", "text": "def hosts(self, hosts):\n self.hosts_names = hosts\n return self", "title": "" }, { "docid": "f8b0a3bf77e7f7402e4b46594d7f9508", "score": "0.49465692", "text": "def allow_host_network(self) -> bool:\n return self.__allow_host_network", "title": "" }, { "docid": "604c3609f96ffa1b200f293a797f476a", "score": "0.49264506", "text": "def allow(self, rule):\n self._addRule(True, rule)", "title": "" }, { "docid": "ffda2c4aa5578e7a66acd9218b438f1f", "score": "0.4920946", "text": "def host_ip(self, host_ip):\n\n self._host_ip = host_ip", "title": "" }, { "docid": "dc7e6459aff204efc72ba5072eb1eb9b", "score": "0.49198905", "text": "def instruction_expose(ports):\n # need to use an openstack call to add this to the security group that the\n # instance is being deployed to.\n return True", "title": "" }, { "docid": "9a23aa888824214ebca3c369d2233b3b", "score": "0.48956135", "text": "def test_host_added_to_set(self):\n instance = yield self.create_host()\n found = False\n for x in model.Host.all():\n if x.identifier == 'testhost':\n found = True\n self.assert_(found)", "title": "" }, { "docid": "54cdcb36e9ceaf73f63299bf1bda1b47", "score": 
"0.48715428", "text": "def allow_host_pid(self) -> bool:\n return self.__allow_host_pid", "title": "" }, { "docid": "501c498a852f87db8675432a1a16c94a", "score": "0.48470172", "text": "def test_associate_public_ip_with_server_instance(self):\n pass", "title": "" }, { "docid": "c49fa6c7fc71c9d12a3260d2e89e41e1", "score": "0.48382053", "text": "def purchase_host_reservation(ClientToken=None, CurrencyCode=None, HostIdSet=None, LimitPrice=None, OfferingId=None):\n pass", "title": "" }, { "docid": "4d0bb9f13aa6276141784ca4dc4c3ccc", "score": "0.48376584", "text": "def allow_access(self, server, share_name, access_type, access_level,\n access_to):\n raise NotImplementedError()", "title": "" }, { "docid": "388f48631ca6c78cd2437691128482ac", "score": "0.48366988", "text": "def __init__(self, privileges=['can add post', 'can delete post', 'can ban user']):\n self.privileges = privileges", "title": "" }, { "docid": "075388c5ff8e60c972280c90fdbf86a1", "score": "0.48216915", "text": "def host_network_firewall(self, host_network_firewall):\n\n self._host_network_firewall = host_network_firewall", "title": "" }, { "docid": "a427afe1918e2a0f793deba6a403de3b", "score": "0.47971442", "text": "def add_host(self, req, host_meta):\n self._enforce(req, 'add_host')\n # if host is update in '_verify_interface_among_hosts', no need add\n # host continue.\n cluster_id = host_meta.get('cluster', None)\n if cluster_id:\n self.get_cluster_meta_or_404(req, cluster_id)\n\n if 'role' in host_meta and host_meta['role']:\n role_id_list = []\n host_roles = []\n if 'cluster' in host_meta:\n params = self._get_query_params(req)\n role_list = registry.get_roles_detail(req.context, **params)\n for role_name in role_list:\n if role_name['cluster_id'] == host_meta['cluster']:\n host_roles = list(host_meta['role'])\n for host_role in host_roles:\n if role_name['name'] == host_role:\n role_id_list.append(role_name['id'])\n continue\n if len(role_id_list) != len(host_roles):\n msg = \"The role of params %s is not exist, \" \\\n \"please use the right name\" % host_roles\n LOG.error(msg)\n raise HTTPBadRequest(explanation=msg,\n request=req,\n content_type=\"text/plain\")\n host_meta['role'] = role_id_list\n else:\n msg = \"cluster params is none\"\n LOG.error(msg)\n raise HTTPBadRequest(explanation=msg,\n request=req,\n content_type=\"text/plain\")\n # if host is found from ssh, don't set pxe interface\n if host_meta.get('os_status', None) == 'init':\n self._set_pxe_interface_for_host(req, host_meta)\n\n result = self._check_add_host_interfaces(req, host_meta)\n if result:\n return result\n\n if 'resource_type' in host_meta:\n if host_meta['resource_type'] not in self.support_resource_type:\n msg = \"resource type is not supported, please use it in %s\" % \\\n self.support_resource_type\n LOG.error(msg)\n raise HTTPBadRequest(explanation=msg,\n request=req,\n content_type=\"text/plain\")\n else:\n host_meta['resource_type'] = 'baremetal'\n\n if 'os_status' in host_meta:\n if host_meta['os_status'] not in ['init', 'installing',\n 'active', 'failed', 'none']:\n msg = \"os_status is not valid.\"\n LOG.error(msg)\n raise HTTPBadRequest(explanation=msg,\n request=req,\n content_type=\"text/plain\")\n\n self._check_dvs_huge(host_meta)\n\n if host_meta.get('config_set_id'):\n self.get_config_set_meta_or_404(req,\n host_meta['config_set_id'])\n if host_meta.get(\"discover_mode\") and \\\n (host_meta[\"discover_mode\"] not in self.support_discover_mode):\n msg = \"discover mode is not supported, please use it in %s\" % \\\n 
self.support_discover_mode\n            LOG.error(msg)\n            raise HTTPBadRequest(explanation=msg,\n                                 request=req,\n                                 content_type=\"text/plain\")\n\n        host_meta = registry.add_host_metadata(req.context, host_meta)\n\n        return {'host_meta': host_meta}", "title": "" },
{ "docid": "a9c80d4be665a947058b79aa1ab833a6", "score": "0.47786993", "text": "def test_set_cluster_ha_safe(self):\n        assert ll_hosts.activate_host(\n            positive=True, host=conf.HOSTS[1], host_resource=conf.VDS_HOSTS[1]\n        )\n\n        testflow.step(\n            \"Check if cluster %s is HA safe\", conf.CLUSTER_NAME[0]\n        )\n        assert helpers.is_cluster_ha_safe()", "title": "" },
{ "docid": "4686741cd26d47a9675f5c240a169b34", "score": "0.47637606", "text": "def open_instance_public_ports(portInfo=None, instanceName=None):\n    pass", "title": "" },
{ "docid": "3430140bb45a38bdc84d5b06c50c5a98", "score": "0.47607416", "text": "def allow_server_access(self) -> bool:\n        return False", "title": "" },
{ "docid": "6c4fee98a194855966c09f7b84b43276", "score": "0.47597373", "text": "def __init__(self, privileges=[\"can add post\", \"can delete post\", \"can ban user\"]):\n        self.privileges = privileges", "title": "" },
{ "docid": "a53deb0cdf5017b0cb8de93afcd30405", "score": "0.47556776", "text": "def init_host(self, host):\n        pass", "title": "" },
{ "docid": "856e6aa385f900f351e82bf0e71efdd8", "score": "0.47531465", "text": "def setHostKeyChecking (value):\n    old = ansible_C.HOST_KEY_CHECKING\n    ansible_C.HOST_KEY_CHECKING = value\n    return old", "title": "" },
{ "docid": "cf0578a77e2ad02fb022458427b82f7f", "score": "0.4749536", "text": "def x_allow_host(self, hostname: str) -> bool:\n        xhost_result = subprocess.run(\n            [\"xhost\", \"+local:\" + hostname],\n            stdout=subprocess.DEVNULL,\n            stderr=subprocess.DEVNULL,\n        )\n        if xhost_result.returncode != 0:\n            LOGGER.warning(xhost_result.stderr)\n            return False\n        return True", "title": "" },
{ "docid": "ffb637c700e22e791e18f24947197fcd", "score": "0.47471064", "text": "def __init__(__self__, *,\n                 allow_multiple_sites: Optional[bool] = None,\n                 public_network_access: Optional[str] = None,\n                 sites: Optional[Sequence[str]] = None):\n        if allow_multiple_sites is not None:\n            pulumi.set(__self__, \"allow_multiple_sites\", allow_multiple_sites)\n        if public_network_access is not None:\n            pulumi.set(__self__, \"public_network_access\", public_network_access)\n        if sites is not None:\n            pulumi.set(__self__, \"sites\", sites)", "title": "" },
{ "docid": "6cf790316a4c1678b3491d7205199936", "score": "0.4736972", "text": "async def allow_user(self, guild: discord.Guild, user):\n        async with self.edit_config(guild) as config:\n            allowed_users = config.get(\"allowed_users\", [])\n            # have to do manual replacement in case we get []\n            config[\"allowed_users\"] = allowed_users + [user]", "title": "" },
{ "docid": "783131215985e38809ec528e61598a3a", "score": "0.47283185", "text": "def grant_access(address, port):\n    log('granting access: {}:{}'.format(address, port), level='DEBUG')\n    ufw.grant_access(address, port=str(port), proto='tcp',\n                     index=FIRST)", "title": "" },
{ "docid": "2870bce6a9b8ff596c7a5885f2fcade1", "score": "0.47278512", "text": "def add_host(self, name, cls=DAQHost, ip_addr=None, env_vars=None, vol_maps=None,\n                 port=None, tmpdir=None):\n        params = {'ip': ip_addr} if ip_addr else {}\n        params['tmpdir'] = os.path.join(tmpdir, 'nodes') if tmpdir else None\n        params['env_vars'] = env_vars if env_vars else []\n        params['vol_maps'] =
vol_maps if vol_maps else []\n host = self.net.addHost(name, cls, **params)\n try:\n switch_link = self._retry_func(\n partial(self.net.addLink, self.pri, host, port1=port, fast=False))\n LOGGER.info('Created host %s with pid %s/%s, intf %s',\n name, host.pid, host.shell.pid, switch_link.intf1)\n host.switch_intf = switch_link.intf1\n self.switch_links[host] = switch_link\n if self.net.built:\n host.configDefault()\n self._switch_attach(self.pri, host.switch_intf)\n except Exception as e:\n host.terminate()\n raise e\n return host", "title": "" }, { "docid": "4f92c8b2bdc8791dab9b6e1e4631cb9f", "score": "0.47255218", "text": "def addHost(self, name, resrc):\n self.hosts[name] = resrc", "title": "" }, { "docid": "cc70adfb4309927a2b95c2937168ff0b", "score": "0.4719206", "text": "def add_host(vm_name, group, inventory, flavor=None, ip=None):\n\n if not group in inventory:\n inventory[group] = {\n 'hosts' : [],\n }\n\n if not vm_name in inventory[group]:\n inventory[group]['hosts'].append(vm_name)\n\n if not vm_name in inventory['_meta']:\n inventory['_meta']['hostvars'][vm_name] = {}\n if flavor:\n inventory['_meta']['hostvars'][vm_name]['flavor_id'] = flavor\n if ip:\n inventory['_meta']['hostvars'][vm_name]['ansible_ssh_host'] = ip", "title": "" }, { "docid": "a3a2bd62e3a644a0d0ce8fc3074df919", "score": "0.47128972", "text": "def set_allow_all(self, value=True):\n self.allow_all = value", "title": "" }, { "docid": "746831c0c469242e2b6834ae61efb413", "score": "0.47123715", "text": "def add_host(self, host, groups=None, vars=None):\n if not groups:\n groups = set(\"all\")\n for group in groups:\n self.add_group(group, children=groups[group].setdefault(\"children\", []))\n if host not in self.inventory[group][\"hosts\"]:\n self.inventory[group][\"hosts\"].append(host)\n if vars:\n self.add_host_vars(host, vars)", "title": "" }, { "docid": "456ebf0aaa834cee03120549f8e231e9", "score": "0.47080493", "text": "def _add_host(host):\n validated_input_host_dict = HostSchema(strict=True).load(host)\n\n input_host = Host.from_json(validated_input_host_dict.data)\n\n if not current_identity.is_trusted_system and current_identity.account_number != input_host.account:\n raise InventoryException(\n title=\"Invalid request\",\n detail=\"The account number associated with the user does not match the account number associated with the \"\n \"host\",\n )\n\n existing_host = find_existing_host(input_host.account, input_host.canonical_facts)\n\n if existing_host:\n return update_existing_host(existing_host, input_host)\n else:\n return create_new_host(input_host)", "title": "" }, { "docid": "351857836f696eee70f9c61931c526ae", "score": "0.46980044", "text": "def set_host(self, hostname, port=None):\n L = logging.getLogger(__name__)\n self._dest = (hostname, port if port else 80)\n self.payload = {}\n L.debug( f'Monitor host set to \"{hostname}\", port={port}.' 
)", "title": "" }, { "docid": "cbf0f6ac6afe80d40433319242256a4f", "score": "0.46887603", "text": "def add_host(params, host_name, host_visible_name):\n try:\n result = params['zapi'].host.create(\n host=host_name,\n name=host_visible_name,\n description=host_visible_name,\n interfaces=[\n {\n 'type': 1,\n 'main': 1,\n 'useip': 1,\n 'ip': '127.0.0.1',\n 'dns': '',\n 'port': '10050'\n }\n ],\n groups=[\n {\n 'groupid': params['zabbix_group_id']\n }\n ],\n templates=[\n {\n 'templateid': params['envmon_template_id']\n }\n ] if params['envmon'] else []\n )\n return result['hostids'][0]\n except Exception as e:\n if params['debug'] > 1:\n print_debug(u\"add_host: {}\".format(e))\n return None", "title": "" }, { "docid": "34c9442f2591b9718dcc32b1b96c2514", "score": "0.4684", "text": "def __init__(self, hosts):\n if not isinstance(hosts, list):\n hosts = [hosts]\n self.hosts = hosts", "title": "" }, { "docid": "3fedbe46169088a8ae3b2adacd9d58b4", "score": "0.46826023", "text": "def __init__(self, host=\"192.168.144.30\"):\n self.host = host", "title": "" }, { "docid": "3cb6b8ef11549bebcd38991fef3a28d0", "score": "0.46779296", "text": "def available_public_ips(self, available_public_ips):\n self._available_public_ips = available_public_ips", "title": "" }, { "docid": "344d5e6d3e07e1ae4b5e48f78b563560", "score": "0.4675965", "text": "def __set_permissions(self):\n if self.nurse:\n self.overtime.Enable ( )\n self.packet_night_turnuses.Enable ( )\n \n self.overtime.SetValue (self.nurse.has_overtime ( ))\n self.packet_night_turnuses.SetValue (self.nurse.packet_night_turnuses)\n else:\n self.overtime.SetValue (False)\n self.packet_night_turnuses.SetValue (False)\n \n self.overtime.Disable ( )\n self.packet_night_turnuses.Disable ( )", "title": "" }, { "docid": "6bd7582ea2bfad068b680fe9485a9ca1", "score": "0.466902", "text": "def set_group_for_host(self):\r\n\r\n init_counter=0\r\n\r\n end_counter=500\r\n\r\n for item in self.host_to_group_list: # go throug data from csv\r\n\r\n print (item)\r\n\r\n payload ={} # common payload\r\n\r\n group_payload = {} # help payload for nat-settings\r\n\r\n payload ['name'] = item ['name'] # assing name to be able check if object exists\r\n\r\n\r\n\r\n if item ['group'] == None: # if nat-settings is true and should be configured, remove name and create nat-settings payload\r\n\r\n continue\r\n\r\n else:\r\n\r\n group_payload['add'] = item ['group']\r\n\r\n\r\n\r\n if self.connect.check_object('show-host', payload) == True: # check if network object exists\r\n\r\n payload['groups'] = group_payload # add nat_payload to common payload for request\r\n\r\n print(\"Adding host: \" + item['name'] + \" \" + \"into group:\" + item ['group'])\r\n\r\n self.connect.send_cmd('set-host', payload) # modify network settings with NAT config\r\n\r\n init_counter +=1\r\n\r\n print (init_counter)\r\n\r\n if init_counter==end_counter:\r\n\r\n self.connect.publish()\r\n print(\"keepalive:\".format(self.connect.keep_alive()))\r\n\r\n end_counter+=500\r\n\r\n print (\"End counter changed: {}\".format(end_counter))\r\n\r\n\r\n\r\n self.connect.publish()", "title": "" }, { "docid": "63bb07fcbb56339afe7b19df62c9ad28", "score": "0.46635157", "text": "def can(self, can):\n self._can = can", "title": "" }, { "docid": "140200638fc333995948be81e93cfe48", "score": "0.46601337", "text": "def assign_hosting_client(self, client):\n\n # custom messages\n if self.hosting_client.tier == 0:\n print(\"/!\\ Hosting place assigned for the first time to \")\n client.display_client()\n\n elif 
isinstance(self.hosting_client, Client) and self.hosting_client.tier != 0:\n print(\"KICKED OUT WARNING ! ??\")\n print(\"Previous client hosting \")\n self.hosting_client.display_client()\n print(\"has been replaced by the following client \")\n client.display_client()\n else:\n raise ValueError('self.hosting_client. Wrong type, expected client')\n\n self.hosting_client = client\n return True", "title": "" }, { "docid": "e2fe4182b06a37b9769250a0da7feea1", "score": "0.4659995", "text": "def __init__(self):\n self.privileges = [\n 'can add post',\n 'can delete post',\n 'can ban user',\n ]", "title": "" }, { "docid": "d10e2c2abdb4c3d7f22e6e33a4438022", "score": "0.46589375", "text": "async def can_provision(self, hosts):\n return True", "title": "" }, { "docid": "4828807ebb183aac28e09f712b7464ee", "score": "0.4652449", "text": "def checkHost(host):\n if host not in config.valid_hosts:\n raise Exception(\"Invalid Server\")", "title": "" }, { "docid": "1ca2b6b30997b1763a65720314b0870b", "score": "0.46515453", "text": "def launch_on_demand():\n ec2 = boto3.client('ec2')\n ec2r = boto3.resource('ec2')\n instances = ec2r.create_instances(DryRun=False, ImageId=AMI,\n KeyName=KeyName, MinCount=1, MaxCount=1,\n SecurityGroups=[SecurityGroupId, ],\n InstanceType=\"p2.xlarge\",\n Monitoring={'Enabled': True, },\n IamInstanceProfile=IAM_ROLE)\n for instance in instances:\n instance.wait_until_running()\n instance.reload()\n print(instance.id, instance.instance_type)\n logging.info(\"instance allocated\")\n with open(\"host\", 'w') as out:\n out.write(instance.public_ip_address)\n env.hosts = [instance.public_ip_address, ]\n fh = open(\"connect.sh\", 'w')\n fh.write(\n \"#!/bin/bash\\n\" + 'autossh -M 0 -o \"ServerAliveInterval 30\" -o \"ServerAliveCountMax 3\" -L 8600:localhost:8000 -i ' + env.key_filename + \" \" + env.user + \"@\" +\n env.hosts[0] + \"\\n\")\n fh.close()", "title": "" }, { "docid": "6db56a1cd54f0b471142d5287167bc73", "score": "0.46446654", "text": "def select_host(self, cluster):\n drs_config = cluster.configuration.drsConfig\n drs_enabled = drs_config.enabled\n drs_fully_automated = drs_config.defaultVmBehavior == pyVmomi.vim.cluster.DrsConfigInfo.DrsBehavior.fullyAutomated\n dynamic_placement = drs_enabled and drs_fully_automated\n\n if not self.host and not dynamic_placement:\n self.host = pvc.widget.common.choose_host(\n agent=self.agent,\n dialog=self.dialog,\n folder=cluster\n )\n\n if not self.host:\n self.dialog.msgbox(\n title='Create New Virtual Machine',\n text='No valid host selected'\n )\n return False\n\n return True", "title": "" }, { "docid": "9a132023b3c38af69d3e1e5a946c6427", "score": "0.46443272", "text": "def setDefaultCapability(self, b):\r\n self.defaultAllow = b", "title": "" }, { "docid": "8a4e5e29793fa4a676d4d07f43137c0a", "score": "0.4642613", "text": "def allow_host_ipc(self) -> bool:\n return self.__allow_host_ipc", "title": "" }, { "docid": "e80c54a1dd238a6c0d1bcb4d1a2405ea", "score": "0.4630563", "text": "async def allow(self, ctx: commands.Context, member: discord.Member):\r\n await self._process_allow_deny(ctx, True, member=member)", "title": "" }, { "docid": "6be073a98c074032a19f42ab6af42296", "score": "0.4630514", "text": "def test_servers_puppet():\n rules.assertPermits(servers, puppetmaster, 'puppet')", "title": "" }, { "docid": "3dc92f0d7852de258bf88f0dd6b15527", "score": "0.4628045", "text": "def set_allow_all(self, value=True):\n self._allow_all = value", "title": "" }, { "docid": "aa150f29377a5a3f347d68faf0d3f07f", "score": "0.46208215", 
"text": "def bound_host(self, host: fdb.Database):\n if not isinstance(host, fdb.Database):\n raise TypeError(\"Error: host must be of type frida.Database\")\n self._bound_host = host", "title": "" }, { "docid": "cda3981175324d607a92a400a8b0396b", "score": "0.46156985", "text": "def set_allowed(type): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "9bf253fcb1a08524c7d49d77211355a2", "score": "0.46125123", "text": "def test_create_host(self):\n host = yield self.create_host()\n old = yield model.Host(host.identifier)\n self.assertFalse(old.is_new_record())", "title": "" }, { "docid": "09d82a819e0a73301c4c965c5544e4f7", "score": "0.46064016", "text": "def allow_ip(self, username, user_ip):\n\n cmd = \"ssh -o StrictHostKeyChecking=no -i %s %s@%s %s %i %s %s\" % (\n # Router\n self.keyfile,\n self.remote_user,\n self.address,\n self.cmd,\n\n # User\n self.lan_number,\n username.replace(\" \", \"\").replace(\"'\", \"\"),\n user_ip\n ) \n\n args = shlex.split(cmd)\n call(args)", "title": "" }, { "docid": "0dcfe95764d9298d3623eb479ac18753", "score": "0.46062747", "text": "def ethernet_port_create(self, hostid, values):", "title": "" }, { "docid": "15ea8c0bfe5efe3a2ba09b977a753978", "score": "0.46032357", "text": "def kube_host_upgrade_create(self, forhostid, values):", "title": "" }, { "docid": "e028fe9ae6ac0b2bcae1d2a37312ae0f", "score": "0.46009606", "text": "def _SetupHostFirewall(benchmark_spec):\n\n client_vm = benchmark_spec.vms[0]\n server_vm = benchmark_spec.vms[1]\n\n ip_addrs = [client_vm.internal_ip]\n if vm_util.ShouldRunOnExternalIpAddress():\n ip_addrs.append(client_vm.ip_address)\n\n logging.info('setting up host firewall on %s running %s for client at %s',\n server_vm.name, server_vm.image, ip_addrs)\n cmd = 'sudo iptables -A INPUT -p %s -s %s -j ACCEPT'\n for protocol in 'tcp', 'udp':\n for ip_addr in ip_addrs:\n server_vm.RemoteHostCommand(cmd % (protocol, ip_addr))", "title": "" }, { "docid": "23f47d2c260e1dc2ba5f9d34bdf70eff", "score": "0.45977163", "text": "def setUserCap(_beneficiary: address, _cap: uint256):\n\n assert msg.sender == self.owner, \"Access is denied.\"\n self.caps[_beneficiary] = _cap", "title": "" }, { "docid": "a71c878c4f4e5e17f86a8e03a6315cda", "score": "0.45964116", "text": "def __set_permissions (self):\n if self.person:\n self.scheduling_unit_selector.Enable ( )\n self.turnus_checkers.Enable ( )\n else:\n self.scheduling_unit_selector.Disable ( )\n self.turnus_checkers.Disable ( )", "title": "" } ]
429f77e50426e92b6376973ec62b2161
Returns true if both objects are not equal
[ { "docid": "6b6c9d0a192510f895b0a5b6b6744b9a", "score": "0.0", "text": "def __ne__(self, other):\n if not isinstance(other, ConfigurationList):\n return True\n\n return self.to_dict() != other.to_dict()", "title": "" } ]
[ { "docid": "c15c693dce4313646a829d4ac4001f79", "score": "0.84119445", "text": "def __ne__(self, other: object) -> bool:\n return not self == other", "title": "" }, { "docid": "08a3a7ddcafaa068642f3c6cdfde6129", "score": "0.8392633", "text": "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "title": "" }, { "docid": "cb3b6b7cec86e7987aeeba3540d17a77", "score": "0.8354901", "text": "def __ne__(self, other: object) -> bool:\n return not (self == other)", "title": "" }, { "docid": "7ebdbfa6dc13df7361da971afffb5121", "score": "0.8178081", "text": "def __ne__(self, other: 'LockdownObject') -> bool:\n return not self == other", "title": "" }, { "docid": "c8b10ff2d4aaa53b7862334b5be7e1dc", "score": "0.81453633", "text": "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "title": "" }, { "docid": "cffa8497843224674512b0ce76aad682", "score": "0.81309766", "text": "def __eq__(self,other):\n return not self.__ne__(other)", "title": "" }, { "docid": "ef570130efb7acd84e6c4bb43e4c7c28", "score": "0.8092252", "text": "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "title": "" }, { "docid": "ef570130efb7acd84e6c4bb43e4c7c28", "score": "0.8092252", "text": "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "title": "" }, { "docid": "e1fdcec5927616ae88526141e1419dec", "score": "0.8085141", "text": "def __ne__(self, other):\r\n return not self == other", "title": "" }, { "docid": "e1fdcec5927616ae88526141e1419dec", "score": "0.8085141", "text": "def __ne__(self, other):\r\n return not self == other", "title": "" }, { "docid": "e1fdcec5927616ae88526141e1419dec", "score": "0.8085141", "text": "def __ne__(self, other):\r\n return not self == other", "title": "" }, { "docid": "e1fdcec5927616ae88526141e1419dec", "score": "0.8085141", "text": "def __ne__(self, other):\r\n return not self == other", "title": "" }, { "docid": "e1fdcec5927616ae88526141e1419dec", "score": "0.8085141", "text": "def __ne__(self, other):\r\n return not self == other", "title": "" }, { "docid": "e1fdcec5927616ae88526141e1419dec", "score": "0.8085141", "text": "def __ne__(self, other):\r\n return not self == other", "title": "" }, { "docid": "e1fdcec5927616ae88526141e1419dec", "score": "0.8085141", "text": "def __ne__(self, other):\r\n return not self == other", "title": "" }, { "docid": "e1fdcec5927616ae88526141e1419dec", "score": "0.8085141", "text": "def __ne__(self, other):\r\n return not self == other", "title": "" }, { "docid": "e7ee3d5ae253b661fc44a7709b3da2af", "score": "0.8069698", "text": "def __ne__(self, other): \r\n return not self == other", "title": "" }, { "docid": "90ef9145d8d6b4537dbd1578540c4b39", "score": "0.8066694", "text": "def __ne__(self,other):\n return not self==other", "title": "" }, { "docid": "b6f615962b5b85358d73f3cc90f84266", "score": "0.8065579", "text": "def __ne__(self, other: 'Instance') -> bool:\n return not self == other", "title": "" }, { "docid": "19611e0f9fade2cddbf9d8bb12dd3022", "score": "0.8061443", "text": "def __ne__(self, other):\r\n return not self==other", "title": "" }, { "docid": "dbc6746df09736c77995c080d8fc4fc4", "score": "0.80593616", "text": "def __ne__(self, other: Any) -> bool:\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return 
not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", 
"title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": 
"30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": 
"30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.8043786", "text": "def __ne__(self, other):\n return not self == other", "title": "" } ]
d459f0d3f8b05ab0775fd17971c9d7c6
Test creating a policy with the default values
[ { "docid": "b7da8e756309a176023a1cd47ca2de83", "score": "0.71639955", "text": "def test_defaults(self):\n p = RequestPolicy.from_dict({})\n self.assertTrue(p.validate_signatures)", "title": "" } ]
[ { "docid": "16990dc1123a821d58bc52b849f9d3dd", "score": "0.71139294", "text": "def test_create_alert_policy(self):\n pass", "title": "" }, { "docid": "53e5896b50d3184c74a003708c5aafec", "score": "0.68160045", "text": "def test_execute_policy(self):\n pass", "title": "" }, { "docid": "ff5d838cd5b24a30dbb88ba7bf3c3730", "score": "0.68063253", "text": "def test_user_with_empty_policy_is_created_successfully(self):\n assert self.create_user()", "title": "" }, { "docid": "d4ed1cead924264d382c81d9cacf07b5", "score": "0.6653158", "text": "def __init__(self, policy):\n self.policy = policy", "title": "" }, { "docid": "6eabc527dfa9e84d2d0561da22bef921", "score": "0.6592734", "text": "def create_policy():\n builds = get_builds()\n avail_builds = sorted(unique_list([x[\"build\"] for x in builds]))\n avail_plats = sorted(unique_list([y[\"platform\"] for y in builds]))\n all_platforms = []\n for platform in avail_plats:\n plat = {}\n plat[\"Platform\"] = platform\n all_platforms.append(plat)\n all_builds = []\n for build in avail_builds:\n bld = {}\n bld[\"Build\"] = build\n all_builds.append(bld)\n name_to_use = input(\"Name to use for the new policy? \")\n policy_desc = input(\"Description to use for this policy? \")\n build_id = get_build_response(all_builds, avail_builds)\n plat_id = get_platform_response(all_platforms, avail_plats)\n creation = falcon.create_policies_v2(platform_name=plat_id,\n description=policy_desc,\n name=name_to_use,\n build=build_id\n )\n if creation[\"status_code\"] != 201:\n raise SystemExit(generate_api_error_list(creation[\"body\"][\"errors\"]))", "title": "" }, { "docid": "91dc0bc7cea40524090b8cddc5b7164e", "score": "0.65758103", "text": "def init_default_policies_test(request):\n def fin():\n \"\"\"\n 1) Remove PowerSaving and EvenDistribution scheduler policies\n \"\"\"\n results = list()\n for policy_name in (\n sla_conf.POLICY_CUSTOM_PS, sla_conf.POLICY_CUSTOM_ED\n ):\n results.append(\n ll_sch_policies.remove_scheduling_policy(\n policy_name=policy_name\n )\n )\n assert all(results)\n request.addfinalizer(fin)\n\n for policy_name, unit_name in zip(\n (sla_conf.POLICY_CUSTOM_PS, sla_conf.POLICY_CUSTOM_ED),\n (sla_conf.PS_OPTIMAL_FOR_CPU_UNIT, sla_conf.ED_OPTIMAL_FOR_CPU_UNIT)\n ):\n sch_helpers.add_scheduler_policy(\n policy_name=policy_name,\n policy_units=sla_conf.TEST_SCHEDULER_POLICIES_UNITS[policy_name],\n additional_params={\n sla_conf.PREFERRED_HOSTS: {sla_conf.WEIGHT_FACTOR: 99},\n unit_name: {sla_conf.WEIGHT_FACTOR: 10}\n }\n )\n assert ll_clusters.updateCluster(\n positive=True,\n cluster=sla_conf.CLUSTER_NAME[0],\n mem_ovrcmt_prc=100\n )", "title": "" }, { "docid": "5380118329383f0b397cf83fb2780e40", "score": "0.6493501", "text": "def MakeEmptyIPolicy():\n return {}", "title": "" }, { "docid": "8680bf42acb624a6932b1a8e47c272e8", "score": "0.6479176", "text": "def test_no_default_policy(self) -> None:\n room_id = self.helper.create_room_as(self.user_id, tok=self.token)\n\n self._test_retention(room_id)", "title": "" }, { "docid": "775819c9749bfbc03cf42497934e9146", "score": "0.6370387", "text": "def create_policy(self, **kwargs):\n post_body = json.dumps({'policy': kwargs})\n resp, body = self.post('policies', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "title": "" }, { "docid": "a7ce05b03a7fd0b108433fa787280d8f", "score": "0.6355079", "text": "def test_fixed_retry_policy_default_parameters(fixed_retry_policy):\n assert isinstance(\n fixed_retry_policy, 
RequestsStampede.policy.retry.AbstractRetryPolicy\n )\n\n assert isinstance(fixed_retry_policy.attempts, int)\n assert fixed_retry_policy.attempts == 5", "title": "" }, { "docid": "ed207c65d7e9deacb3b66d279aa13b97", "score": "0.62932855", "text": "def test_qos():\n #*** Instantiate policy, specifying\n #*** a particular main_policy file to use that has no custom classifiers:\n policy = policy_module.Policy(config,\n pol_dir_default=\"config/tests/regression\",\n pol_dir_user=\"config/tests/foo\",\n pol_filename=\"main_policy_regression_static.yaml\")\n assert policy.qos('default_priority') == 0\n assert policy.qos('constrained_bw') == 1\n assert policy.qos('high_priority') == 2\n assert policy.qos('low_priority') == 3\n assert policy.qos('foo') == 0", "title": "" }, { "docid": "d5e9ff9faa04e3bcbb2e55d641aad403", "score": "0.6290386", "text": "def DefaultPolicyLevel(self) -> _n_3_t_0:", "title": "" }, { "docid": "0a571dcd2622dd4aa1d3226b00be474d", "score": "0.6253099", "text": "def create_empty_policy(name, print_response=False):\n\n json_request = {\n \"policy_name\": name,\n }\n response = admin_api.create_policy_v2(json_request)\n if print_response:\n pretty = json.dumps(response, indent=4, sort_keys=True, default=str)\n print(pretty)\n return response.get(\"policy_key\")", "title": "" }, { "docid": "836742ae533daa97b9fb4e5670cb90d0", "score": "0.6203605", "text": "def test_put_scaling_policy(self):\n pass", "title": "" }, { "docid": "6dbd981bef6af891432947eb24ad41e8", "score": "0.61988413", "text": "def defaultPolicy(self, rs, v):\n vs = copy.deepcopy(v)\n act = vs.getLegalPacmanActions()\n if len(act) != 0:\n for i in range(0, 5):\n # act = vs.getLegalPacmanActions()\n ns = vs.generatePacmanSuccessor(act[random.randint(0, len(act) - 1)])\n if ns == None:\n #print None\n break\n elif ns.isLose() or ns.isWin():\n vs = ns\n #print \"in def policy win lose\"\n break\n else:\n vs = ns\n act = vs.getLegalPacmanActions()\n score = normalizedScoreEvaluation(rs, vs)\n return score", "title": "" }, { "docid": "16e71e159f3af9c25b3fd3f6268858f4", "score": "0.6184469", "text": "def governance_policy_create(cls, val):\n return cls('governance_policy_create', val)", "title": "" }, { "docid": "ba666fe5bfd68b89be51933789265660", "score": "0.61818826", "text": "def initialize_or_load(self, policy):\n pass", "title": "" }, { "docid": "cf7d230dbbd5972ac7e58429913c9154", "score": "0.6164644", "text": "def test_policies(self):\n box_env = TfEnv(DummyBoxEnv())\n discrete_env = TfEnv(DummyDiscreteEnv())\n categorical_gru_policy = CategoricalGRUPolicy(\n env_spec=discrete_env, hidden_dim=1)\n categorical_lstm_policy = CategoricalLSTMPolicy(\n env_spec=discrete_env, hidden_dim=1)\n categorical_mlp_policy = CategoricalMLPPolicy(\n env_spec=discrete_env, hidden_sizes=(1, ))\n continuous_mlp_policy = ContinuousMLPPolicy(\n env_spec=box_env, hidden_sizes=(1, ))\n deterministic_mlp_policy = DeterministicMLPPolicy(\n env_spec=box_env, hidden_sizes=(1, ))\n gaussian_gru_policy = GaussianGRUPolicy(env_spec=box_env, hidden_dim=1)\n gaussian_lstm_policy = GaussianLSTMPolicy(\n env_spec=box_env, hidden_dim=1)\n gaussian_mlp_policy = GaussianMLPPolicy(\n env_spec=box_env, hidden_sizes=(1, ))", "title": "" }, { "docid": "26b5a8b892a77063e22d26df31388532", "score": "0.6153935", "text": "def test_conditional_retry_policy_default_parameters():\n try:\n RequestsStampede.policy.retry.ConditionalRetryPolicy()\n except NotImplementedError as e:\n assert isinstance(e, NotImplementedError)\n else:\n assert False", "title": "" }, { 
"docid": "996f83bc08f0337d869e3c13d93d8e12", "score": "0.604737", "text": "def create_policy(self, **attrs):\n return self._create(_policy.Policy, **attrs)", "title": "" }, { "docid": "354f204cbb832881eb4d6bb37b78698b", "score": "0.60328877", "text": "def create_policy(self, Content: str, Description: str, Name: str, Type: str) -> Dict:\n pass", "title": "" }, { "docid": "47ff0ea66a9e602a780a69f3c8ffe340", "score": "0.6023121", "text": "def create_policy(self, **attrs):\n return self._create(policy.Policy, **attrs)", "title": "" }, { "docid": "1eebe96fdc2e6890f0d83ded0c5102ce", "score": "0.6018984", "text": "def __init__(self,\n *,\n policies: List['Policy'] = None) -> None:\n self.policies = policies", "title": "" }, { "docid": "e094daa648eff054ed393a1ca542f04a", "score": "0.59876645", "text": "def test_init(self):\n policy.AttributePolicy(contents.ProtocolVersion.create(1, 0))", "title": "" }, { "docid": "c620079a0b279e8becac2553c0d49001", "score": "0.59560364", "text": "def test_init_use_default_reparams(model, proposal, value, expected):\n proposal.use_default_reparameterisations = False\n FlowProposal.__init__(\n proposal, model, poolsize=10, use_default_reparameterisations=value\n )\n assert proposal.use_default_reparameterisations is expected", "title": "" }, { "docid": "8b3afd024a93fc199973ebba9d599b9a", "score": "0.592193", "text": "def test_get_alert_policy(self):\n pass", "title": "" }, { "docid": "1aa0087a542a12f05f25f2330204200e", "score": "0.59127384", "text": "def test_policy_definitions(self):\n definitions = get_object_column_definitions(models.Policy)\n display_names = set([val[\"display_name\"] for val in definitions.values()])\n expected_names = set([\n \"Title\",\n \"Description\",\n \"Notes\",\n \"Owner\",\n \"Primary Contact\",\n \"Secondary Contact\",\n \"Policy URL\",\n \"Reference URL\",\n \"Kind/Type\",\n \"Code\",\n \"Effective Date\",\n \"Stop Date\",\n \"State\",\n ])\n self.assertEquals(expected_names, display_names)\n mandatory = {val[\"display_name\"]: val[\"mandatory\"]\n for val in definitions.values()}\n self.assertTrue(mandatory[\"Title\"])", "title": "" }, { "docid": "13957323070877ff3159366da8b3e2db", "score": "0.589724", "text": "def test_update_alert_policy(self):\n pass", "title": "" }, { "docid": "30b0ef983854f6c0e214e2de2afeee0d", "score": "0.5891595", "text": "def __init__(__self__, *,\n policy: Optional[pulumi.Input['NodePlacementPolicyType']] = None):\n if policy is not None:\n pulumi.set(__self__, \"policy\", policy)", "title": "" }, { "docid": "6ef7002b700923212e11728221c61900", "score": "0.5881051", "text": "def test_validate_policy_create_with_extra_parameters_succeeds(self):\n request_to_validate = {'blob': 'some blob information',\n 'type': 'application/json',\n 'extra': 'some extra stuff'}\n self.create_policy_validator.validate(request_to_validate)", "title": "" }, { "docid": "7a559c63b6889ceaf923645e11ce47c0", "score": "0.5853033", "text": "def get_empty_policy():\n return copy.deepcopy(EMPTY_POLICY)", "title": "" }, { "docid": "f20ca787b51692772c106c85ccedc93a", "score": "0.57914484", "text": "def __init__(__self__, *,\n display_name: pulumi.Input[str],\n mode: pulumi.Input[str],\n policy_type: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n management_group_id: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parameters: Optional[pulumi.Input[str]] = None,\n policy_rule: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, 
\"display_name\", display_name)\n pulumi.set(__self__, \"mode\", mode)\n pulumi.set(__self__, \"policy_type\", policy_type)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if management_group_id is not None:\n pulumi.set(__self__, \"management_group_id\", management_group_id)\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if parameters is not None:\n pulumi.set(__self__, \"parameters\", parameters)\n if policy_rule is not None:\n pulumi.set(__self__, \"policy_rule\", policy_rule)", "title": "" }, { "docid": "783901ecef5f0c9c4a14a983b1871cab", "score": "0.5786493", "text": "def test_check_policy():\n #*** Instantiate tc, flows and identities classes, specifying\n #*** a particular main_policy file to use:\n policy = policy_module.Policy(config,\n pol_dir_default=\"config/tests/regression\",\n pol_dir_user=\"config/tests/foo\",\n pol_filename=\"main_policy_regression_static.yaml\")\n flow = flows_module.Flow(config)\n ident = identities.Identities(config, policy)\n\n #*** Note: cannot query a classification until a packet has been\n #*** ingested - will throw error\n\n #*** Ingest a packet:\n #*** Test Flow 1 Packet 1 (Client TCP SYN):\n # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]\n flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())\n #*** Check policy:\n policy.check_policy(flow, ident)\n #*** Should not match any rules in that policy:\n logger.debug(\"flow.classification.classified=%s\", flow.classification.classified)\n assert flow.classification.classified == 1\n assert flow.classification.classification_tag == \"\"\n assert flow.classification.actions == {}\n\n #*** Re-instantiate policy with different policy that should classify:\n policy = policy_module.Policy(config,\n pol_dir_default=\"config/tests/regression\",\n pol_dir_user=\"config/tests/foo\",\n pol_filename=\"main_policy_regression_static_3.yaml\")\n\n #*** Re-ingest packet:\n #*** Test Flow 1 Packet 1 (Client TCP SYN):\n # 10.1.0.1 10.1.0.2 TCP 74 43297 > http [SYN]\n flow.ingest_packet(DPID1, INPORT1, pkts.RAW[0], datetime.datetime.now())\n #*** Check policy:\n policy.check_policy(flow, ident)\n #*** Should match policy:\n assert flow.classification.classified == 1\n assert flow.classification.classification_tag == \"Constrained Bandwidth Traffic\"\n logger.debug(\"flow.classification.actions=%s\", flow.classification.actions)\n assert flow.classification.actions == {'set_desc': 'Constrained Bandwidth Traffic',\n 'qos_treatment': 'constrained_bw'}", "title": "" }, { "docid": "775046c70c3cd820280a1d19677955db", "score": "0.5770167", "text": "def test_defaults(self):\n\n prod = Product(\"Test prod\")\n self.assertEqual(prod.weight, 20)\n self.assertEqual(prod.flammability, .5)\n self.assertEqual(prod.name, \"Test prod\")", "title": "" }, { "docid": "775046c70c3cd820280a1d19677955db", "score": "0.5770167", "text": "def test_defaults(self):\n\n prod = Product(\"Test prod\")\n self.assertEqual(prod.weight, 20)\n self.assertEqual(prod.flammability, .5)\n self.assertEqual(prod.name, \"Test prod\")", "title": "" }, { "docid": "a5d34b656d0f939f3a54e38b59642f3b", "score": "0.5767771", "text": "def test_validate_policy_succeeds(self):\n request_to_validate = {'blob': 'some blob information',\n 'type': 'application/json'}\n self.create_policy_validator.validate(request_to_validate)", "title": "" }, { "docid": "9b9ea41896771e918c793ecc731371be", "score": "0.57541466", "text": "def 
add(self, policy_name, policy_id, policy_arn, default_version_id):\n temp_dict = {\n 'policy_id': policy_id,\n 'policy_arn': policy_arn,\n 'default_version_id': default_version_id\n }\n self.policies[policy_name] = temp_dict", "title": "" }, { "docid": "e7b3bf47f3cdfef69c00ab01c83df26e", "score": "0.5752198", "text": "def __init__(__self__, *,\n policy_name: pulumi.Input[str],\n policy_type: pulumi.Input[str],\n resource_id: Optional[pulumi.Input[str]] = None,\n scalable_dimension: Optional[pulumi.Input[str]] = None,\n scaling_target_id: Optional[pulumi.Input[str]] = None,\n service_namespace: Optional[pulumi.Input[str]] = None,\n step_scaling_policy_configuration: Optional[pulumi.Input['ScalingPolicyStepScalingPolicyConfigurationArgs']] = None,\n target_tracking_scaling_policy_configuration: Optional[pulumi.Input['ScalingPolicyTargetTrackingScalingPolicyConfigurationArgs']] = None):\n pulumi.set(__self__, \"policy_name\", policy_name)\n pulumi.set(__self__, \"policy_type\", policy_type)\n if resource_id is not None:\n pulumi.set(__self__, \"resource_id\", resource_id)\n if scalable_dimension is not None:\n pulumi.set(__self__, \"scalable_dimension\", scalable_dimension)\n if scaling_target_id is not None:\n pulumi.set(__self__, \"scaling_target_id\", scaling_target_id)\n if service_namespace is not None:\n pulumi.set(__self__, \"service_namespace\", service_namespace)\n if step_scaling_policy_configuration is not None:\n pulumi.set(__self__, \"step_scaling_policy_configuration\", step_scaling_policy_configuration)\n if target_tracking_scaling_policy_configuration is not None:\n pulumi.set(__self__, \"target_tracking_scaling_policy_configuration\", target_tracking_scaling_policy_configuration)", "title": "" }, { "docid": "4ceb2c3493768fa71596c93bc0f33fcf", "score": "0.5746212", "text": "def __init__(__self__, *,\n defense_scene: Optional[pulumi.Input[str]] = None,\n policy_name: Optional[pulumi.Input[str]] = None,\n policy_type: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None):\n if defense_scene is not None:\n pulumi.set(__self__, \"defense_scene\", defense_scene)\n if policy_name is not None:\n pulumi.set(__self__, \"policy_name\", policy_name)\n if policy_type is not None:\n pulumi.set(__self__, \"policy_type\", policy_type)\n if status is not None:\n pulumi.set(__self__, \"status\", status)", "title": "" }, { "docid": "1cbaf63373c30c67c7a81139f97542e6", "score": "0.57228535", "text": "def test_custom_classifiers():\n #*** Instantiate policy, specifying\n #*** a particular main_policy file to use that has no custom classifiers:\n policy = policy_module.Policy(config,\n pol_dir_default=\"config/tests/regression\",\n pol_dir_user=\"config/tests/regression\",\n pol_filename=\"main_policy_regression_static.yaml\")\n assert policy.tc_rules.custom_classifiers == []\n\n #*** Instantiate policy, specifying\n #*** a custom statistical main_policy file to use that has a\n #*** custom classifier:\n policy = policy_module.Policy(config,\n pol_dir_default=\"config/tests/regression\",\n pol_dir_user=\"config/tests/foo\",\n pol_filename=\"main_policy_regression_statistical.yaml\")\n assert policy.tc_rules.custom_classifiers == ['statistical_qos_bandwidth_1']", "title": "" }, { "docid": "fc388564f3c5b9d58b8fe4795087a9df", "score": "0.56829774", "text": "def create_traffic_policy(Name=None, Document=None, Comment=None):\n pass", "title": "" }, { "docid": "436df54cd69ea697df8cb9f1ef559439", "score": "0.567155", "text": "def 
test_get_default_policy_param_name_failing0(param, default_params_Policy):\n match=\"Received unexpected parameter: {0}\".format(param)\n with pytest.raises(ValueError, match=match):\n get_default_policy_param_name(param, default_params_Policy)", "title": "" }, { "docid": "b06a9c046cf13bd3b7c5419662fd46eb", "score": "0.5649757", "text": "def __init__(__self__,\n resource_name: str,\n args: Optional[QosPolicyArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "513ba453d42947fa308a528b6dccc043", "score": "0.5644154", "text": "def test_grading_policy(self):\n\n grading_policy = self.presenter.grading_policy()\n self.assertListEqual(grading_policy, self.factory.presented_grading_policy)\n\n percent = self.presenter.get_max_policy_display_percent(grading_policy)\n self.assertEqual(100, percent)\n\n percent = self.presenter.get_max_policy_display_percent([{'weight': 0.0}, {'weight': 1.0}, {'weight': 0.04}])\n self.assertEqual(90, percent)", "title": "" }, { "docid": "2de703e2d6598fdee54395107dd22333", "score": "0.5632154", "text": "def set_default_policy(\n localized_policy_version_id: str,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = SetDefaultPolicy.create(\n localized_policy_version_id=localized_policy_version_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "title": "" }, { "docid": "fb7f116c0d41807c449eed68e276f604", "score": "0.56313854", "text": "def test_default_setting(self):\n self._run_and_test(self._hparams)", "title": "" }, { "docid": "7dabc7ead9f263b6e3a552e42836470b", "score": "0.5625134", "text": "def test_change_alert_policy_order(self):\n pass", "title": "" }, { "docid": "099332bb8066e946505c21a8a98fed89", "score": "0.56238997", "text": "def test_validate_policy_create_with_invalid_type_fails(self):\n for prop in ['blob', 'type']:\n request_to_validate = {prop: False}\n self.assertRaises(exception.SchemaValidationError,\n self.create_policy_validator.validate,\n request_to_validate)", "title": "" }, { "docid": "1791ed0309aeb005e20a8bae29bacf1c", "score": "0.56208473", "text": "def test_1120(self):\n s = d1_cli.impl.replication_policy.ReplicationPolicy()\n s.set_replication_allowed(True)\n assert s.get_replication_allowed()\n s.set_number_of_replicas(0)\n assert not s.get_replication_allowed()", "title": "" }, { "docid": "0fbf7f802a8c96ddd4e6df2af4eb7853", "score": "0.56111753", "text": "def test_unknown_data(self):\n data = {\"UNKNOWN\": \"just testing\"}\n with self.assertRaises(TypeError):\n RequestPolicy.from_dict(data)", "title": "" }, { "docid": "1c595e406842b74151b5aee223780754", "score": "0.5608266", "text": "def __init__(self, policy, player):\n self._policy = policy\n self._player = player", "title": "" }, { "docid": "83d60d1f6fb5bd3f37d8b441d92d18f2", "score": "0.5608042", "text": "def _policy(self, current_state):\n\n\t\tif current_state not in self.policy_parameters:\n\t\t\tself.policy_parameters[current_state] = self.env.action_space.sample()\n\t\t\n\t\treturn self.policy_parameters[current_state]", "title": "" }, { "docid": "f3d72219af351c69a56bb7dcb7f715c5", "score": "0.5606771", "text": "def testDefaultValues(self):\n\t\tad = ActuatorData()\n\t\t\n\t\tself.assertEquals(ad.getCommand(), ActuatorData.DEFAULT_COMMAND)\n\t\tself.assertEquals(ad.getStatusCode(), ActuatorData.DEFAULT_STATUS)", "title": "" }, { "docid": "4e0f31dc92357f20bcb253930037df4f", "score": "0.5587501", "text": "def test_default_param(self):\n 
self.assert_transform(req={}, fwd={\"def1\": \"val_def1\",\n \"def2\": \"val_def2\"})", "title": "" }, { "docid": "3ace15e948375dadd16cd0031cab1ce7", "score": "0.558581", "text": "def __init__(__self__, *,\n name: str,\n policy_type: str,\n resource_type: str,\n values: Sequence['outputs.CostAllocationProportionResponse']):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"policy_type\", policy_type)\n pulumi.set(__self__, \"resource_type\", resource_type)\n pulumi.set(__self__, \"values\", values)", "title": "" }, { "docid": "1b2ecdffe951521f2ae62f0622d0c56a", "score": "0.558549", "text": "def test_init(self):\n # Create the algorithm object.\n policy_params = self.init_parameters.copy()\n policy_params['_init_setup_model'] = False\n alg = OffPolicyRLAlgorithm(**policy_params)\n\n # Test the attribute values.\n self.assertEqual(alg.policy, self.init_parameters['policy'])\n self.assertEqual(alg.eval_env, self.init_parameters['eval_env'])\n self.assertEqual(alg.nb_train_steps,\n self.init_parameters['nb_train_steps'])\n self.assertEqual(alg.nb_rollout_steps,\n self.init_parameters['nb_rollout_steps'])\n self.assertEqual(alg.nb_eval_episodes,\n self.init_parameters['nb_eval_episodes'])\n self.assertEqual(alg.reward_scale,\n self.init_parameters['reward_scale'])\n self.assertEqual(alg.render, self.init_parameters['render'])\n self.assertEqual(alg.render_eval, self.init_parameters['render_eval'])\n self.assertEqual(alg.verbose, self.init_parameters['verbose'])", "title": "" }, { "docid": "bb22124ce7a047760f04b4e7f3df55d3", "score": "0.5584042", "text": "def test_create_policy_active_status(self, l7policy_create_service):\n\n svc = l7policy_create_service\n builder = LBaaSBuilder(mock.MagicMock(), mock.MagicMock())\n builder._assure_l7policies_created(svc)\n assert svc['l7policies'][0]['provisioning_status'] == 'PENDING_CREATE'\n assert svc['loadbalancer']['provisioning_status'] == 'ACTIVE'", "title": "" }, { "docid": "86481ccbf821225d690223216fec0911", "score": "0.55832696", "text": "def test_creation(self):\n self.assertEquals('test', self.instance.name)\n self.assertEquals(None, self.instance.value)", "title": "" }, { "docid": "9db09681700083f75258578a5494c5df", "score": "0.55818343", "text": "def test_validate_policy_without_blob_fails(self):\n request_to_validate = {'type': 'application/json'}\n self.assertRaises(exception.SchemaValidationError,\n self.create_policy_validator.validate,\n request_to_validate)", "title": "" }, { "docid": "1a4fc5541432f7b0f22c6833b42c38f5", "score": "0.5574718", "text": "def test_gets_default_restrictedanswerset(self):\n model = {\n \"job_mode\": \"batch\",\n \"request_type\": \"image_label_area_select\",\n \"unsafe_content\": False,\n \"task_bid_price\": 1,\n \"oracle_stake\": 0.1,\n \"expiration_date\": 0,\n \"minimum_trust_server\": 0.1,\n \"minimum_trust_client\": 0.1,\n \"requester_accuracy_target\": 0.1,\n \"recording_oracle_addr\": REC_ORACLE,\n \"reputation_oracle_addr\": REP_ORACLE,\n \"reputation_agent_addr\": REP_ORACLE,\n \"instant_result_delivery_webhook\": CALLBACK_URL,\n \"requester_question\": {\"en\": \"How much money are we to make\"},\n \"requester_question_example\": FAKE_URL,\n \"job_total_tasks\": 5,\n \"taskdata_uri\": FAKE_URL,\n }\n\n manifest = create_manifest(model)\n\n func = validate_func(manifest)\n # Return new object for pydantic library\n if test_mode == PYDANTIC:\n manifest = func(True)\n else:\n func()\n\n self.assertGreater(len(manifest.to_primitive()[\"requester_restricted_answer_set\"].keys()), 0)", "title": 
"" }, { "docid": "d4ce2c74252b15b0ca46a06f73699d54", "score": "0.5570457", "text": "def _register_policy(self, policy):\n pass", "title": "" }, { "docid": "0ea541aaf2f64d55ee22f1e5c5202157", "score": "0.5556629", "text": "def test_default_param(self):\n self.assert_transform(req={}, fwd={\"set1\": \"val_set1\",\n \"set2\": \"val_set2\"})", "title": "" }, { "docid": "ef211a48fa30074915d6b32ce96979c2", "score": "0.555269", "text": "def __init__(__self__, *,\n consider_warning_as_error: Optional[bool] = None,\n default_service_type_health_policy: Optional['outputs.ArmServiceTypeHealthPolicyResponse'] = None,\n max_percent_unhealthy_deployed_applications: Optional[int] = None,\n service_type_health_policy_map: Optional[Mapping[str, 'outputs.ArmServiceTypeHealthPolicyResponse']] = None):\n if consider_warning_as_error is None:\n consider_warning_as_error = False\n if consider_warning_as_error is not None:\n pulumi.set(__self__, \"consider_warning_as_error\", consider_warning_as_error)\n if default_service_type_health_policy is not None:\n pulumi.set(__self__, \"default_service_type_health_policy\", default_service_type_health_policy)\n if max_percent_unhealthy_deployed_applications is None:\n max_percent_unhealthy_deployed_applications = 0\n if max_percent_unhealthy_deployed_applications is not None:\n pulumi.set(__self__, \"max_percent_unhealthy_deployed_applications\", max_percent_unhealthy_deployed_applications)\n if service_type_health_policy_map is not None:\n pulumi.set(__self__, \"service_type_health_policy_map\", service_type_health_policy_map)", "title": "" }, { "docid": "c02843de1a39137abe12adef9cc6cb5d", "score": "0.5549321", "text": "def create_policy(config, group_id, group_name, notes, policy, cloudlet_type):\n base_url, session = init_config(config.edgerc, config.section)\n\n cloudlet_object = Cloudlet(base_url, config.account_key)\n utility_object = Utility()\n policy_name = policy\n group_id = group_id\n group_name = group_name\n\n if group_id:\n if group_id.startswith('grp_'):\n group_id = group_id.split('_')[1]\n try:\n group_id = int(group_id)\n except:\n root_logger.info(\"group-id must be a number or start with grp_\")\n exit(-1)\n\n cloudlet_type = cloudlet_type.upper()\n if notes:\n description = notes\n else:\n #notes not specified, create our own default description\n description = str(policy_name) + ' (Created by Cloudlet CLI)'\n\n\n if group_id and group_name:\n root_logger.info(\"Please specify either group-id or group-name.\")\n exit(-1)\n\n if not group_id and not group_name:\n root_logger.info(\"Please specify either group-id or group-name.\")\n exit(-1)\n\n #verify valid cloudlet type code\n if cloudlet_type not in utility_object.do_cloudlet_code_map().keys():\n root_logger.info('ERROR: ' + cloudlet_type + ' is not a valid cloudlet type code')\n keys = []\n for key in utility_object.do_cloudlet_code_map():\n keys.append(key)\n print('Cloudlet Type Codes: ' + str(keys))\n exit(-1)\n else:\n cloudlet_id = utility_object.do_cloudlet_code_map()[cloudlet_type]\n\n #group name passed, so check to see if it exists\n if group_name:\n found_group = False\n root_logger.info(\"...searching for group: \" + str(group_name))\n group_response = cloudlet_object.get_groups(session)\n if group_response.status_code == 200:\n for every_group in group_response.json():\n if every_group['groupName'].upper() == group_name.upper():\n group_id = every_group['groupId']\n root_logger.info(\"...found group-id: \" + str(every_group['groupId']))\n found_group = True\n pass\n if 
not(found_group):\n root_logger.info(\"ERROR: Unable to find group: \" + str(group_name))\n exit(-1)\n else:\n #group-id is passed, so use it\n pass\n\n policy_data = dict()\n policy_data['cloudletId'] = cloudlet_id\n policy_data['groupId'] = group_id\n policy_data['name'] = policy_name\n policy_data['description'] = description\n\n create_response = cloudlet_object.create_clone_policy(session, json.dumps(policy_data))\n\n if create_response.status_code == 201:\n print(str(create_response.json()['policyId']))\n pass\n else:\n root_logger.info('ERROR: Unable to create policy')\n root_logger.info(json.dumps(create_response.json(), indent=4))\n\n return 0", "title": "" }, { "docid": "5a452ef423a69c01292a4fd029ddfbea", "score": "0.55472714", "text": "def __init__(__self__,\n resource_name: str,\n args: VaultPolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "654ec5940a12d2a6d84e907b9a2f0c32", "score": "0.5542282", "text": "def test_delete_scaling_policy(self):\n pass", "title": "" }, { "docid": "1d3465ec3a88edc74467e87199a9927d", "score": "0.5529133", "text": "def test_default_source_parameters(self):\n expected_parameters = {\n \"landing_url\": \"\",\n \"password\": \"\",\n \"private_token\": \"\",\n \"severities\": [],\n \"url\": \"\",\n \"username\": \"\",\n }\n self.assertEqual(expected_parameters, default_source_parameters(self.database, \"security_warnings\", \"snyk\"))", "title": "" }, { "docid": "636b96cf37ab0fba3f49c0e25784db19", "score": "0.5517332", "text": "def __init__(__self__, *,\n defense_scene: pulumi.Input[str],\n policy_name: pulumi.Input[str],\n policy_type: pulumi.Input[str],\n status: pulumi.Input[str]):\n pulumi.set(__self__, \"defense_scene\", defense_scene)\n pulumi.set(__self__, \"policy_name\", policy_name)\n pulumi.set(__self__, \"policy_type\", policy_type)\n pulumi.set(__self__, \"status\", status)", "title": "" }, { "docid": "69537d2acfed2846f62f506fdfc77d6b", "score": "0.5516997", "text": "def test_1140(self):\n s = d1_cli.impl.replication_policy.ReplicationPolicy()\n s.add_preferred([\"preferred_mn_1\"])\n s.add_preferred([\"preferred_mn_2\"])\n s.add_blocked([\"blocked_mn_1\"])\n s.add_blocked([\"blocked_mn_2\"])\n s.set_number_of_replicas(5)\n s.set_replication_allowed(True)\n s.clear()\n assert not len(s.get_preferred())\n assert not len(s.get_blocked())\n assert s.get_replication_allowed()\n assert s.get_number_of_replicas() == 3", "title": "" }, { "docid": "fb273d4750ff13bb7752935019521b9e", "score": "0.5514743", "text": "def init_policy(self):\n for i in range(len(self.end_states)):\n x, y = self.end_states[i]\n if self.board[x][y] > 0:\n self.policy[x][y] = \"Goal\"\n else:\n self.policy[x][y] = \"Bad\"", "title": "" }, { "docid": "768a5b58199bfb76b5668b4fd5e698a6", "score": "0.5514101", "text": "def setUp(self):\n super(OtterRbacTests, self).setUp()\n create_group = self.autoscale_behaviors.create_scaling_group_given(\n gc_min_entities=0)\n self.group = create_group.entity\n self.policy_webhook = self.autoscale_behaviors.create_policy_webhook(\n self.group.id, {'change': 1, 'cooldown': 0})\n self.resources.add(self.group, self.empty_scaling_group)", "title": "" }, { "docid": "c1b59879b4c50b8ca644b82de5fb91d2", "score": "0.5512931", "text": "def __init__(self, policyType, name):\n policyTypeEnum = ['fabric', 'access']\n\n if policyType not in policyTypeEnum:\n raise ValueError('Policy Type must be one of:', policyTypeEnum)\n\n self.name = name\n self.policyType = policyType\n self.descr = ''\n 
self.collection_policy = {}\n self.monitor_target = {}\n\n # assume that it has not been written to APIC. This is cleared if the\n # policy is just loaded from APIC or the policy is written to the APIC.\n self.modified = True", "title": "" }, { "docid": "1a828537c69337bb150f3b5172e755ea", "score": "0.5509726", "text": "def default_storage_policy_assigned(name, policy, datastore):\n log.info(\n \"Running state %s for policy '%s', datastore '%s'.\", name, policy, datastore\n )\n changes = {}\n changes_required = False\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": None}\n si = None\n try:\n si = __salt__[\"vsphere.get_service_instance_via_proxy\"]()\n existing_policy = __salt__[\"vsphere.list_default_storage_policy_of_datastore\"](\n datastore=datastore, service_instance=si\n )\n if existing_policy[\"name\"] == policy:\n comment = (\n \"Storage policy '{}' is already assigned to \"\n \"datastore '{}'. Nothing to be done.\"\n \"\".format(policy, datastore)\n )\n else:\n changes_required = True\n changes = {\n \"default_storage_policy\": {\n \"old\": existing_policy[\"name\"],\n \"new\": policy,\n }\n }\n if __opts__[\"test\"]:\n comment = \"State {} will assign storage policy '{}' to datastore '{}'.\".format(\n name, policy, datastore\n )\n else:\n __salt__[\"vsphere.assign_default_storage_policy_to_datastore\"](\n policy=policy, datastore=datastore, service_instance=si\n )\n comment = \"Storage policy '{} was assigned to datastore '{}'.\".format(\n policy, name\n )\n log.info(comment)\n except CommandExecutionError as exc:\n log.error(\"Error: %s\", exc)\n if si:\n __salt__[\"vsphere.disconnect\"](si)\n ret.update(\n {\"comment\": exc.strerror, \"result\": False if not __opts__[\"test\"] else None}\n )\n return ret\n\n ret[\"comment\"] = comment\n if changes_required:\n ret.update({\"changes\": changes, \"result\": None if __opts__[\"test\"] else True})\n else:\n ret[\"result\"] = True\n return ret", "title": "" }, { "docid": "587024b964d9bdb9e97f63fe48841f2c", "score": "0.5501955", "text": "def get_default_policy(client, scaffold=False): # pylint: disable=unused-argument\n return _scaffold_certificate_profile() if scaffold else _default_certificate_profile()", "title": "" }, { "docid": "5034b3bfea1b0e9af50b35cbeca3d7f1", "score": "0.54995465", "text": "def __init__(__self__,\n resource_name: str,\n args: WafPolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "dea7d1fb71fd2cea0e7b45c1ad5a689b", "score": "0.5490454", "text": "def test_delete_alert_policy(self):\n pass", "title": "" }, { "docid": "c3a3d69761700c94b0111f570334053b", "score": "0.5487079", "text": "def __init__(__self__, *,\n type: pulumi.Input['PolicyType'],\n managed_service_data: Optional[pulumi.Input[str]] = None,\n policy_option: Optional[pulumi.Input['PolicyOptionArgs']] = None):\n pulumi.set(__self__, \"type\", type)\n if managed_service_data is not None:\n pulumi.set(__self__, \"managed_service_data\", managed_service_data)\n if policy_option is not None:\n pulumi.set(__self__, \"policy_option\", policy_option)", "title": "" }, { "docid": "ad7c95e0a00c4b204b25c15625f1d9ee", "score": "0.54831177", "text": "def set_(\n name,\n setting=None,\n policy_class=None,\n computer_policy=None,\n user_policy=None,\n cumulative_rights_assignments=True,\n adml_language=\"en-US\",\n):\n ret = {\"name\": name, \"result\": True, \"changes\": {}, \"comment\": \"\"}\n policy_classes = [\"machine\", \"computer\", \"user\", \"both\"]\n class_map = {\n \"computer\": 
\"Computer Configuration\",\n \"machine\": \"Computer Configuration\",\n \"user\": \"User Configuration\",\n }\n if not setting and not computer_policy and not user_policy:\n msg = (\n \"At least one of the parameters setting, computer_policy, or \"\n \"user_policy must be specified.\"\n )\n ret[\"result\"] = False\n ret[\"comment\"] = msg\n return ret\n if setting and not policy_class:\n msg = (\n \"A single policy setting was specified but the policy_class \"\n \"was not specified.\"\n )\n ret[\"result\"] = False\n ret[\"comment\"] = msg\n return ret\n if setting and (computer_policy or user_policy):\n msg = (\n \"The setting and computer_policy/user_policy parameters are \"\n \"mutually exclusive. Please specify either a policy name and \"\n \"setting or a computer_policy and/or user_policy dict\"\n )\n ret[\"result\"] = False\n ret[\"comment\"] = msg\n return ret\n if policy_class and policy_class.lower() not in policy_classes:\n msg = \"The policy_class parameter must be one of the following: {}\"\n ret[\"result\"] = False\n ret[\"comment\"] = msg\n return ret\n if not setting:\n if computer_policy and not isinstance(computer_policy, dict):\n msg = \"The computer_policy must be specified as a dict.\"\n ret[\"result\"] = False\n ret[\"comment\"] = msg\n return ret\n if user_policy and not isinstance(user_policy, dict):\n msg = \"The user_policy must be specified as a dict.\"\n ret[\"result\"] = False\n ret[\"comment\"] = msg\n return ret\n else:\n user_policy = {}\n computer_policy = {}\n if policy_class.lower() == \"both\":\n user_policy[name] = setting\n computer_policy[name] = setting\n elif policy_class.lower() == \"user\":\n user_policy[name] = setting\n elif policy_class.lower() in [\"machine\", \"computer\"]:\n computer_policy[name] = setting\n pol_data = {\n \"user\": {\"requested_policy\": user_policy, \"policy_lookup\": {}},\n \"machine\": {\"requested_policy\": computer_policy, \"policy_lookup\": {}},\n }\n\n current_policy = {}\n deprecation_comments = []\n for p_class, p_data in pol_data.items():\n if p_data[\"requested_policy\"]:\n for p_name, _ in p_data[\"requested_policy\"].items():\n lookup = __salt__[\"lgpo.get_policy_info\"](\n policy_name=p_name,\n policy_class=p_class,\n adml_language=adml_language,\n )\n if lookup[\"policy_found\"]:\n pol_data[p_class][\"policy_lookup\"][p_name] = lookup\n # Since we found the policy, let's get the current setting\n # as well\n current_policy.setdefault(class_map[p_class], {})\n current_policy[class_map[p_class]][p_name] = __salt__[\n \"lgpo.get_policy\"\n ](\n policy_name=p_name,\n policy_class=p_class,\n adml_language=adml_language,\n return_value_only=True,\n )\n # Validate element names\n if isinstance(p_data[\"requested_policy\"][p_name], dict):\n valid_names = []\n for element in lookup[\"policy_elements\"]:\n valid_names.extend(element[\"element_aliases\"])\n for e_name in p_data[\"requested_policy\"][p_name]:\n if e_name not in valid_names:\n new_e_name = e_name.split(\":\")[-1].strip()\n # If we find an invalid name, test the new\n # format. 
If found, add to deprecation comments\n # and bail\n if new_e_name in valid_names:\n msg = (\n '\"{}\" is no longer valid.\\n'\n 'Please use \"{}\" instead.'\n \"\".format(e_name, new_e_name)\n )\n deprecation_comments.append(msg)\n else:\n msg = \"Invalid element name: {}\".format(e_name)\n ret[\"comment\"] = \"\\n\".join(\n [ret[\"comment\"], msg]\n ).strip()\n ret[\"result\"] = False\n else:\n ret[\"comment\"] = \"\\n\".join(\n [ret[\"comment\"], lookup[\"message\"]]\n ).strip()\n ret[\"result\"] = False\n if not ret[\"result\"]:\n if deprecation_comments:\n deprecation_comments.insert(\n 0, \"The LGPO module changed the way it gets policy element names.\"\n )\n deprecation_comments.append(ret[\"comment\"])\n ret[\"comment\"] = \"\\n\".join(deprecation_comments).strip()\n return ret\n\n log.debug(\"pol_data == %s\", pol_data)\n log.debug(\"current policy == %s\", current_policy)\n\n # compare policies\n policy_changes = []\n for p_class, p_data in pol_data.items():\n requested_policy = p_data.get(\"requested_policy\")\n if requested_policy:\n for p_name, p_setting in requested_policy.items():\n if p_name in current_policy[class_map[p_class]]:\n # compare the requested and current policies\n log.debug(\n \"need to compare %s from current/requested policy\", p_name\n )\n\n # resolve user names in the requested policy and the current\n # policy so that we are comparing apples to apples\n if p_data[\"policy_lookup\"][p_name][\"rights_assignment\"]:\n resolved_names = []\n for name in p_data[\"requested_policy\"][p_name]:\n resolved_names.append(\n salt.utils.win_functions.get_sam_name(name)\n )\n p_data[\"requested_policy\"][p_name] = resolved_names\n resolved_names = []\n for name in current_policy[class_map[p_class]][p_name]:\n resolved_names.append(\n salt.utils.win_functions.get_sam_name(name)\n )\n current_policy[class_map[p_class]][p_name] = resolved_names\n\n changes = False\n requested_policy_json = salt.utils.json.dumps(\n p_data[\"requested_policy\"][p_name], sort_keys=True\n )\n current_policy_json = salt.utils.json.dumps(\n current_policy[class_map[p_class]][p_name], sort_keys=True\n )\n\n requested_policy_check = salt.utils.json.loads(\n requested_policy_json\n )\n current_policy_check = salt.utils.json.loads(current_policy_json)\n\n # Are the requested and current policies identical\n policies_are_equal = _compare_policies(\n requested_policy_check, current_policy_check\n )\n\n if not policies_are_equal:\n if (\n p_data[\"policy_lookup\"][p_name][\"rights_assignment\"]\n and cumulative_rights_assignments\n ):\n for user in p_data[\"requested_policy\"][p_name]:\n if (\n user\n not in current_policy[class_map[p_class]][p_name]\n ):\n user = salt.utils.win_functions.get_sam_name(user)\n if (\n user\n not in current_policy[class_map[p_class]][\n p_name\n ]\n ):\n changes = True\n else:\n changes = True\n if changes:\n log.debug(\"%s current policy != requested policy\", p_name)\n log.debug(\n \"We compared %s to %s\",\n requested_policy_json,\n current_policy_json,\n )\n policy_changes.append(p_name)\n else:\n msg = '\"{}\" is already set'.format(p_name)\n log.debug(msg)\n else:\n policy_changes.append(p_name)\n log.debug(\"policy %s is not set, we will configure it\", p_name)\n if __opts__[\"test\"]:\n if policy_changes:\n msg = \"The following policies are set to change:\\n{}\".format(\n \"\\n\".join(policy_changes)\n )\n ret[\"result\"] = None\n else:\n msg = \"All specified policies are properly configured\"\n deprecation_comments.append(msg)\n ret[\"comment\"] = 
\"\\n\".join(deprecation_comments).strip()\n else:\n if policy_changes:\n _ret = __salt__[\"lgpo.set\"](\n computer_policy=pol_data[\"machine\"][\"requested_policy\"],\n user_policy=pol_data[\"user\"][\"requested_policy\"],\n cumulative_rights_assignments=cumulative_rights_assignments,\n adml_language=adml_language,\n )\n if _ret:\n ret[\"result\"] = _ret\n new_policy = {}\n for p_class, p_data in pol_data.items():\n if p_data[\"requested_policy\"]:\n for p_name, p_setting in p_data[\"requested_policy\"].items():\n new_policy.setdefault(class_map[p_class], {})\n new_policy[class_map[p_class]][p_name] = __salt__[\n \"lgpo.get_policy\"\n ](\n policy_name=p_name,\n policy_class=p_class,\n adml_language=adml_language,\n return_value_only=True,\n )\n ret[\"changes\"] = salt.utils.dictdiffer.deep_diff(\n old=current_policy, new=new_policy\n )\n if ret[\"changes\"]:\n msg = \"The following policies changed:\\n{}\".format(\n \"\\n\".join(policy_changes)\n )\n else:\n msg = \"Failed to set the following policies:\\n{}\".format(\n \"\\n\".join(policy_changes)\n )\n ret[\"result\"] = False\n else:\n msg = (\n \"Errors occurred while attempting to configure policies: {}\".format(\n _ret\n )\n )\n ret[\"result\"] = False\n deprecation_comments.append(msg)\n ret[\"comment\"] = \"\\n\".join(deprecation_comments).strip()\n else:\n msg = \"All specified policies are properly configured\"\n deprecation_comments.append(msg)\n ret[\"comment\"] = \"\\n\".join(deprecation_comments).strip()\n\n return ret", "title": "" }, { "docid": "6887a7f59a35c6a04d5d41aaac766b07", "score": "0.54816043", "text": "def create_new_policy(self, name: str = None, description: str = None, priority_level: str = None,\n policy: dict = None):\n suffix_url = 'integrationServices/v3/policy'\n body = {\n \"policyInfo\": assign_params(\n name=name,\n description=description,\n priorityLevel=priority_level,\n policy=policy,\n version=CURRENT_VERSION_OF_THE_POLICY_API\n )\n }\n return self._http_request(method='POST', url_suffix=suffix_url, headers=self.policy_headers,\n json_data=body)", "title": "" }, { "docid": "d5249a4fb17399214bda46571efdf4f1", "score": "0.5477529", "text": "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n defense_scene: Optional[pulumi.Input[str]] = None,\n policy_name: Optional[pulumi.Input[str]] = None,\n policy_type: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "title": "" }, { "docid": "d9e47f70f589a2c32b9c6995f09d9f13", "score": "0.54768485", "text": "def test_get_defaults():\n\n # Arrange\n num_boids = 50\n x_limits = -450, 50\n y_limits = 300, 600\n x_velocity_limits = 0, 10\n y_velocity_limits = -20, 20\n\n # Act\n parameters = BoidsSetupParameters.get_defaults()\n\n # Assert\n assert parameters.num_boids == num_boids\n assert parameters.x_limits == x_limits\n assert parameters.y_limits == y_limits\n assert parameters.x_velocity_limits == x_velocity_limits\n assert parameters.y_velocity_limits == y_velocity_limits", "title": "" }, { "docid": "652582f67c45a831d467cad43fc0a0b3", "score": "0.5460447", "text": "def test_cypress_defaults():\n # TODO how to make this work when the module is installed?\n _ = Parameters.create({\"PARAMETERS\": \"./defaults/cypress.cfg\"}, [])", "title": "" }, { "docid": "f607f19b1db657c80e655c1f53315b6f", "score": "0.5459023", "text": "def test_cantCreateWithoutRequiredValues(self):\n txn = self.pool.connection()\n te = yield 
self.failUnlessFailure(TestAutoRecord.create(txn),\n TypeError)\n self.assertIn(\"required attribute 'epsilon' not passed\", str(te))", "title": "" }, { "docid": "d7da3bb2d10c3b3e1be1fde851467b03", "score": "0.5454637", "text": "def test_valid_post_failed_set_policy(loggedin_app):\n form = {\n 'project': 'test',\n 'access': 'roles/compute.instanceAdmin',\n 'period': '15',\n 'target': 'test4',\n 'domain': 'example.com',\n 'csrf_token': 'not validated in tests',\n }\n\n url = '{}/{}:getIamPolicy'.format(CLOUD_RM, 'test')\n responses.add(\n responses.POST, url, status=200,\n json={\n 'bindings': [\n {\n 'role': 'roles/owner',\n 'members': [\n 'user:[email protected]',\n 'user:[email protected]',\n ],\n },\n {\n 'role': 'roles/storage.admin',\n 'condition': {\n 'expression': 'request.time < timestamp(\\'2018-05-04T00:00:00.00+00:00\\')', # noqa: E501\n 'title': 'testing',\n },\n 'members': [\n 'user:[email protected]',\n ],\n },\n ],\n 'version': 1,\n 'etag': 'test',\n })\n\n url = '{}/{}:setIamPolicy'.format(CLOUD_RM, 'test')\n responses.add(\n responses.POST, url, status=400,\n json={\n 'error': {\n 'code': 400,\n 'detail': [{\n '@type': 'type.googleapis.com/google.rpc.BadRequest',\n 'fieldViolations': [\n {'description': 'Invalid JSON payload'}],\n }],\n 'status': 'INVALID_ARGUMENT',\n 'message': 'Invalid JSON payload received.',\n }})\n\n res = loggedin_app.post('/', data=form, follow_redirects=True)\n assert res.status_code == 200\n assert res.content_type == 'text/html; charset=utf-8'\n assert len(responses.calls) == 2\n assert 'could not apply new policy'.lower() in \\\n res.get_data(as_text=True).lower()", "title": "" }, { "docid": "674b8bf6ecae695af9079e08b72f6614", "score": "0.5451183", "text": "def test_get_default_policy_param_name_failing1(default_params_Policy):\n param = \"ID_BenefitSurtax_Switch_idx\"\n match = \"Parsing {}: Expected integer for index but got {}\".format(param, \"idx\")\n with pytest.raises(ValueError, match=match):\n get_default_policy_param_name(param, default_params_Policy)", "title": "" }, { "docid": "53a6f5d730fa8aaf7bc62e9e039a64cc", "score": "0.54496896", "text": "def test_infinite_retry_policy_default_parameters(infinite_retry_policy):\n assert isinstance(\n infinite_retry_policy, RequestsStampede.policy.retry.AbstractRetryPolicy\n )\n\n assert isinstance(infinite_retry_policy.attempts, float)\n assert infinite_retry_policy.attempts == math.inf", "title": "" }, { "docid": "0045c914baf0d8216a8911d673e01e40", "score": "0.5445618", "text": "def test_create_validdefault(monkeypatch):\n\n monkeypatch.setattr(s2_ovr, 'sentinel_bucket', sentinel_bucket)\n\n assert s2_ovr.create(sentinel_scene, ovrSize=128)", "title": "" }, { "docid": "b956be5832c0515f21af377b5739e08b", "score": "0.5439153", "text": "def test_get_auto_scaling_policy_list(self):\n pass", "title": "" }, { "docid": "7b310c5154941b154c5150e60dc60000", "score": "0.5436726", "text": "def __init__(__self__, *,\n default_service_type_health_policy: Optional['outputs.ServiceTypeHealthPolicyResponse'] = None,\n service_type_health_policies: Optional[Mapping[str, 'outputs.ServiceTypeHealthPolicyResponse']] = None):\n if default_service_type_health_policy is not None:\n pulumi.set(__self__, \"default_service_type_health_policy\", default_service_type_health_policy)\n if service_type_health_policies is not None:\n pulumi.set(__self__, \"service_type_health_policies\", service_type_health_policies)", "title": "" }, { "docid": "c5fcffe6bddd8672494032c72344da3b", "score": "0.54359615", "text": "def 
set_policy(self, policy_name, policy_value):\n\n # Risk probability and impact are multiplied together\n # Productivity costs are added together\n\n # self.dict[policy_name] = self.load_policy(policy_name)(policy_value)\n self.dict[policy_name] = policy_value", "title": "" }, { "docid": "2a6defc7ff6ad5272c5ad1963bc35b73", "score": "0.5425875", "text": "def ptp_parameter_create(self, values):", "title": "" }, { "docid": "d7d7aef79b4e85cea69bfde1a9715865", "score": "0.5424839", "text": "def _create_capability(self, name, urls, group=None, default=True):\n group = group or self._create_group('test')\n capability = ProtectedCapability.objects.create(\n default=default,\n title=name,\n slug=slugify(name),\n protected_resources=json.dumps(urls),\n group=group)\n return capability", "title": "" }, { "docid": "18c9db7950ab9029051d9ac2b2dc9620", "score": "0.54228216", "text": "def test_validate_policy_without_type_fails(self):\n request_to_validate = {'blob': 'some blob information'}\n self.assertRaises(exception.SchemaValidationError,\n self.create_policy_validator.validate,\n request_to_validate)", "title": "" }, { "docid": "5e7af85be4689b7c5e594e14b7ef4406", "score": "0.5416214", "text": "def initNet(policy):\n\n\tfor m in policy.parameters():\n\t nn.init.normal(m, mean=0, std=0.2)\n\n\treturn policy", "title": "" }, { "docid": "aa0af5fab6b3339a58d3069f981b494e", "score": "0.5411907", "text": "def __init__(self, default_params=None, restrictions=None):\n self._locked = False\n self._restrictions = []\n if restrictions:\n self._restrictions = restrictions\n if default_params is None:\n default_params = {}\n self.override(default_params, is_strict=False)\n self.validate()", "title": "" }, { "docid": "b819a74044a86afeb740ad0d1e3b2f20", "score": "0.5410457", "text": "def test_can_construct_acquire_command_with_default_values(self):\n acq_command = Acquire(duration=10)\n\n self.assertEqual(acq_command.duration, 10)\n self.assertEqual(acq_command.discriminator, None)\n self.assertEqual(acq_command.kernel, None)", "title": "" }, { "docid": "86b981c74e72391f212cea1938385f90", "score": "0.5409782", "text": "def test_webapp_defaults():\n # TODO how to make this work when the module is installed?\n _ = Parameters.create({\"PARAMETERS\": \"./defaults/webapp.cfg\"}, [])", "title": "" }, { "docid": "1f9c5771ce42a008912ff31dc65bc111", "score": "0.54089916", "text": "def adm_type_good():\n return {\n \"name\": \"Policy for Rate Control\",\n \"policy_type_id\": 6660666,\n \"description\": \"This policy is associated with rate control. An instance of the policy specifies the traffic class to which it applies and parameters to use to control how much it must be throttled in case of an overload. Each instance of the policy that is created MUST be associated with a unique class ID (identifyed by the key 'class', which is used by the xAPP to differentiate traffic. If an agent tries to create a policy with the SAME class id, it will be rejected by the xAPP, even if it has a unique policy instance id. 
\",\n \"create_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\n \"class\": {\n \"type\": \"integer\",\n \"minimum\": 1,\n \"maximum\": 256,\n \"description\": \"integer id representing class to which we are applying policy\",\n },\n \"enforce\": {\n \"type\": \"boolean\",\n \"description\": \"Whether to enable or disable enforcement of policy on this class\",\n },\n \"window_length\": {\n \"type\": \"integer\",\n \"minimum\": 15,\n \"maximum\": 300,\n \"description\": \"Sliding window length in seconds\",\n },\n \"trigger_threshold\": {\"type\": \"integer\", \"minimum\": 1},\n \"blocking_rate\": {\"type\": \"number\", \"minimum\": 0, \"maximum\": 100},\n },\n \"required\": [\"class\", \"enforce\", \"blocking_rate\", \"trigger_threshold\", \"window_length\"],\n },\n }", "title": "" }, { "docid": "861d09a11726ee15bc1abee87855d90e", "score": "0.5408342", "text": "def test_default(self):\n pv_command = PersistentValue(value=0.5 - 0.5j)\n\n self.assertEqual(pv_command.value, 0.5-0.5j)\n self.assertEqual(pv_command.duration, 0)", "title": "" }, { "docid": "d28df88700eec81e7547360dac3eb4a8", "score": "0.54083174", "text": "def test_createFillsInPKey(self):\n txn = self.pool.connection()\n tr = yield TestAutoRecord.create(txn, epsilon=u'specified')\n tr2 = yield TestAutoRecord.create(txn, epsilon=u'also specified')\n self.assertEquals(tr.phi, 1)\n self.assertEquals(tr2.phi, 2)", "title": "" } ]
ea53c144dfd2ef432f2ae9576ba9d3b4
Run testing command. Raises Exception if there are any failed tests.
[ { "docid": "9d3392e632071b2ee08ddeb10b5d9578", "score": "0.7361913", "text": "def _run_tests() -> None:\r\n logger.info('testing command started.')\r\n stdout: str = _run_command(\r\n command=(\r\n 'pytest --cov=./apysc tests/ -v -s --workers auto '\r\n '--cov-report term-missing'\r\n ))\r\n if ' failed, ' in stdout:\r\n raise Exception('There are failed tests.')\r\n _save_coverage(stdout=stdout)", "title": "" } ]
[ { "docid": "d92ce6264f9f77f50e3bc91a4cb02d68", "score": "0.78962797", "text": "def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import tox\n errcode = tox.cmdline(self.test_args)\n sys.exit(errcode)", "title": "" }, { "docid": "d92ce6264f9f77f50e3bc91a4cb02d68", "score": "0.78962797", "text": "def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import tox\n errcode = tox.cmdline(self.test_args)\n sys.exit(errcode)", "title": "" }, { "docid": "bee16d52b71556d7ae0e3527ef205627", "score": "0.77716166", "text": "def run_test(self):\n try:\n ioutils.call_pytest(self.location)\n except CalledProcessError as ex:\n if ex.returncode == 5:\n log.warning('No tests were found')\n log.warning('This is not considered fatal but is VERY STRONGLY discouraged')\n log.warning('Resuming in 2 seconds')\n sleep(2)\n return\n log.error('Tests Failed')\n exit(1)\n log.success('Tests passed')", "title": "" }, { "docid": "9effd233067e1c527d84e2321beb06ca", "score": "0.72708046", "text": "def run_tests(self):\n runner = self.runner_class(**self.options)\n try:\n runner.run(self.test)\n except KeyboardInterrupt:\n # Clean up environment before exiting.\n runner.tear_down_environment()\n raise", "title": "" }, { "docid": "7a97137f9fdafd83cfd42e2709074d35", "score": "0.72538805", "text": "def test():\n # Run all the tests before aborting.\n failures = test_lint() + test_unittests()\n if failures:\n logging.error('Some tests failed! See above for details.')\n for (step, pkg) in failures:\n logging.error(' %s failed %s phase.', pkg, step)\n sys.exit(1)", "title": "" }, { "docid": "1da5e5315a233d2c00bc615f4af34708", "score": "0.7064208", "text": "def run_tests():", "title": "" }, { "docid": "65a8b5888b0cb721f5756ccb24f53963", "score": "0.703894", "text": "def run_tests(self):\n # import here, because outside the required eggs aren't loaded yet\n import pytest\n\n sys.exit(pytest.main(self.test_args))", "title": "" }, { "docid": "3af2a45be48c23b56bdcbc2bff291922", "score": "0.7038171", "text": "def run_tests(self):\n # Import here, because outside the required eggs aren't loaded yet\n import pytest\n sys.exit(pytest.main(self.test_args))", "title": "" }, { "docid": "910ae1dda25d8a57b4e1ded31ee26947", "score": "0.7034154", "text": "def test_run_command(self):\n command = \"ls\"\n result = run_command(command)\n self.assert_equal(result[0], 0)", "title": "" }, { "docid": "514740848c45d9f726b80cb3cfbe920c", "score": "0.7033975", "text": "def test_and_run_command(self):\n self.build()\n exe = self.getBuildArtifact(\"a.out\")\n self.runCmd(\"file \" + exe, CURRENT_EXECUTABLE_SET)\n\n # Break inside the main.\n lldbutil.run_break_set_by_symbol(\n self, \"main\", num_expected_locations=1)\n\n self.runCmd(\"run\", RUN_SUCCEEDED)\n\n # The stop reason of the thread should be breakpoint.\n self.expect(\"thread list\", STOPPED_DUE_TO_BREAKPOINT,\n substrs=['stopped',\n 'stop reason = breakpoint'])\n\n # The breakpoint should have a hit count of 1.\n self.expect(\"breakpoint list -f\", BREAKPOINT_HIT_ONCE,\n substrs=[' resolved, hit count = 1'])\n\n self.runCmd(\"next\")\n self.runCmd(\"next\")\n\n # Try frame variable.\n self.expect(\"frame variable index\", VARIABLES_DISPLAYED_CORRECTLY,\n substrs=['(int32_t) index = 512'])\n\n # Try an interpreted expression.\n self.expect(\"expr (index + 512)\", VARIABLES_DISPLAYED_CORRECTLY,\n substrs=['1024'])\n\n # Try a JITted expression.\n self.expect(\n \"expr (int)getpid(); (index - 256)\",\n VARIABLES_DISPLAYED_CORRECTLY,\n 
substrs=['256'])\n\n self.runCmd(\"kill\")", "title": "" }, { "docid": "2a396fe04dea697cd0976224e16ff357", "score": "0.69653946", "text": "def run_test(self, testcase, name, options):", "title": "" }, { "docid": "2a396fe04dea697cd0976224e16ff357", "score": "0.69653946", "text": "def run_test(self, testcase, name, options):", "title": "" }, { "docid": "44a7cac461e2062e7c114b87a4de0255", "score": "0.69580805", "text": "def run_tests():\n\n args = parse_args()\n config = read_config(args)\n for test_name in config.sections():\n print \"Running system test: %s\" % test_name\n systest = Systest(test_name, config)\n result = systest.run()\n print \"Result: %s\" % result\n if result == 'ERROR':\n print systest.error_message\n print \"\"", "title": "" }, { "docid": "99b2070859e8171b9b00aab314d35d0a", "score": "0.69170725", "text": "def test(coverage=False):\n tests = subprocess.call(['python', '-c', \"import tests as tests; tests.run()\"])\n sys.exit(tests)", "title": "" }, { "docid": "cdaad5f4f2e1f4f5b011cb60de791af3", "score": "0.6913164", "text": "def test_command():\n click.echo('Test Test.')", "title": "" }, { "docid": "4657b5ce8318f64de32dd14a049bb96b", "score": "0.68747467", "text": "def run(self):\n cmd = 'coverage3 run setup.py pytest %s' % self.get_args()\n cmd += '&& coverage3 report'\n try:\n check_call(cmd, shell=True)\n except CalledProcessError as exc:\n print(exc)\n print('Coverage tests failed. Fix the errors above and try again.')\n sys.exit(-1)", "title": "" }, { "docid": "52daa1a9a266fbafbe71f95bf7f0ca33", "score": "0.68618476", "text": "def test_cli():", "title": "" }, { "docid": "b233a22aeb9e45cb2bb4495177b732e2", "score": "0.68420464", "text": "def handle(self, *args, **options):\n os.system(\"pytest -o log_cli=TRUE -v -rf\")", "title": "" }, { "docid": "7727ab700c58af5696cec6ef488837c8", "score": "0.6796798", "text": "def test_run_a_simple_test():", "title": "" }, { "docid": "5f83d4a6738821311af1f39d14581099", "score": "0.67913383", "text": "def test_cli(self):\n self.standard_cli_test()", "title": "" }, { "docid": "fc2cb21b500cdc03bbc90212a11349aa", "score": "0.6773894", "text": "def test():\n tests = unittest.TestLoader().discover(\"project/tests\", pattern=\"test*.py\")\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n sys.exit(result)", "title": "" }, { "docid": "a523fd95ec24c1454e75a0cd573b89bc", "score": "0.67549413", "text": "def action(self, args):\n\n inputs = InputValues(args)\n\n call_dir = os.getcwd()\n\n do_list_tests = inputs.get('list_tests')\n\n test_case_name = inputs.get('test_case_name')\n run_all_tests = test_case_name is None\n\n test_dir = as_path(\n inputs.get(\n 'test_dir', default=call_dir, is_path=True),\n 'Source files directory', is_dir=True, preexists=True)\n\n test_case_names = []\n if run_all_tests:\n for test_case in os.listdir(path=test_dir):\n if os.path.exists(\n os.path.join(test_dir, test_case, \"expected\")\n ):\n test_case_names.append(test_case)\n else:\n test_case_names.append(test_case_name)\n\n test_case_names.sort()\n\n status = 0\n\n if do_list_tests:\n for test_case_name in test_case_names:\n print(test_case_name)\n exit(0)\n\n failed_tests = []\n\n for test_case_name in test_case_names:\n\n test_case_dir = os.path.join(test_dir, test_case_name)\n if not os.path.exists(test_case_dir):\n raise OasisException(f\"Test directory does not exist: {test_case_name}\")\n\n run_dir = as_path(inputs.get('run_dir', is_path=True), 'Run directory', is_dir=True, preexists=False)\n if run_dir 
is None:\n with tempfile.TemporaryDirectory() as tmp_run_dir:\n test_result = om().run_fm_test(test_case_dir, tmp_run_dir)\n else:\n run_dir = os.path.join(run_dir, test_case_name)\n test_result = om().run_fm_test(test_case_dir, run_dir)\n\n if not test_result:\n failed_tests.append(test_case_name)\n if status == 0:\n status = 1\n\n if len(failed_tests) == 0:\n self.logger.info(\"All tests passed\")\n else:\n self.logger.info(\"{} test failed: \".format(len(failed_tests)))\n [self.logger.info(n) for n in failed_tests]\n\n exit(status)", "title": "" }, { "docid": "57aea89fc4ca52fa5b557ba515bd4ec0", "score": "0.674928", "text": "def test_command_run(self, mock_get_dot_path):\n mock_get_dot_path.return_value = \"\"\n with assert_raises(SystemExit) as cm:\n main.run_command(None, None)\n assert_in(\"no backup and/or files folder found\", cm.exception.args[0])", "title": "" }, { "docid": "de1ce77334192aac0c5dd8d819ee1f6d", "score": "0.67435825", "text": "def test(self):\n exes = [\n \"db_checkpoint\",\n \"db_deadlock\",\n \"db_dump\",\n \"db_load\",\n \"db_printlog\",\n \"db_stat\",\n \"db_upgrade\",\n \"db_verify\",\n ]\n for exe in exes:\n reason = \"test version of {0} is {1}\".format(exe, self.spec.version)\n self.run_test(\n exe,\n [\"-V\"],\n [self.spec.version.string],\n installed=True,\n purpose=reason,\n skip_missing=True,\n )", "title": "" }, { "docid": "0855f4307cdcca9adf1c58b330434369", "score": "0.67293453", "text": "def _run_tests(self, args):\n self._log(constants.VALIDATE_LOG_LEVEL_INFO, \"{0}Running tests{0}\".format(constants.LOG_DIVIDER))\n\n # Get absolute path to package\n path_package = os.path.abspath(args.package)\n # Ensure the package directory exists and we have READ access\n sdk_helpers.validate_dir_paths(os.R_OK, path_package)\n\n # get values for tox args if they exists otherwise set to default\n tox_args = args.tox_args if hasattr(args, \"tox_args\") else None # default is None\n\n # check if tox tests installed and run tox if so\n tox_tests_valid_or_skipped, issues = self._validate_tox_tests(path_package, tox_args, args.settings)\n self.VALIDATE_ISSUES[\"tests\"] = issues\n self.SUMMARY_LIST += issues\n\n for issue in issues:\n self._log(issue.get_logging_level(), issue.error_str())\n\n self._print_status(constants.VALIDATE_LOG_LEVEL_INFO, \"tests\", tox_tests_valid_or_skipped)", "title": "" }, { "docid": "6a982478f629f643d1f7b3027e3a5a72", "score": "0.6727022", "text": "def testCommandExecution(self):\n cmd_valid = TestMeController.TestCommand(self.env,\n name='valid',\n number=10)\n tmc = TestMeController(self.env)\n self.assert_true(tmc.process_command(cmd_valid))", "title": "" }, { "docid": "cec7ff696d01975cbd3e78a2ca1a753a", "score": "0.6725478", "text": "def do_tests():\n unittest.main(verbosity=2)", "title": "" }, { "docid": "48f8dbc6559a4daa1da7150744eccee9", "score": "0.67243457", "text": "def run_cmd(self, xfail=False):\n start_time = datetime.datetime.now()\n try:\n if not os.environ.get('TESTING', False):\n out = subprocess.check_output(self.cmd,\n stderr=subprocess.STDOUT,\n cwd=self.working_dir)\n else:\n out = \"Skipped for testing mode.\"\n end_time = datetime.datetime.now()\n except subprocess.CalledProcessError as e:\n self.returncode = e.returncode\n self.stdout = e.output\n if xfail:\n env.log(self)\n else:\n env.error(self)\n else:\n self.returncode = 0\n self.stdout = out\n env.log(self)\n env.debug(\"Command took {} seconds\".format(\n (end_time - start_time).seconds))", "title": "" }, { "docid": "95e4cf11d590b7991745729ad9ccb957", "score": 
"0.6713553", "text": "def do_unittests(self, subcmd, opts, *args):\n #print(subcmd, opts, args)\n sys.argv = [sys.argv[0]] + list(args)\n unittest.main()", "title": "" }, { "docid": "04469a364232d288a04a4da833d01479", "score": "0.6693241", "text": "def _check_command_test(self, cmd_to_test):\n if os.access(cmd_to_test, os.X_OK):\n cmd = cmd_to_test + \" -V\"\n _exit_code, output = exec_wait(\n _cmd=cmd,\n _output_capture=True,\n _timeout=self.config['timeout']\n )\n self._logger.debug(\"{func} - cmd |{cmd}| - exit_code |{exit_code}| output |{output}| \".format(\n func=\"_check_command_test\",\n cmd=cmd,\n exit_code=_exit_code,\n output=output))\n if _exit_code != 0:\n _message = self._MESSAGES_LIST[\"e000017\"].format(cmd, output)\n self._logger.error(\"{0}\".format(_message))\n raise exceptions.PgCommandUnAvailable(_message)\n else:\n _message = self._MESSAGES_LIST[\"e000016\"].format(cmd_to_test)\n self._logger.error(\"{0}\".format(_message))\n raise exceptions.PgCommandNotExecutable(_message)", "title": "" }, { "docid": "bd8333604106e041536aeb5125faf9c3", "score": "0.668864", "text": "def _runTests( option, opt_str, value, parser ):\n print \"Running tests...\"\n _testPrng()\n _testPkm()\n print \"All tests complete!\"\n exit( 0 )", "title": "" }, { "docid": "7ec6e8433b24cc33a8e2348463804a59", "score": "0.6674357", "text": "def test_main():\n with pytest.raises(SystemExit) as _:\n cli.main()", "title": "" }, { "docid": "816eb71aeb61b7f92bbc5b0d210f2b90", "score": "0.6641976", "text": "def run_tests(self):\n\n if __name__ == \"__main__\":\n sys.exit('The runner cannot be executed directly.'\n ' You need to import it within project specific runner. Session terminated.')\n\n # cleanup previous results\n self.reporter.cleanup_results()\n\n # import execution class\n executor_class = self.shishito_support.get_module('platform_execution')\n # executor_class = getattr(import_module(platform_path), 'ControlExecution')\n executor = executor_class(self.shishito_support, self.test_timestamp)\n\n # run test\n exit_code = executor.run_tests()\n\n # archive results + generate combined report\n self.reporter.archive_results()\n self.reporter.generate_combined_report()\n\n # upload results to QAStats test management app\n qastats_credentials = self.shishito_support.get_opt('qastats')\n if qastats_credentials:\n try:\n qas_user, qas_password = qastats_credentials.split(':', 1)\n except (AttributeError, ValueError):\n raise ValueError('QAStats credentials were not specified! Unable to connect to QAStats.')\n\n qastats = QAStats(qas_user, qas_password, self.test_timestamp, self.epoch, self.test_build)\n qastats.post_results()\n\n # upload results to TestRail test management app\n test_rail_credentials = self.shishito_support.get_opt('test_rail')\n if test_rail_credentials:\n try:\n tr_user, tr_password = test_rail_credentials.split(':', 1)\n except (AttributeError, ValueError):\n raise ValueError('TestRail credentials were not specified! 
Unable to connect to TestRail.')\n\n test_rail = TestRail(tr_user, tr_password, self.test_timestamp, self.test_build)\n test_rail.post_results()\n\n return exit_code", "title": "" }, { "docid": "a912dd9e8f2daf3462239a09ef30909f", "score": "0.6634059", "text": "def run(self) -> None:\n try:\n results = self.run_r_tests()\n except subprocess.CalledProcessError as e:\n raise TestError(e.stderr) from e\n for test_file, result in results.items():\n for res in result:\n test = self.test_class(self, test_file, res)\n print(test.run(), flush=True)", "title": "" }, { "docid": "04df5606eee71183a4a5a62dc9df76c2", "score": "0.66239446", "text": "def main():\n program_name = os.path.basename(sys.argv[0])\n\n initialize_debugging(program_name)\n process_environment_variables()\n arguments = process_command_line()\n\n if len(arguments) == 0:\n logging.warning(\"Please specify at least 1 test file to process\")\n display_help()\n sys.exit(0)\n\n if is_privileged():\n print(\"It's not recommended to run this utility as a privileged user\")\n print(\n \"and you should definitely avoid doing so when running unverified test suites!\"\n )\n if not parameters[\"Auto confirm\"]:\n print(\n \"However you'll get the chance to review each command to be executed...\"\n )\n if not ask_for_confirmation(\n \"Please confirm execution (y[es]): \", (\"y\", \"yes\")\n ):\n print(\"Better safe than sorry!\")\n sys.exit(0)\n\n for filename in arguments:\n if not os.path.isfile(filename):\n logging.error(\"'%s' is not a file name\", filename)\n else:\n try:\n test_file = defusedxml.minidom.parse(filename)\n except:\n logging.critical(\"XML file error\")\n sys.exit(1)\n\n # Get the root element of the document:\n test_suite = test_file.documentElement\n\n # Get the name of the program we'll be testing:\n program_tested = os.path.basename(filename).replace(\".xml\", \"\")\n if test_suite.hasAttribute(\"program\"):\n program_tested = test_suite.getAttribute(\"program\").strip()\n\n color_print(\"Testing the '%s' command:\" % program_tested, colorama.Style.BRIGHT)\n\n # Get the processor required for this file and verify if it's OK:\n if test_suite.hasAttribute(\"processor\"):\n if not verify_processor(test_suite.getAttribute(\"processor\")):\n logging.critical(\"This test file requires a different or more recent processor\")\n sys.exit(1)\n\n # Determine if the original command will have to be executed:\n execute_original_command = False\n original_command_full_path = \"\"\n if not parameters[\"Skip original command\"]:\n if (\n not os.path.isdir(program_tested + \".orig\")\n or parameters[\"Overwrite results\"]\n ):\n execute_original_command = True\n if parameters[\"Original command path\"] == \"\":\n original_command_full_path = shutil.which(program_tested)\n else:\n original_command_full_path = shutil.which(\n program_tested, path=parameters[\"Original command path\"]\n )\n if original_command_full_path is None:\n logging.critical(\"Original command not found\")\n sys.exit(1)\n else:\n logging.debug(\n \"Original command found at: %s\", original_command_full_path\n )\n\n # Determine if the new command will have to be executed:\n execute_new_command = False\n new_command_full_path = \"\"\n if parameters[\"New command path\"] != \"\":\n if (\n not os.path.isdir(program_tested + \".new\")\n or parameters[\"Overwrite results\"]\n ):\n execute_new_command = True\n new_command_full_path = shutil.which(\n program_tested, path=parameters[\"New command path\"]\n )\n if new_command_full_path is None:\n logging.critical(\"New command 
not found\")\n sys.exit(1)\n else:\n logging.debug(\"New command found at: %s\", new_command_full_path)\n\n # Get all the test cases in the test suite:\n test_cases = test_suite.getElementsByTagName(\"test-case\")\n\n # If we are to keep results, note some system information\n # for next time & place we'll make comparisons:\n original_command_md5 = \"\"\n new_command_md5 = \"\"\n if parameters[\"Keep results\"]:\n if execute_original_command:\n original_command_md5 = describe_test_environment(\n program_tested + \".orig\", original_command_full_path\n )\n if execute_new_command:\n new_command_md5 = describe_test_environment(\n program_tested + \".new\", new_command_full_path\n )\n\n # But at the minimum check that we are not testing the same command:\n else:\n if execute_original_command:\n original_command_md5 = get_file_digest(original_command_full_path)\n if execute_new_command:\n new_command_md5 = get_file_digest(new_command_full_path)\n if original_command_md5 == new_command_md5:\n logging.warning(\"The commands are the same! Disabling new command run\")\n execute_new_command = False\n\n # Process each test case:\n test_number = 0\n skipped_count = 0\n different_count = 0\n same_count = 0\n for test_case in test_cases:\n test_name, pre, stdin, cmd, timeout, post = read_test_case(test_case)\n test_number += 1\n print(' Test #{} \"{}\"'.format(test_number, test_name))\n\n # Confirm test execution (in case you are not the author of the test suite):\n if not parameters[\"Auto confirm\"]:\n if not confirm_test(pre, stdin, cmd, post):\n color_print(\" Skipping test\", colorama.Fore.YELLOW)\n skipped_count += 1\n continue\n\n # Execute tests:\n results1 = None\n if execute_original_command:\n results1, post_output1 = execute_test(\n program_tested + \".orig\",\n test_number,\n pre,\n stdin,\n original_command_full_path,\n cmd,\n timeout,\n post,\n )\n elif not parameters[\"Skip original command\"]:\n results1, post_output1 = load_previous_results(\n program_tested + \".orig\", test_number\n )\n results2 = None\n if execute_new_command:\n results2, post_output2 = execute_test(\n program_tested + \".new\",\n test_number,\n pre,\n stdin,\n new_command_full_path,\n cmd,\n timeout,\n post,\n )\n\n # Compare tests results:\n if results1 and results2:\n same = True\n if results1.returncode != results2.returncode:\n same = remind_command(same, cmd)\n color_print(\n \" Return codes are different!\",\n colorama.Fore.RED + colorama.Style.BRIGHT,\n )\n if not parameters[\"Quiet differences\"]:\n print(\" Original = {}\".format(results1.returncode))\n print(\" New = {}\".format(results2.returncode))\n if results1.stdout != results2.stdout:\n same = remind_command(same, cmd)\n color_print(\n \" Standard output is different!\",\n colorama.Fore.RED + colorama.Style.BRIGHT,\n )\n if not parameters[\"Quiet differences\"]:\n diff = difflib.unified_diff(\n str(results1.stdout).split(os.linesep),\n str(results2.stdout).split(os.linesep),\n fromfile=\"Original stdout\",\n tofile=\"New stdout\",\n )\n for line in diff:\n print(line)\n if results1.stderr != results2.stderr:\n same = remind_command(same, cmd)\n color_print(\n \" Standard error output is different!\",\n colorama.Fore.RED + colorama.Style.BRIGHT,\n )\n if not parameters[\"Quiet differences\"]:\n diff = difflib.unified_diff(\n str(results1.stderr).split(os.linesep),\n str(results2.stderr).split(os.linesep),\n fromfile=\"Original stderr\",\n tofile=\"New stderr\",\n )\n for line in diff:\n print(line)\n if post_output1 != post_output2:\n same = 
remind_command(same, cmd)\n color_print(\n \" Post commands output is different!\",\n colorama.Fore.RED + colorama.Style.BRIGHT,\n )\n if not parameters[\"Quiet differences\"]:\n diff = difflib.unified_diff(\n str(post_output1).split(os.linesep),\n str(post_output2).split(os.linesep),\n fromfile=\"Original post output\",\n tofile=\"New post output\",\n )\n for line in diff:\n print(line)\n if same:\n same_count += 1\n color_print(\" Same results\", colorama.Fore.GREEN)\n else:\n different_count += 1\n\n # Print test suite results:\n if not parameters[\"Skip original command\"] and execute_new_command:\n color_print(\"Results:\", colorama.Style.BRIGHT)\n if different_count:\n color_print(\n \" {} out of {} test cases have different results\".format(\n different_count, same_count + different_count\n ),\n colorama.Style.BRIGHT,\n )\n else:\n color_print(\n \" All {} test cases have the same results\".format(same_count),\n colorama.Fore.GREEN,\n )\n if skipped_count:\n color_print(\n \" {} test cases skipped\".format(skipped_count),\n colorama.Fore.YELLOW,\n )\n\n sys.exit(0)", "title": "" }, { "docid": "24ca50a7f5254fea4a85b04fb5c2e9fc", "score": "0.6589736", "text": "def run_python_tests(args):\n\tinstall_test_harness(args)\n\tcommand = \"nosetests -w /var/www/flaskapps/etl/ETL -v --exe --with-xunit --xunit-file=%s/python_log_%s.xml --cover-xml --cover-xml-file=%s/python_coverage_%s.xml\" % (LOG_FOLDER, datetime.datetime.now().strftime(\"%Y-%m-%d\"), LOG_FOLDER, datetime.datetime.now().strftime(\"%Y=%m-%d\"))\n\tsubprocess.call(command, shell=True)", "title": "" }, { "docid": "a933262a48cf3d9d847764379608d6ff", "score": "0.6586305", "text": "def test_commands(host, cmd):\n # For now this is just a basic smoke test.\n assert host.command(f\"{cmd:s} --version\").rc == 0\n return", "title": "" }, { "docid": "c5b045494fe42762fe31c8df4bd4dea8", "score": "0.657972", "text": "def test_checks_pass():\n call_command('check')", "title": "" }, { "docid": "4d99f0a2ea968238e5428f57b9f77da5", "score": "0.6574354", "text": "def test_cli(self):\n runner = CliRunner()\n result = runner.invoke(pyetl, ['run', '--help'])\n self.assertEqual(result.exit_code, 0)", "title": "" }, { "docid": "2929b972833a0faa91041f7e66beb6c6", "score": "0.65677583", "text": "def main():\n\tall_sections = get_sections()\n\n\t(options, args) = parse_options(all_sections.keys())\n\tif args:\n\t\tlogger = logging.getLogger('test')\n\t\tlogger.error('Unused arguments: %r', args)\n\t\tsys.exit(2)\n\n\tsetup_environment()\n\tsetup_debug(options.verbose)\n\n\tif options.sections:\n\t\tselected_sections = options.sections\n\telse:\n\t\tselected_sections = all_sections.keys()\n\ttests = get_tests(selected_sections)\n\n\ttest_set = TestSet(tests)\n\n\tif options.dry:\n\t\ttest_environment = TestEnvironment(interactive=options.interactive)\n\telse:\n\t\ttest_environment = TestEnvironment(interactive=options.interactive,\n\t\t\t\tlogfile=options.logfile)\n\ttest_environment.tag(require=options.tags_required,\n\t\t\tignore=options.tags_ignored,\n\t\t\tprohibit=options.tags_prohibited)\n\tif options.exposure:\n\t\ttest_environment.set_exposure(options.exposure)\n\ttest_set.set_environment(test_environment)\n\ttest_set.set_prefix(options.count)\n\ttest_set.set_format(options.format)\n\ttest_set.run_tests(options.filter, options.dry)", "title": "" }, { "docid": "3b37c9dbecb2ec881742322697c59362", "score": "0.65576965", "text": "def test_execute_cmd_illigal_cmd(self):\n cmd = 'not a command'\n with self.assertRaises(SystemExit) as se:\n 
execute_cmd(cmd)\n\n self.assertEqual(se.exception.code, 2)", "title": "" }, { "docid": "a261802f0cd3ae5f7a4a55466d362418", "score": "0.6556596", "text": "def run_test(test_name):\n\n print 'Running test_%s...' % test_name\n os.system('./test_%s.py' % test_name)\n print", "title": "" }, { "docid": "670a7e4d59bf79dc64a5b613d8738c8e", "score": "0.6516186", "text": "def run_test(self):\n return 0", "title": "" }, { "docid": "4a27a6675d871b046d2b26a8c8c94e2b", "score": "0.6509636", "text": "def main():\n test_parser = argparse.ArgumentParser(\n usage='python runtests.py [-h] [pytest_args]',\n description=\"Helper script to run pywinpty's test suite\")\n test_parser.add_argument('--run-slow', action='store_true', default=False,\n help='Run the slow tests')\n _, pytest_args = test_parser.parse_known_args()\n run_pytest(extra_args=pytest_args)", "title": "" }, { "docid": "de53ed439c615e749e2f5eb5cbcab418", "score": "0.65025634", "text": "def runtests():\r\n from Cython.Debugger.Tests import test_libpython_in_gdb\r\n\r\n success_libcython = run_unittest_in_module(__name__)\r\n success_libpython = run_unittest_in_module(test_libpython_in_gdb.__name__)\r\n\r\n if not success_libcython or not success_libpython:\r\n sys.exit(2)", "title": "" }, { "docid": "61df8dcdeefd52a5e92d54175d6d8a42", "score": "0.64965844", "text": "def test_successful_run(self):\n self.generic_test_successful_run(['task', 'run', 'west/bozo/test/hello', 'ls'], None)", "title": "" }, { "docid": "ec3d6e8a13a1738b7bdd331ef6f5a274", "score": "0.649298", "text": "def _run_tests(self, solution):\n base_command = self._command_formatter(\n solution.answer.programming_language)\n for test in solution.tests:\n cin = test.input_data\n command = base_command + cin\n err_res = self._run_container(solution, command)\n test_result = self.err_code_validation(err_res, test, solution)\n if test_result == -1:\n return\n solution.passed += 1\n\n self.db_manager.add_status(\n solution.user_solution_id,\n \"Задание выполнено.\",\n solution.passed,\n None,\n None)\n return", "title": "" }, { "docid": "81e25542cc4d1d0a58fc87f3dce20eab", "score": "0.6489881", "text": "def test():\n import pytest\n\n try:\n return pytest.main([\"--pyargs\", \"pysim\"])\n except SystemExit as e:\n return e.code", "title": "" }, { "docid": "5633da17632e9e3b6fe3ef3b553b2597", "score": "0.64766484", "text": "def test():\n # Allow selecting specific submodule\n args = \" --verbosity={0} -s {1}\".format(2, 'tests/')\n # Use pty so the process buffers \"correctly\"\n run('nosetests --rednose' + args, pty=True)", "title": "" }, { "docid": "e459445a20ec45794c633fb9adc01351", "score": "0.6469454", "text": "def test_with_command(self):\n self.build()\n self.simulate_conditional_break_by_user()", "title": "" }, { "docid": "65f60d26242eab494fc3a7e94c9285b3", "score": "0.6462872", "text": "def run_all_unit_tests():", "title": "" }, { "docid": "00e1adb461f07843e2d397a24b2a20f1", "score": "0.6462057", "text": "def subprocess_run(self, *args):\n return self.testdir.runpytest_subprocess(*args)", "title": "" }, { "docid": "4ffb946b5b2bd1b01c522f2bacdae87c", "score": "0.6457123", "text": "def run_test(self, setup=True):\n\n try:\n\n if setup:\n self.setup()\n self._setup_internal()\n\n self.prepare()\n\n self._exp_state = \"Testing\"\n print(\"Start test.\")\n\n self.test()\n\n self.end_test()\n self._end_test_internal()\n\n self._exp_state = \"Tested\"\n print(\"Testing complete.\")\n\n except Exception as e:\n\n self._exp_state = \"Error\"\n self.process_err(e)", "title": "" }, { 
"docid": "604876f8b9a758411c81be16b9f0f0ad", "score": "0.64496994", "text": "def run_unittest_option(self):\n option = self.options.known.unittest\n\n if option and utils.is_valid_directory(self.target_path):\n\n command = create_unittest_command(\n self.target_path,\n self.__exec_base\n )\n\n # needed to scope report creation\n changed = utils.has_changed_directory(self.target_path)\n\n\n utils.log(QA_LOG_TMPL.format(\n changed,\n self.target_path,\n utils.get_current_directory()\n ))\n\n self.process.execute(command)", "title": "" }, { "docid": "7a8889e0bd59bee1a8c8248495507873", "score": "0.64457494", "text": "def runTest(self):\n self.test_main()", "title": "" }, { "docid": "b641610ff0bb8e6d3b949c5a324fc732", "score": "0.64436865", "text": "def test_cmd(cmd):\n return get_return_code('command -v %s' % cmd)", "title": "" }, { "docid": "2492f1c0f6324e5d68a2299cba5b523e", "score": "0.64420485", "text": "def run_tests(self):\n\n eix = bb.wrap(local.path('/usr/bin/eix'), self)\n _eix = bb.watch(eix)\n _eix(\"clang\")", "title": "" }, { "docid": "9d3180f5459dfaba47e2f33536f57bfe", "score": "0.6439506", "text": "def main():\n parser = create(OptionParser)\n parser.add_option('-f', '--filter', metavar='FILE',\n help='execute commands from FILE')\n parser.add_option('-d', '--debug', action='store_true',\n default=False, help='enable debugging mode')\n parser.add_option('-v', '--verbose', action='store_const',\n dest='verbosity', default=0, const=10,\n help='be verbose',)\n opts, args = parser.parse_args()\n\n if opts.filter:\n try:\n cmdin = file(opts.filter)\n except IOError, e:\n sys.stderr.write('error: %s\\n' % e.strerror)\n sys.exit(1)\n else:\n cmdin = sys.stdin\n\n context = create(TestContext, cmdin)\n context.settings['cli:debug'] = opts.debug\n context.settings['cli:verbosity'] = opts.verbosity\n\n if len(args) == 0:\n context.execute_loop()\n else:\n command = ' '.join(args) + '\\n'\n context.execute_string(command)\n\n sys.exit(context.status)", "title": "" }, { "docid": "dba7749437a74f8fee1b8cc2727657c0", "score": "0.64378595", "text": "def test_stdout(self):\n print \"Running: %s - %s\" % (self.id(), self.shortDescription())\n # add a cleanup method to run after tearDown()\n self.addCleanup(self.cleanup_remote_commands)\n for node in g.config[\"nodes\"]:\n rcode, rout, rerr = g.run(node, \"ls -ld /etc\")\n self.assertEqual(rcode, 0)\n self.assertTrue(rout)\n self.assertFalse(rerr)", "title": "" }, { "docid": "f56578ab1cb41a9e203d51e21adb4cca", "score": "0.6436566", "text": "def test_call(self):\n result = my_cli([\"hello\"])\n self.assertEqual(result, 0)", "title": "" }, { "docid": "c366b87e561cdb76b80e34c5d01f0e97", "score": "0.6427889", "text": "def run_all_tests(executor_path, flags, tests_dir_path):\n print('\\nTesting {0} {1}'.format(executor_path, ' '.join(flags)))\n\n if not os.path.exists(executor_path):\n print('ERROR -- {0} not found\\n'.format(executor_path))\n return False\n\n starttime = time.time()\n errorcount = 0\n\n for i, test in enumerate(discover_tests(dir_path=tests_dir_path), start=1):\n print('Running test #{:0>3} {:.<30}'.format(i, '[' + test.name + ']'),\n end='', flush=True)\n\n # Figure out what to feed into stdin and what output to expect; encode\n # them as bytes.\n stdin_feed = test.params.get('feed-in', '').encode('utf-8')\n expected_out = test.params.get('expect-out', '').encode('utf-8')\n\n try:\n if executor_path.endswith('.py'):\n executor_invocation = ['python', executor_path]\n else:\n executor_invocation = [executor_path]\n\n subproc = 
subprocess.run(\n executor_invocation + flags + [test.program_path],\n timeout=5,\n stdout=subprocess.PIPE,\n input=stdin_feed)\n\n if subproc.returncode != 0:\n errorcount += 1\n print('ERROR')\n print('---- Executor returned: {0}'.format(subproc.returncode))\n print('---- Output: {0}'.format(subproc.stdout))\n elif subproc.stdout != expected_out:\n errorcount += 1\n print('ERROR')\n print('---- Expected output: {0}'.format(expected_out))\n print('---- Output: {0}'.format(subproc.stdout))\n else:\n print('OK')\n except subprocess.TimeoutExpired as e:\n errorcount += 1\n print('ERROR -- timeout')\n except FileNotFoundError as e:\n errorcount += 1\n print('ERROR -- file not found')\n\n if errorcount == 0:\n print('---- All tests ran OK ----')\n else:\n print('---- Tests had %s errors ----' % errorcount)\n return False\n\n print('Elapsed: %.4s sec' % (time.time() - starttime,))\n return True", "title": "" }, { "docid": "2657e97fbfe5f6b6df31fd85699822fc", "score": "0.64188784", "text": "def test_basic(self):\n self.install()\n self.runtests()", "title": "" }, { "docid": "f683054b3e769f90275eb87e8c7fb9c7", "score": "0.6417513", "text": "def run_tests(self, config=None):\n if config is None:\n config = self.config\n pythonpath = self.pythonpath\n self.datatree.clear()\n tempfilename = get_conf_path('unittest.results')\n self.testrunner = self.framework_registry.create_runner(\n config.framework, self, tempfilename)\n self.testrunner.sig_finished.connect(self.process_finished)\n\n try:\n self.testrunner.start(config, pythonpath)\n except RuntimeError:\n QMessageBox.critical(self,\n _(\"Error\"), _(\"Process failed to start\"))\n else:\n self.set_running_state(True)\n self.status_label.setText(_('<b>Running tests ...<b>'))", "title": "" }, { "docid": "44aec96d6fd683e78c311ff37dd76724", "score": "0.6413444", "text": "def run_test(self):\n raise NotImplementedError", "title": "" }, { "docid": "744da6bee005927260f8b3ee65b03484", "score": "0.64040244", "text": "def test_plugin_subcommand(self):\n runner = CliRunner()\n result = runner.invoke(pyetl, ['run', 'test'])\n self.assertEqual(result.exit_code, 0)\n self.assertIn('Everything went well.', result.output)", "title": "" }, { "docid": "8d29999be321c6f0530bc60e1d50756b", "score": "0.6392917", "text": "def test():\n from unittest import TestLoader, TextTestRunner\n suite = TestLoader().discover('tests')\n TextTestRunner(verbosity=2, buffer=False).run(suite)", "title": "" }, { "docid": "8d29999be321c6f0530bc60e1d50756b", "score": "0.6392917", "text": "def test():\n from unittest import TestLoader, TextTestRunner\n suite = TestLoader().discover('tests')\n TextTestRunner(verbosity=2, buffer=False).run(suite)", "title": "" }, { "docid": "cb392ba1146b6b4224af4e3e52d3a53d", "score": "0.6367721", "text": "def main(self):\n\n assert hasattr(self, \"num_nodes\"), \"Test must set self.num_nodes in set_test_params()\"\n\n try:\n self.setup()\n self.run_test()\n except JSONRPCException:\n self.log.exception(\"JSONRPC error\")\n self.success = TestStatus.FAILED\n except SkipTest as e:\n self.log.warning(\"Test Skipped: %s\" % e.message)\n self.success = TestStatus.SKIPPED\n except AssertionError:\n self.log.exception(\"Assertion failed\")\n self.success = TestStatus.FAILED\n except KeyError:\n self.log.exception(\"Key error\")\n self.success = TestStatus.FAILED\n except subprocess.CalledProcessError as e:\n self.log.exception(\"Called Process failed with '{}'\".format(e.output))\n self.success = TestStatus.FAILED\n except Exception:\n 
self.log.exception(\"Unexpected exception caught during testing\")\n self.success = TestStatus.FAILED\n except KeyboardInterrupt:\n self.log.warning(\"Exiting after keyboard interrupt\")\n self.success = TestStatus.FAILED\n finally:\n exit_code = self.shutdown()\n sys.exit(exit_code)", "title": "" }, { "docid": "f96662e251f6f17385574b19dfa70ccc", "score": "0.6364367", "text": "def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = xmlrunner.XMLTestRunner(output='test-reports').run(tests)\n if result.wasSuccessful():\n return sys.exit(0)\n return sys.exit(1)", "title": "" }, { "docid": "bf3bb587d3794d1d975eab2a53851c27", "score": "0.636193", "text": "def test(mysql, use_migrations):\n command = 'pytest ./tests/'\n\n if mysql:\n command = 'TESTING_USE_DB=True ' + command\n if use_migrations:\n command = 'TESTING_USE_MIGRATIONS=True ' + command\n\n if os.system(command):\n raise RuntimeError('Tests could not be run.')", "title": "" }, { "docid": "173fe438a890d47cd8e199128ba320e7", "score": "0.6357408", "text": "def test_(self):\n for test_config in self.tests:\n self.run_single_test(test_config, show_passed=False)", "title": "" }, { "docid": "51b7106dd99baab935095e5cd1fb105f", "score": "0.63542473", "text": "def main():\n \n parser = argparse.ArgumentParser(description=\"Run the Seattle Testbed unit tests.\")\n\n # -f, -m, -a are mutually exclusive options\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"run_all\",\n help=\"Run all tests in this directory\")\n group.add_argument(\"-m\", \"--module\", type=str, action=\"store\",\n dest=\"module_name\", help=\"Run all tests for the given module\")\n group.add_argument(\"-f\", \"--file\", \"--files\", type=str, action=\"store\",\n dest=\"file_name\", nargs=\"+\", help=\"Run given test file(s) in alphabetical order, including their module's setup/subprocess/shutdown scripts\")\n\n # -s, -t can coexist just fine\n parser.add_argument(\"-t\", \"--time\", action=\"store_true\", dest=\"show_time\",\n help=\"Display the time taken to execute a test\")\n parser.add_argument(\"-s\", \"--security-layer\", \"--security-layers\", \n action=\"store\", dest=\"security_layers\", nargs=\"+\",\n help=\"Execute tests with a security layer (or layers)\")\n\n options = parser.parse_args()\n\n \n # Generate sorted list of valid unit test file names from all files \n # in the current working directory.\n all_files = glob.glob(\"*\")\n valid_files = filter_files(all_files)\n valid_files.sort()\n\n\n # Check if the show_time option is on.\n if options.show_time:\n global SHOW_TIME\n SHOW_TIME = True\n\n # Run tests for a list of file names (could contain just a single file)\n if options.file_name: \n # Verify that all files are from the same module. (Otherwise we can't \n # pick which setup/subprocess/shutdown scripts to run.)\n requested_modules = set()\n for file_name in options.file_name:\n module_name, descriptor = parse_file_name(file_name)\n requested_modules.add(module_name)\n\n if len(requested_modules) != 1:\n print \"Error: Please restrict your choice of test cases to a single \"\n print \"module when using the -f / --file / --files option.\"\n print \"Modules you requested were\", \", \".join(requested_modules)\n print\n return 1\n\n # Ensure alphabetical order of test cases that were supplied\n options.file_name.sort()\n\n # The test_module code is really poorly structured. 
I need to tell it to \n # consider the shutdown, setup, and subprocess scripts...\n module_file_list = filter_files(valid_files, module = module_name)\n \n files_to_use = (options.file_name + \n filter_files(module_file_list, descriptor='setup') + \n filter_files(module_file_list, descriptor='subprocess') + \n filter_files(module_file_list, descriptor='shutdown'))\n\n test_module(module_name, files_to_use, options.security_layers)\n\n\n # Test an entire module\n if options.module_name:\n module_file_list = filter_files(valid_files, module = options.module_name)\n test_module(options.module_name, module_file_list, options.security_layers)\n \n # Test all files\n if options.run_all:\n test_all(valid_files, options.security_layers)\n\n # Finally, set our exit status to be the number of failed tests.\n # This is required for continuous integration frameworks (see \n # SeattleTestbed/utf#61), and useful for shell scripting in general. \n # I'll cap the exit status at 125 to prevent potential 8-bit int \n # overflows on repos with lots of tests, and also stay clear of \n # POSIXly and conventionally used reserved/\"magic\" values.\n exit_status = min(len(failed_tests), 125)\n sys.exit(exit_status)", "title": "" }, { "docid": "ca83ec5d4428aabdc7aacf860e6a1fba", "score": "0.6352076", "text": "def runtests(test_labels=None):\n # Used as setup test_suite: must either exit or return a TestSuite\n failures = setup_and_run_tests(test_labels)\n sys.exit(bool(failures))", "title": "" }, { "docid": "097e0590f2c1c81e069319e54699884b", "score": "0.635143", "text": "def _execute_test(self):\n try:\n self._setup()\n self._run_openmoc()\n results = self._get_results()\n self._write_results(results)\n self._compare_results()\n finally:\n self._cleanup()", "title": "" }, { "docid": "e36f883077f6ba44bc677ac07dd21c99", "score": "0.63512856", "text": "def runTest(self):\n self.assertTrue(self.test_runner._run_test(self.testcase_dict))", "title": "" }, { "docid": "edce7dc1042015bff976bcd81db87fb8", "score": "0.63453346", "text": "def runTest(self):\n self.setUp()\n self.delayDisplay(' Tests Passed! 
')", "title": "" }, { "docid": "a04c10d368507b3e5fcd70345ce9ebc4", "score": "0.63452417", "text": "def test_run_retval0():\n retval, stdout, stderr = util.run_command([\"true\"])\n assert retval == 0", "title": "" }, { "docid": "7966f9a51edbcb6c936b08d0572bdc87", "score": "0.63411367", "text": "def run_simulation_test(repo, *additional_args):\n proc = subprocess.Popen([\n VPYTHON, os.path.join(repo.recipes_root_path, 'recipes.py'), 'test',\n ] + list(additional_args), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output, _ = proc.communicate()\n retcode = proc.returncode\n return retcode, output", "title": "" }, { "docid": "d1ec0e96d2950bb2667b42c8ae818207", "score": "0.63233286", "text": "def runTest(self):\n self.test_main()", "title": "" }, { "docid": "be9e89539e1fa59131231810faad3fea", "score": "0.6320077", "text": "def test_execute_cmd_echo(self):\n cmd = '(exit 1) | echo running the test'\n self.assertEqual(execute_cmd(cmd), 'running the test\\n')", "title": "" }, { "docid": "2e5190d6c33697e5ce1b6a0d32998ae6", "score": "0.63148767", "text": "def execute_test(\n test_directory,\n test_number,\n pre_commands,\n standard_input,\n full_command_path,\n command_line,\n timeout,\n post_commands,\n):\n logging.debug(\"execute_test(): test_directory=%s\", test_directory)\n logging.debug(\"execute_test(): test_number=%s\", str(test_number))\n logging.debug(\"execute_test(): pre_commands=%s\", \" \".join(pre_commands))\n logging.debug(\"execute_test(): standard_input=%s\", \" \".join(standard_input))\n logging.debug(\"execute_test(): full_command_path=%s\", full_command_path)\n logging.debug(\"execute_test(): command_line=%s\", command_line)\n logging.debug(\"execute_test(): timeout=%d\", timeout)\n logging.debug(\"execute_test(): post_commands=%s\", \" \".join(post_commands))\n\n # Making the test directories and getting inside:\n directory = test_directory + os.sep + str(test_number) + os.sep + \"tmp\"\n if not os.path.isdir(directory):\n try:\n os.makedirs(directory)\n except OSError as error:\n logging.critical(\n 'Unable to create the \"%s\" directory: %s', directory, error\n )\n sys.exit(1)\n os.chdir(directory)\n\n # Executing commands defined in the \"pre\" section:\n for line in pre_commands:\n pre_results = subprocess.run(line, shell=True, check=False)\n if pre_results.returncode != 0:\n logging.warning(\n \"Pre command '%s' returned %d\", line, pre_results.returncode\n )\n\n # Inserting the full command path in the command line at the first command reference:\n command_basename = os.path.basename(full_command_path)\n command_dirname = os.path.dirname(full_command_path)\n line = \"\"\n if command_line.startswith(command_basename):\n line = command_dirname + os.sep + command_line\n elif \" \" + command_basename in command_line:\n line = command_line.replace(\n \" \" + command_basename, \" \" + command_dirname + os.sep + command_basename, 1\n )\n elif \"\\t\" + command_basename in command_line:\n line = command_line.replace(\n \"\\t\" + command_basename,\n \"\\t\" + command_dirname + os.sep + command_basename,\n 1,\n )\n elif \";\" + command_basename in command_line:\n line = command_line.replace(\n \";\" + command_basename, \";\" + command_dirname + os.sep + command_basename, 1\n )\n logging.debug(\"execute_test(): modified command_line=%s\", line)\n\n # Executing command defined in the \"cmd\" section, keeping results if requested:\n if not timeout:\n timeout = parameters[\"Timeout\"]\n start_time = time.time_ns()\n try:\n if standard_input:\n one_line_input = 
os.linesep.join(standard_input) + os.linesep\n results = subprocess.run(\n line,\n shell=True,\n text=True,\n input=one_line_input,\n capture_output=True,\n timeout=timeout,\n check=False,\n )\n else:\n results = subprocess.run(\n line,\n shell=True,\n text=True,\n capture_output=True,\n timeout=timeout,\n check=False,\n )\n except subprocess.TimeoutExpired as status:\n standard_output = \"\"\n if status.stdout:\n standard_output = status.stdout.decode(\"utf-8\")\n standard_error_output = \"\"\n if status.stderr:\n standard_error_output = status.stderr.decode(\"utf-8\")\n results = subprocess.CompletedProcess(\n status.cmd, 0, standard_output, standard_error_output\n )\n elapsed_time = time.time_ns() - start_time\n logging.debug(\"execute_test(): results:\")\n logging.debug(results)\n if parameters[\"Keep results\"]:\n with open(os.pardir + os.sep + \"returncode\", \"w\") as file:\n file.write(str(results.returncode))\n with open(os.pardir + os.sep + \"stdout\", \"w\") as file:\n file.write(results.stdout)\n with open(os.pardir + os.sep + \"stderr\", \"w\") as file:\n file.write(results.stderr)\n with open(os.pardir + os.sep + \"time\", \"w\") as file:\n file.write(\n \"Elapsed time in s = {}{}\".format(elapsed_time / 1000000000, os.linesep)\n )\n file.write(\"Load average = {}{}\".format(os.getloadavg(), os.linesep))\n\n # Executing commands defined in the \"post\" section and collecting their output:\n post_output = \"\"\n for line in post_commands:\n post_results = subprocess.run(\n line, shell=True, text=True, capture_output=True, check=False\n )\n if post_results.returncode != 0:\n logging.warning(\n \"Post command '%s' returned %d\", line, post_results.returncode\n )\n if post_results.stdout:\n post_output = post_output + post_results.stdout\n if parameters[\"Keep results\"]:\n with open(os.pardir + os.sep + \"post\", \"w\") as file:\n file.write(post_output)\n\n # Removing unneeded directories\n if parameters[\"Keep results\"]:\n os.chdir(os.pardir)\n shutil.rmtree(\"tmp\")\n os.chdir(os.pardir + os.sep + os.pardir)\n else:\n os.chdir(os.pardir + os.sep + os.pardir + os.sep + os.pardir)\n shutil.rmtree(test_directory)\n\n return results, post_output", "title": "" }, { "docid": "202e356f599dc3fa502b04ee513d959b", "score": "0.63139296", "text": "def make_test(self):\n super(Vim, self).make_test()\n # if compile failed, skip this step\n if not self.compileError:\n print(f\"Repeats: {self.repeats}\")\n with self.conn.cd(self.path):\n # Disable tests that fail locally but pass on Travis on latest revision (05a627c3d, Apr 2023)\n # Unclear if container missing some dependency or container issue (like mounting)\n # Coverage and related metrics should be unaffected\n for i in range(self.repeats):\n result = self.conn.run(f\"timeout -s SIGKILL {self.timeout} su regular -c 'export TEST_MAY_FAIL=Test_strftime,Test_opt_set_keycode,Test_set_completion,Test_disassemble_closure_in_loop && make test'\", warn=True)\n # SIGKILL due to subprocesses (su regular -c ...) 
not being killed by SIGTERM (a vim-only mem error, rev f4c5fcb)\n if result.failed:\n self.maketestError = result.return_code\n self.exit_status_list.append(result.return_code)", "title": "" }, { "docid": "12e4958a8ffc4e1a0f0b53c13ea09a40", "score": "0.63093585", "text": "def run_part2_tests():\n for testname, test in functionality_tests:\n print(\"============\")\n print(\"Running test\", testname)\n try:\n score = StudentTester(\"client\").run_test(test)\n if score >= .99999:\n print(\"\\tTest Passes\")\n else:\n print(\"\\tTest FAILED.\")\n print(\"\\t\"+test.__doc__)\n except:\n print(\"\\tTest FAILED.\")\n print(\"\\t\"+test.__doc__)\n traceback.print_exc()\n print(\"\\n\\n\")", "title": "" }, { "docid": "b0324066fb0af8e31e68f94f16d0b0c8", "score": "0.6289412", "text": "def main():\n parser = argparse.ArgumentParser(description='Run host based unit tests.')\n parser.add_argument(\n '--enable_xml',\n type=str2bool,\n dest='enable_xml',\n nargs='?',\n const=True,\n default=False,\n help='Whether to output structured XML log output in out/dist/gtest directory')\n parser.add_argument(\n '-j',\n type=int,\n nargs='?',\n dest='num_tasks',\n const=-1,\n default=-1,\n help='Number of tasks to run at the same time')\n parser.add_argument(\n 'rest', nargs=argparse.REMAINDER, help='-- args, other gtest arguments for each individual test')\n args = parser.parse_args()\n\n build_target('MODULES-IN-system-bt', args.num_tasks)\n TEST_ROOT = get_native_test_root_or_die()\n test_results = []\n for test in HOST_TESTS:\n test_cmd = get_test_cmd_or_die(TEST_ROOT, test, args.enable_xml, args.rest)\n if subprocess.call(test_cmd) != 0:\n test_results.append(False)\n else:\n test_results.append(True)\n if not all(test_results):\n failures = [i for i, x in enumerate(test_results) if not x]\n for index in failures:\n print 'TEST FAILLED: ' + HOST_TESTS[index]\n sys.exit(0)\n print 'TEST PASSED ' + str(len(test_results)) + ' tests were run'\n\n dist_dir = get_android_dist_dir_or_die()\n log_output_path = os.path.join(dist_dir, 'gtest/coverage')\n cmd_path = os.path.join(get_android_root_or_die(), 'system/bt/test')\n print cmd_path\n cmd = [\n os.path.join(cmd_path, 'gen_coverage.py'),\n '--skip-html',\n '--json-file',\n '-o',\n log_output_path,\n ]\n subprocess.call(cmd)\n\n sys.exit(0)", "title": "" }, { "docid": "b182c5bab1284bab0a7a9bd05dc8e0e0", "score": "0.62892556", "text": "def execute(args):\n try:\n result = _reproduce_crash(args.testcase, args.build_dir)\n except ReproduceToolException as exception:\n print(exception)\n return\n\n if result.is_crash():\n status_message = 'Test case reproduced successfully.'\n else:\n status_message = 'Unable to reproduce desired crash.'\n\n print('{status_message} Output:\\n\\n{output}'.format(\n status_message=status_message, output=result.get_stacktrace()))", "title": "" }, { "docid": "6c6c2961a83006f323654105ff5aa8ea", "score": "0.62721", "text": "def test_process_run_fails(self):\n\n return_code = 1\n for print_cmd in [True, False]:\n for print_out in [True, False]:\n for exit_on_error in [True, False]:\n self.run_popen_assertions(\n return_code, print_cmd, print_out, exit_on_error)", "title": "" }, { "docid": "b26ece021e699ec0b399f51fcd24151f", "score": "0.6263248", "text": "def test():\n suite = unittest.TestLoader().loadTestsFromTestCase(TestUtils)\n runtime = unittest.TextTestRunner(verbosity=2).run(suite)\n return runtime.wasSuccessful()", "title": "" }, { "docid": "b26ece021e699ec0b399f51fcd24151f", "score": "0.6263248", "text": "def test():\n suite = 
unittest.TestLoader().loadTestsFromTestCase(TestUtils)\n runtime = unittest.TextTestRunner(verbosity=2).run(suite)\n return runtime.wasSuccessful()", "title": "" }, { "docid": "db8f4130d1995747918c4ae30c2b7663", "score": "0.6261596", "text": "def run_test(command, dirname, example, content, files):\n output = None\n inputs = []\n\n match = re.search('-o example(\\d+)\\.(\\w+)', content)\n if match:\n output = 'example' + match.group(1) + '.' + match.group(2)\n\n matches = re.finditer('example(\\d+)\\.(\\w+)', content)\n for match in matches:\n inputs.append('example' + match.group(1) + '.' + match.group(2))\n inputs.remove(output)\n\n path = os.path.join('results', dirname, example)\n os.makedirs(path, exist_ok=True)\n for filename in inputs:\n with open(os.path.join(path, filename), 'w') as f:\n f.write(files[filename])\n expected = re.sub('example', 'expected', output)\n with open(os.path.join(path, expected), 'w') as f:\n f.write(files[output])\n\n try:\n p = subprocess.run(command + content[2:], cwd=path, shell=True)\n if p.returncode != 0:\n print('ERROR', example)\n return 'ERROR'\n\n p = subprocess.run(['diff', output, expected], cwd=path)\n if p.returncode == 0:\n print('PASS', example)\n return 'SUCCESS'\n else:\n print('FAIL', example)\n return 'FAILURE'\n except:\n print('ERROR', example)\n return 'ERROR'", "title": "" }, { "docid": "c86dec66ba380dbc95fb230ae95ffa50", "score": "0.6257007", "text": "def run():\n print(TESTING_STARTS)\n\n _test_file_with_data_only('1/1')\n\n print(ENDED_SUCCESSFULLY)", "title": "" }, { "docid": "8a029cd3b352afc03e9c07b345cbb627", "score": "0.6254334", "text": "def run(self):\n for command in TestCoverage, Linter:\n command(*self._args, **self._kwargs).run()", "title": "" }, { "docid": "39a1d623e697d679001e7f87cb865f30", "score": "0.62459016", "text": "def test():\n options = '--cov=project/tests'\n\n pytest.main([options])\n sys.exit(0)", "title": "" }, { "docid": "6631568f6fc565da7329c7aebc43bf1c", "score": "0.6242909", "text": "def test_runtask():\n call_command('runtask', 'complain')\n call_command('runtask', 'praise')", "title": "" }, { "docid": "e90316368fd69db9ff37b36cca040d84", "score": "0.62421185", "text": "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "title": "" } ]
023824f8b4f2c854626422b86deda60a
The name of an already existing project
[ { "docid": "9ba58577ef24966af47b261a7f6d5f52", "score": "0.738892", "text": "def project_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"project_name\")", "title": "" } ]
[ { "docid": "9830d1238f46a7354a4b7bb521948ce9", "score": "0.81454676", "text": "def project_name(self) -> str:\n ...", "title": "" }, { "docid": "0cbb6fa22a254b64ffb7d9c2d600262d", "score": "0.8130705", "text": "def project_name() -> str:\n return PROJECT_NAME", "title": "" }, { "docid": "ef8d1cf4f3a3c4682bc12f6cf4888001", "score": "0.77122873", "text": "def project_name(self):\n return self.__project_name", "title": "" }, { "docid": "7fee646fb8aab8c264d8b5e7774685b1", "score": "0.771194", "text": "def project_name(cls) -> str:\n return jsii.sget(cls, \"projectName\")", "title": "" }, { "docid": "7fee646fb8aab8c264d8b5e7774685b1", "score": "0.771194", "text": "def project_name(cls) -> str:\n return jsii.sget(cls, \"projectName\")", "title": "" }, { "docid": "fd1d449ae2f61a7ce4b1db08761a4e2d", "score": "0.7675512", "text": "def project_name():\n return os.environ[constants.ENV_VARIABLES.HOPSWORKS_PROJECT_NAME_ENV_VAR]", "title": "" }, { "docid": "d596388a9585bde58ce6f8fdf8e370a0", "score": "0.7602071", "text": "def project_name(self) -> str:\n return jsii.get(self, \"projectName\")", "title": "" }, { "docid": "d596388a9585bde58ce6f8fdf8e370a0", "score": "0.7602071", "text": "def project_name(self) -> str:\n return jsii.get(self, \"projectName\")", "title": "" }, { "docid": "032afb5b0cfd94e1488eb28cd1d2ff2f", "score": "0.7515606", "text": "def project_name(self):\n return self.get(\"project_name\")", "title": "" }, { "docid": "5687deeacfed6a0cfc9f67fd3639fcc5", "score": "0.7504496", "text": "def project_name(self):\n\n if version >= \"3000\":\n project_name = sublime.active_window().project_file_name()\n else:\n project_name = None\n\n if project_name is None:\n folders = sublime.active_window().folders()\n if len(folders) > 0:\n project_name = folders[0].rsplit(os.sep, 1)[1]\n else:\n project_name = project_name.rsplit(os.sep, 1)[1].split('.')[0]\n\n return project_name", "title": "" }, { "docid": "2cb02e0b00ae87c2802b90da7554d115", "score": "0.7452787", "text": "def projectName(self):\n names = self.__name.split('/')\n if len(names) == 3:\n project_name = names[0]\n else:\n project_name = None\n return project_name", "title": "" }, { "docid": "a48315af9fcbf79165c0e1c73428ccce", "score": "0.7451909", "text": "def get_name(self):\n\n project_name = state.get_context_variable('PROJECT_NAME')\n prompt = \"Please enter a name for the virtual environment you want \"\\\n \"to create, leave blank to name it {0}: \".format(project_name)\n name = self.gather(prompt)\n if not name:\n name = project_name\n\n return name", "title": "" }, { "docid": "ac23cc891135a806600a54cba8484d8c", "score": "0.74466753", "text": "def ProjectName(self):\n return PROJECT_NAME", "title": "" }, { "docid": "0617c61c51b4f03be987af5f0d0e12d6", "score": "0.74328566", "text": "def project_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project_name\")", "title": "" }, { "docid": "4cd6d2bfc7eba96e6f9ef9de98372663", "score": "0.7426142", "text": "def GetProjectName():\n if VariableExists('g:snips_project'):\n return GetVariableValue('g:snips_project')\n if VariableExists('g:snips_project_name'):\n return GetVariableValue('g:snips_project_name')\n return InferProjectName()", "title": "" }, { "docid": "5bcbb8fef5dd295130080109c97a6637", "score": "0.7421652", "text": "def project_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"project_name\")", "title": "" }, { "docid": "5bcbb8fef5dd295130080109c97a6637", "score": "0.7421652", "text": "def project_name(self) -> pulumi.Input[str]:\n return 
pulumi.get(self, \"project_name\")", "title": "" }, { "docid": "1db2f315009f2e0ff0b7261f78ef09c7", "score": "0.7304578", "text": "def project_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project_name\")", "title": "" }, { "docid": "1db2f315009f2e0ff0b7261f78ef09c7", "score": "0.7304578", "text": "def project_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project_name\")", "title": "" }, { "docid": "1db2f315009f2e0ff0b7261f78ef09c7", "score": "0.7304578", "text": "def project_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project_name\")", "title": "" }, { "docid": "171d0bcbf33203923210fe01592740a9", "score": "0.7240464", "text": "def project_name(self):\n if self.__project_file_path is None:\n return None\n return os.path.splitext(os.path.basename(self.__project_file_path))[0]", "title": "" }, { "docid": "15f3093ef481bb7af7bcc3dbc8108dae", "score": "0.72384816", "text": "def project_name(self):\n return self._project_name", "title": "" }, { "docid": "06fd3cd29469ca5ff3a56c3f3393e385", "score": "0.71570873", "text": "def _generate_project_name() :\n\n project_list = []\n projects = list_all_GOLD_projects()\n \n ## Increment if projects exist\n if projects and len(projects) > 0 :\n for project in projects :\n if re.match('^lp\\d+',project[1]) :\n project_list.append(project[1])\n\n def atoi(text):\n return int(text) if text.isdigit() else text\n\n def natural_keys(text):\n return [ atoi(c) for c in re.split('(\\d+)', text) ]\n\n project_list.sort(key=natural_keys)\n \n last_project = re.split('(\\d+)', project_list[-1])\n digit = int(last_project[1])+1\n generated_project_name = \"lp\" + str(digit)\n\n ## This is the first project!\n else :\n generated_project_name = \"lp1\"\n \n print \"Generated project name \", generated_project_name\n return generated_project_name", "title": "" }, { "docid": "0528487cfafe23e051c83e5cdad89f50", "score": "0.7140842", "text": "def project_name(request, *args, **kwargs):\n project_name = DEFAULT_PROJECT_NAME\n\n\n return {\n 'project_name': getattr(settings, \"PROJECTNAME\", DEFAULT_PROJECT_NAME)\n }", "title": "" }, { "docid": "43e547ead0e82f07d200be9413c15870", "score": "0.70771384", "text": "def current_project_name(self):\r\n # check the name of current project\r\n logger.info(\"Checking the name of the current active project...\")\r\n try:\r\n # get the name of the current active project\r\n current_active_project_name = self._instance.ActiveProject.Name\r\n except Exception:\r\n logger.exception(\"Could not get the name of current active project\")\r\n raise\r\n return current_active_project_name", "title": "" }, { "docid": "7e1488f1595f2808f9798cba29d480f4", "score": "0.7068155", "text": "def __str__(self):\n return f'Project {self.name}'", "title": "" }, { "docid": "53399732c910c80e44caf802be8bbbc1", "score": "0.7051244", "text": "def project(cls):\n return Project.name", "title": "" }, { "docid": "53399732c910c80e44caf802be8bbbc1", "score": "0.7051244", "text": "def project(cls):\n return Project.name", "title": "" }, { "docid": "a4f2c03bc6bc88bf54e5d6deb9071c0f", "score": "0.70328283", "text": "def name(self):\n\n return self._project_name", "title": "" }, { "docid": "c721f31cd24d717ec4edfda0ee6ef41e", "score": "0.7027031", "text": "def name(self):\r\n return 'cyclesummaryproject'", "title": "" }, { "docid": "af2b29f687284e255ce531a1199ea854", "score": "0.69911593", "text": "def project_name(self) -> typing.Optional[str]:\n return self._values.get('project_name')", "title": 
"" }, { "docid": "af2b29f687284e255ce531a1199ea854", "score": "0.69911593", "text": "def project_name(self) -> typing.Optional[str]:\n return self._values.get('project_name')", "title": "" }, { "docid": "af2b29f687284e255ce531a1199ea854", "score": "0.69911593", "text": "def project_name(self) -> typing.Optional[str]:\n return self._values.get('project_name')", "title": "" }, { "docid": "fff2657ee7463bf1bad2e52b9428d0b5", "score": "0.69623977", "text": "def proj_name():\n tokens = proj_dir().split(os.path.sep)\n if 'show' in tokens:\n index = tokens.index('show')\n return tokens[index+1]\n else:\n return 'alex_test'", "title": "" }, { "docid": "4faa74c4e346c26bfe20de984f85c17a", "score": "0.69589037", "text": "def get_current_project_name(self) -> str:\n return self.current_project_name", "title": "" }, { "docid": "66f8dbc50c39dceee479536493016f70", "score": "0.68233573", "text": "def random_project_name(qprefix = \"Proj\"):\n\n num = int(round(1000000000 * random()))\n project_name_integration = \"%s-%s\" % (qprefix, num)\n return project_name_integration", "title": "" }, { "docid": "befb65fde535245c1615ee5f27867f63", "score": "0.68061775", "text": "def project(self) -> str:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "befb65fde535245c1615ee5f27867f63", "score": "0.68061775", "text": "def project(self) -> str:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "befb65fde535245c1615ee5f27867f63", "score": "0.68061775", "text": "def project(self) -> str:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "befb65fde535245c1615ee5f27867f63", "score": "0.68061775", "text": "def project(self) -> str:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "befb65fde535245c1615ee5f27867f63", "score": "0.68061775", "text": "def project(self) -> str:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "befb65fde535245c1615ee5f27867f63", "score": "0.68061775", "text": "def project(self) -> str:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "befb65fde535245c1615ee5f27867f63", "score": "0.68061775", "text": "def project(self) -> str:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "b8916e49bc29be914ea40f6331539e87", "score": "0.68001044", "text": "def get_existing_project_names(self):\n return [project['name'] for project in self._zync.get_project_list()]", "title": "" }, { "docid": "04421725eb2b17c7f260a4c6f0cb539b", "score": "0.6753277", "text": "def name_from_directory(project_dir):\n # FIXME: qiproject.xml is read twice!\n # once for finding project names, and an other time for\n # loading complete configuration (with {r,}depends)\n handle_old_manifest(project_dir)\n xml = os.path.join(project_dir, \"qiproject.xml\")\n if not os.path.exists(xml):\n return os.path.basename(project_dir)\n p_cfg = qibuild.config.ProjectConfig()\n p_cfg.read(xml)\n return p_cfg.name", "title": "" }, { "docid": "2a1e9a3bbe6f119b417c2b808a179935", "score": "0.6752125", "text": "def get_project_title(self) -> str:\n return self.txt_project_title.get_text()", "title": "" }, { "docid": "8a7c46b33f58ca1f2e119bc539dec74c", "score": "0.6722467", "text": "def _check_project(project_name):\n if project_name not in projects:\n raise ValueError(\n \"Project {} does not exist, run setup_project first\".format(\n project_name\n )\n )\n projects.set_current(project_name)\n if \"biosphere\" not in databases and \"biosphere3\" not in databases:\n raise ValueError(\n \"Project {} has not been set up, run 
setup_project first\".format(\n project_name\n )\n )\n return project_name", "title": "" }, { "docid": "29f8c4b968ac2760f5dfbfe0aa44001a", "score": "0.67159975", "text": "def InferProjectName():\n return vim.eval(\"fnamemodify(getcwd(), ':t')\")", "title": "" }, { "docid": "8f76cfb30674b92b4f7b64387cb77671", "score": "0.67144847", "text": "def rename_project():\n if len(sys.argv) < 4:\n print(\"You must specify a project id and a new project name\")\n list_projects()\n sys.exit(11)\n\n project_id = sys.argv[2]\n new_name = \" \".join(sys.argv[3:])\n\n data = load()\n if project_id not in data.get(\"projects\", {}):\n print(\"Project {cb}{pid}{ce} not found\".format(cb=bcolors.YELLOW, pid=project_id, ce=bcolors.END))\n sys.exit(12)\n elif not project_id.isdigit():\n print(\"Not allowed to rename special project {name}\".format(name=_project_name(data, project_id)))\n else:\n data[\"projects\"][project_id][\"name\"] = new_name\n msg = \"Assigned new name to project {name}\".format(name=_project_name(data, project_id))\n print(msg)\n save(data, msg)", "title": "" }, { "docid": "0351bd233b7bd1c66f1cd109329479fd", "score": "0.6690765", "text": "def get_project_name(self, project_id):\n query = \"select name from projects where id =?\"\n result = self.cur.execute(query, [project_id])\n return result.fetchone()", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> 
pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "821209c5f077edcd43c8ec50df0a75a2", "score": "0.6684334", "text": "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "33b76c36fc65cdb87cba58bf1f6b71a6", "score": "0.6633513", "text": "def test_create_project_that_already_exists(self) -> None:\n _ = run_command('tgl create project \"project name for tests\"')\n\n output = run_command('tgl create project \"project name for tests\"')\n self.assertIn('ERROR: Project could not be created.', output)\n self.assertIn('Response: \"Name has already been taken\"', output)", "title": "" }, { "docid": "2d19e696c681df39766740bb27d76643", "score": "0.6624594", "text": "def GetName(self):\n return self.__project.name", "title": "" }, { "docid": "667b619883885112bea69c71a71e8459", "score": "0.66093117", "text": "def name(self):\r\n return 'walksummaryproject'", "title": "" }, { "docid": "ac2dd7e5a46c8f858700550b39ed51b2", "score": "0.6608596", "text": "def get_package_name() -> str:\n with Path(\"pyproject.toml\").open(\"rb\") as f:\n return tomli.load(f)[\"project\"][\"name\"]", "title": "" }, { "docid": "6e5f4993b89e06796b254a23403bfd89", "score": "0.65953124", "text": "def infer_name(self):\n if CONFIG_KEY not in self:\n return\n if hasattr(self[CONFIG_KEY], \"name\"):\n if \" \" in self[CONFIG_KEY].name:\n raise InvalidConfigFileException(\n \"Specified Project name ({}) contains whitespace\".format(\n self[CONFIG_KEY].name\n )\n )\n return self[CONFIG_KEY].name.replace(\" \", \"_\")\n if not self[CONFIG_FILE_KEY]:\n raise NotImplementedError(\n \"Project name inference isn't supported \"\n \"on a project that lacks a config file.\"\n )\n config_folder = os.path.dirname(self[CONFIG_FILE_KEY])\n project_name = os.path.basename(config_folder)\n if project_name == METADATA_KEY:\n project_name = os.path.basename(os.path.dirname(config_folder))\n return 
project_name.replace(\" \", \"_\")", "title": "" }, { "docid": "ff61fd203d009b123120babd8ded0f24", "score": "0.65863454", "text": "def _getNewsName(self, project):\r\n name = project.directory.basename().title()\r\n if name == 'Twisted':\r\n name = 'Core'\r\n return name", "title": "" }, { "docid": "d5338a5f0788c591629d7e8008ea79c0", "score": "0.655212", "text": "def project(self):\n if self.project_rel is None:\n return None\n return self.project_rel.name", "title": "" }, { "docid": "41bfb7fc6fb2e84ffe517041c79913ce", "score": "0.6542438", "text": "def project_id():\n return os.environ[constants.ENV_VARIABLES.HOPSWORKS_PROJECT_ID_ENV_VAR]", "title": "" }, { "docid": "3ff977af2c59a398029a3c3df582f0a1", "score": "0.6541721", "text": "def project(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "3ff977af2c59a398029a3c3df582f0a1", "score": "0.6541721", "text": "def project(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"project\")", "title": "" }, { "docid": "386e4326d826c6d58e308e701aeaa799", "score": "0.6493412", "text": "def create_project(self, project_name):\n project_info = self.get_prj_info(project_name)\n\n if 'Did you use correct project name?' in str(project_info):\n url = '/api/v1/project'\n post_body = {\n \"entryType\": \"INTERNAL\",\n \"projectName\": project_name\n }\n return self.send_request(\"POST\", url, json.dumps(post_body), status_codes=[201])\n return 'Project already exist'", "title": "" }, { "docid": "ff4cc85dc5ec820fc6cf2dda38fc79eb", "score": "0.64865124", "text": "def get_project_name(self, project_id):\n projects = self.list_projects()\n return projects[project_id]['name']", "title": "" }, { "docid": "d0d731d4bcd6be985e34ae63989df228", "score": "0.64779615", "text": "def get_project(self, name=None):\n for project in self.projects:\n if project.name.lower() == name.lower():\n return project", "title": "" }, { "docid": "1ee821712ff97590a3b6fc5113024767", "score": "0.64757156", "text": "def project(self):\n return self.project_rel.name", "title": "" }, { "docid": "3e5912e66095ae0a9ddd761f080eb3d6", "score": "0.6470656", "text": "def _construct_project_name(self):\n if self.signup_type == \"organisation\":\n # TODO(adriant): One option later may be to allow unicode:\n # slugify(value, allow_unicode=True)\n project_name = str(slugify(self.company_name))\n elif self.signup_type == \"individual\":\n # TODO(adriant): same as above.\n project_name = str(slugify(self.name))\n\n return project_name", "title": "" }, { "docid": "00d641927231cac7881708af72a645e8", "score": "0.6463618", "text": "def FindProjectByName(projectName, apiToken=None):\r\n toggl = pytoggl.TogglAPI(apiToken=apiToken)\r\n workspaces = toggl.workspaces.getAll()\r\n \r\n project = None\r\n for workspace in workspaces:\r\n projects = toggl.workspaces.getProjects(workspace)\r\n \r\n projectsWithMatchingName = [project for project in projects if project.name == projectName]\r\n if len(projectsWithMatchingName) > 0:\r\n project = projectsWithMatchingName[0]\r\n break\r\n return project", "title": "" }, { "docid": "5e507de135730b145c1bb762167fcd67", "score": "0.6462611", "text": "def show_project():\n return \"Welcome to Project Blue Book.\\n\"", "title": "" }, { "docid": "47fbc67ba6697ae0f41d61085f793633", "score": "0.64624107", "text": "def short_project_name(full_project_name):\n return full_project_name.split('/')[-1]", "title": "" }, { "docid": "47fbc67ba6697ae0f41d61085f793633", "score": "0.64624107", "text": "def 
short_project_name(full_project_name):\n return full_project_name.split('/')[-1]", "title": "" }, { "docid": "b85aac280caab14cd0a3003a8c504cb6", "score": "0.64517266", "text": "def path_to_project(self):\n if self.year and self.number:\n if self.name:\n name = self.get_full_name()\n else:\n name = self.name_from_number()\n if name:\n return os.path.join(PROJECT_ROOT, name)\n else:\n msg = \"Check project number and name\"\n logger.error(msg)\n self.fileError = msg\n return False", "title": "" }, { "docid": "4c209a6b6675786cc88907aa8d901e8d", "score": "0.6419241", "text": "def prj_window_get_project_name(self, sym):\n return self.winapp.get_text(sym)", "title": "" }, { "docid": "2236d3615fd7f814dc82b2db4dc3debb", "score": "0.6412079", "text": "def setProjectName(self):\n # update oproject\n # pass the project name to the oproject\n try:\n Global.getOproject().setProjectName(str(self.editProjectName.text()))\n Global.setProjectName(str(self.editProjectName.text()))\n # Give action to the Project Validation Button\n self.btnProjectValidation.setEnabled(True)\n except:\n #Unauthorized char\"\n QMessageBox.about(\n self, u\"Caratère invalide\", u\"Le nom du projet ne peut-pas contenir de caractères accentués ou spéciaux\")\n\n self.editProjectName.setText(\"\")", "title": "" }, { "docid": "fc62d096caa3f775bd75a7fead710f29", "score": "0.63832957", "text": "def get_id(self):\n try:\n return str(self.config['project'])\n except KeyError:\n raise LookupError(\n \"Unable to determine project id .\"\n \"Are you sure '{}' is a signac project path?\".format(\n os.path.abspath(self.config.get('project_dir', os.getcwd()))))", "title": "" }, { "docid": "2cf68ff38d8a14ec2caaebeee1f33466", "score": "0.6373161", "text": "def _get_project_name(data, project_id):\n try:\n return data[\"projects\"][project_id][\"name\"]\n except:\n return None", "title": "" }, { "docid": "343e92bc6126429c03da13d0e4d1cb1b", "score": "0.63442475", "text": "def get_project_name(branch):\n proj = local('pwd', capture=True).split('/')[-1]\n if branch == 'master':\n tstamp = time.strftime(\"%Y%m%d%H%M\")\n project_name = proj+'-'+branch+'-'+tstamp\n else:\n project_name = proj+'-'+branch\n return project_name", "title": "" }, { "docid": "0e3719bfd0f4f448c083ea0d96c3f83e", "score": "0.6339195", "text": "def displayName(self):\r\n return self.tr('Create QGIS Cycle Summary Project for Cyclemon')", "title": "" }, { "docid": "dc6bf2d61f4bb009ab3c302d6acbef7d", "score": "0.63313687", "text": "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "title": "" } ]
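Illustrative note (not a dataset record): negative lists in rows like these can contain verbatim repeats of the same docid, so a consumer of the full dump may want unique negatives per query. Below is a minimal sketch of deduplicating a negative_passages list by docid — an assumed post-processing step, with field names taken from the records above:

    # Sketch only: keep the first occurrence of each docid, preserving order.
    from typing import Dict, List

    def dedupe_negatives(negatives: List[Dict]) -> List[Dict]:
        seen = set()
        unique = []
        for passage in negatives:
            if passage["docid"] not in seen:
                seen.add(passage["docid"])
                unique.append(passage)
        return unique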
023be0204b65a30b3822649b328a09be
r"""Writes a checkpoint to the file system.
[ { "docid": "3e4f7a53e6fef623357078b309a28868", "score": "0.67102903", "text": "def __call__(\n self,\n checkpoint: Union[Checkpoint, Summary],\n ) -> Union[Checkpoint, Summary]:\n return self.write(checkpoint)", "title": "" } ]
[ { "docid": "cfa0f7d273333324ccf8a8aed844a768", "score": "0.7748798", "text": "def write_checkpoint(self):\n\n print('checkpointing at',self.tm_minutes,'minutes')\n success=write_fits(self.checkpoint_file, self.data)", "title": "" }, { "docid": "00f05d164a5452b257b48bd4fe59e01e", "score": "0.76853496", "text": "def save(self, checkpoint):\n pass", "title": "" }, { "docid": "7732db6da317d75983b9ea1cc48ade87", "score": "0.73511595", "text": "def save_checkpoint(self, checkpoint: Mapping[str, Any], path: str):\n save_checkpoint(checkpoint=checkpoint, path=path)", "title": "" }, { "docid": "54a4b2fac4d9b02fc1f65596ec81e34b", "score": "0.71814287", "text": "def save_checkpoint(self, folder, filename):\n pass", "title": "" }, { "docid": "100d67350393e709017c55239d105043", "score": "0.7154954", "text": "def save(self, checkpoint):\n if checkpoint.client_id() not in self._checkpoints:\n self._checkpoints[checkpoint.client_id()] = []\n\n checkpoint = checkpoint.copy()\n self._checkpoints[checkpoint.client_id()].append(checkpoint)\n\n self._write('{}: Saved checkpoint (performance: {})'.format(checkpoint.client_id(), checkpoint.metric_value()))\n self._write(json.dumps(checkpoint.hyperparameters(), indent=2))", "title": "" }, { "docid": "6d9e8b1a2c58d8c32492ce84c6b71fde", "score": "0.7089061", "text": "def save(self) -> None:\n try:\n path = os.path.abspath(asksaveasfilename(parent=self.master,\n defaultextension=\".check\"))\n self.checkpoint.save(path)\n self.setstatus(\"Successfully saved a checkpoint\")\n except (OSError, IOError):\n self.setstatus(\"Can't save into that location\")\n except AttributeError:\n self.setstatus(\"Nothing to save\")", "title": "" }, { "docid": "1ead549cb45499e9e0c50876317ae9b8", "score": "0.7077468", "text": "def _save_checkpoint(self, output_dir, step):\n checkpoint_path = f\"{output_dir}/checkpoint-{step}\"\n if os.path.exists(checkpoint_path):\n self._wandb.save(f\"{checkpoint_path}/*\")", "title": "" }, { "docid": "b9df34df78b47192d805da16a8a10e1a", "score": "0.69763863", "text": "def save_checkpoint(self, file_name) -> None:\n checkpoint = {\n \"high_level_state_dict\": self.high_level.state_dict(),\n \"low_level_state_dict\": self.low_level.state_dict(),\n \"config\": self.config,\n }\n torch.save(checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name))", "title": "" }, { "docid": "723257076fee79bec82ead81efc1e42c", "score": "0.6964286", "text": "def save_checkpoint(self, checkpoint_dir, stanza, last_run, google_worksheet_updated):\n \n self.save_checkpoint_data(checkpoint_dir, stanza, { 'last_run' : last_run,\n 'google_worksheet_updated' : google_worksheet_updated\n })", "title": "" }, { "docid": "8f7466332d25852e86ae34f83e570c59", "score": "0.67875475", "text": "def save_checkpoint(state, filename=\"my_checkpoint.pth.tar\"):\n print(\"=> Saving checkpoint\")\n torch.save(state, filename)", "title": "" }, { "docid": "1dd287e2055cfd11f5ae713047409f00", "score": "0.67859924", "text": "def save_checkpoint(self, file_name: str = None):\n if file_name is None:\n file_name = \\\n f\"Epoch[{self.current_epoch}]-Step[{self.current_iter}].pt\"\n\n file_name = self.checkpoint_dir / file_name\n state = {\n 'epoch': self.current_epoch,\n 'iter': self.current_iter,\n 'best_perplexity': self.best_perplexity,\n 'model_state': self.model.state_dict(),\n 'optimizer': self.opt.state_dict(),\n 'scheduler': self.scheduler.state_dict()\n }\n torch.save(state, file_name)\n logging.info(f\"Checkpoint saved @ {file_name}\")", "title": "" }, { "docid": 
"059fc086ed406efee546a789540f64a4", "score": "0.6695892", "text": "def save(self, checkpoint):\n if comm.get_rank() == 0:\n logger.warning(\"Save checkpoint to %s\" % checkpoint)\n checkpoint = os.path.expanduser(checkpoint)\n if self.rank == 0:\n state = {\n \"model\": self.model.state_dict(),\n \"optimizer\": self.optimizer.state_dict()\n }\n torch.save(state, checkpoint)\n\n comm.synchronize()", "title": "" }, { "docid": "d525bf60bc3f2637c6618dfe3ad5f9e6", "score": "0.6685885", "text": "def save_checkpoint(self):\n if self.ct is None:\n pn.state.warning(\"No CT to save\")\n else:\n imars_save_checkpoint(data=self.ct, outputbase=self.temp_root, name=self.recn_name, omegas=self.omegas)", "title": "" }, { "docid": "9eb2afb3e38c3db567d85ab271653ec7", "score": "0.66279835", "text": "def save_ckpt(self, name=None):\n if name is None:\n save_path = os.path.join(self.model_dir, \"ckpt_epoch{}.pth\".format(self.clock.epoch))\n print(\"Checkpoint saved at {}\".format(save_path))\n else:\n save_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\n if isinstance(self.net, nn.DataParallel):\n torch.save({\n 'clock': self.clock.make_checkpoint(),\n 'model_state_dict': self.net.module.cpu().state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'scheduler_state_dict': self.scheduler.state_dict(),\n }, save_path)\n else:\n torch.save({\n 'clock': self.clock.make_checkpoint(),\n 'model_state_dict': self.net.cpu().state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'scheduler_state_dict': self.scheduler.state_dict(),\n }, save_path)\n self.net.cuda()", "title": "" }, { "docid": "f0ac6596d28b6586be22d7db610a6db7", "score": "0.6617067", "text": "def write(\n self,\n checkpoint: Union[Checkpoint, Summary],\n prefix: Optional[str] = None,\n ) -> Union[Checkpoint, Summary]:\n if not isinstance(checkpoint, (Checkpoint, Summary)):\n raise TypeError(\n f\"Cannot write a checkpoint to `{self.output_dir}`. 
\"\n \"Expecting checkpoint of `Checkpoint` or `Summary` type, \"\n f\"but have got `{type(checkpoint)}`.\"\n )\n\n if prefix is None:\n prefix = regular_checkpoint_prefix()\n\n checkpoint_file = CheckpointFile(\n model_params=util.originate(checkpoint.model_params),\n model_state=util.originate(checkpoint.model_state),\n optimizer_state=util.originate(checkpoint.optimizer_state),\n step=checkpoint.step,\n metrics=checkpoint.metrics if isinstance(checkpoint, Summary) else {},\n )\n\n checkpoints.save_checkpoint(\n self.output_dir,\n target=checkpoint_file,\n step=checkpoint.step,\n prefix=prefix,\n keep=self.keep,\n overwrite=self.overwrite,\n )\n\n return checkpoint", "title": "" }, { "docid": "f4cafc2ab8591cbd9cd2be93b531d554", "score": "0.6615688", "text": "def save_checkpoint(state, loss):\n fname = \"checkpoint_\" + time.strftime(\"%Y%m%d-%H%M%S\") + \"_\" + str(loss.item()) + \".pth.tar\"\n torch.save(state, get_rel_path(os.path.join(CHECKPOINTS_FOLDER, fname))) # save checkpoint\n print(\"$$$ Saved a new checkpoint\\n\")", "title": "" }, { "docid": "e2e0b4004b73f889b304e32aed2e1c13", "score": "0.66001064", "text": "def save_checkpoint(self, checkpoint: Dict[str, Any], filepath: _PATH) -> None:\n # Related Issue: https://github.com/pytorch/xla/issues/2773\n if _OMEGACONF_AVAILABLE:\n checkpoint = apply_to_collection(checkpoint, (DictConfig, ListConfig), OmegaConf.to_container)\n self.save({k: v for k, v in checkpoint.items() if k != \"callbacks\"}, filepath)", "title": "" }, { "docid": "868ac628095b24c41a28082cf90f1317", "score": "0.6594783", "text": "def checkpoint(self, name):", "title": "" }, { "docid": "2fce3e1a4c5da263d6926d7173a5d75f", "score": "0.65719545", "text": "def save(self, step):\n if self._saver:\n ckpt_dir = os.path.join(\n reporter.logging_directory(), 'checkpoints')\n os.makedirs(ckpt_dir, exist_ok=True)\n save = os.path.join(ckpt_dir, self._scope + '.ckpt')\n self._saver.save(tf.get_default_session(), save, step)", "title": "" }, { "docid": "74460a0016980bc5ef74daac356dbdd0", "score": "0.6562529", "text": "def save(self, checkpoint):\n state_dict = self.get_state_dict()\n torch.save(state_dict, checkpoint)\n return checkpoint", "title": "" }, { "docid": "e76e9afe4a4083649a36d93216b6c2ae", "score": "0.6545549", "text": "def save(self, checkpoint_file_path: str) -> None:\n save_state({\"model\": self.state_dict()}, checkpoint_file_path)", "title": "" }, { "docid": "2536b3bc9233bc5f8cb7e23999c9059c", "score": "0.65326697", "text": "def save_checkpoint(self, checkpoint: Dict[str, Any], filepath: str) -> None:\n app_state = AppState()\n # dump states as a checkpoint dictionary object\n # TrainingTypePlugin.on_save() just seems to return the same thing.\n # checkpoint = self.on_save(checkpoint)\n if self.is_global_zero or app_state.data_parallel_rank == 0:\n try:\n # write the checkpoint dictionary on the file\n atomic_save(checkpoint, filepath)\n except AttributeError as err:\n key = pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY\n checkpoint.pop(key, None)\n rank_zero_warn(f\"Warning, `{key}` dropped from checkpoint. 
An attribute is not picklable: {err}\")\n atomic_save(checkpoint, filepath)", "title": "" }, { "docid": "81d0af735827e9a9ec99b8e899c5749b", "score": "0.6505762", "text": "def save(self, checkpointname='checkpoint'):\n weightspath, paramspath, configpath = self.datapaths(checkpointname)\n self.config.timestamp = time.time()\n self.network.save_weights(weightspath)\n state = self.state if hasattr(self.state, '__iter__') else [self.state]\n Config(self.parameters(*state)).save(paramspath)\n Config(self.config).save(configpath)\n return join(self.basepath, checkpointname)", "title": "" }, { "docid": "d223bc417e4fd21a0d09f2369eb22c89", "score": "0.65033174", "text": "def _save_checkpoint(self):\n filename = self.checkpoint_path\n filename += 'checkpoint_model.pt'\n save_dict = {}\n save_dict['model_def'] = self.model\n save_dict['optimizer_state_dict'] = self.optimizer.state_dict()\n save_dict['loss'] = self.loss\n torch.save(save_dict, filename)", "title": "" }, { "docid": "52509cbc85429ba021ce5b1673a890bd", "score": "0.6453847", "text": "def save_checkpoint(self, checkpoint_dir: str, iteration_number: int) -> None:\n # Try to create checkpoint directory if it doesn't exist.\n dopamine_utils.save_checkpoint(\n checkpoint_dir, iteration_number,\n functools.partial(dqn_agent.JaxDQNAgent.bundle_and_checkpoint,\n self))\n # Get rid of old checkpoints if necessary.\n if self._checkpoint_duration is not None:\n dopamine_utils.clean_up_old_checkpoints(\n checkpoint_dir, iteration_number,\n checkpoint_duration=self._checkpoint_duration)", "title": "" }, { "docid": "d335e858efb8460dcc3167601df64e53", "score": "0.6410473", "text": "def save_checkpoint(self, config, population, species_set):\n filename = '{0}{1}'.format(self.__checkpoint_path, self.__generation)\n print(\"Saving checkpoint to {0}\".format(filename))\n with gzip.open(filename, 'w', compresslevel=5) as f:\n data = (self.__generation, config, population, species_set, random.getstate())\n pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n self.__generation += 1", "title": "" }, { "docid": "035defbf54cdef1c93347061ab2bdcac", "score": "0.6410462", "text": "def save_checkpoint(self, filepath: _PATH, weights_only: bool = False) -> None:\n self.checkpoint_connector.save_checkpoint(filepath, weights_only)", "title": "" }, { "docid": "b186fa4265bb8ad9d1208f75759487a1", "score": "0.6392106", "text": "def save_ckpt(self, name=None):\n if name is None:\n save_path = os.path.join(self.model_dir, \"ckpt_epoch{}.pth\".format(self.clock.step))\n print(\"Saving checkpoint epoch {}...\".format(self.clock.step))\n else:\n save_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\n\n torch.save({\n 'clock': self.clock.make_checkpoint(),\n 'netG_state_dict': self.netG.cpu().state_dict(),\n 'netD_state_dict': self.netD.cpu().state_dict(),\n 'optimizerG_state_dict': self.optimizerG.state_dict(),\n 'optimizerD_state_dict': self.optimizerD.state_dict(),\n }, save_path)\n\n self.netG.cuda()\n self.netD.cuda()", "title": "" }, { "docid": "3b643a3df0420b37313a38030f2a2fe3", "score": "0.6386703", "text": "def _write_file(self):\n self._store.write()", "title": "" }, { "docid": "611b29520ab316c52bd3420b03a55767", "score": "0.63820857", "text": "def save_checkpoint(self, file_name: str = None, save_optimizer_state=None, delete_previous=False):\n if file_name is None:\n file_name = f\"Epoch[{self.current_epoch}]-Step[{self.current_iter}].pt\"\n\n file_name = self.checkpoint_dir / file_name\n state = {\n \"iter\": self.current_iter,\n \"best_accuracy\": 
self.best_accuracy,\n \"best_loss\": self.best_loss,\n \"model_state\": self.model_state()\n }\n if (save_optimizer_state is not None and save_optimizer_state) or self.config.save_optimizer_state:\n state[\"optimizer\"] = self.optimizer_state()\n state = self.save_other_state_information(state)\n # delete previous checkpoint to avoid hogging space\n if delete_previous and self.config.delete_previous_checkpoint:\n previous_checkpoint = self.get_last_checkpoint_path()\n if previous_checkpoint is not None and previous_checkpoint.is_file():\n previous_checkpoint.unlink()\n torch.save(state, file_name)\n self.write_metrics()\n self.logger.info(f\"Checkpoint saved @ {file_name}\")", "title": "" }, { "docid": "a9f4e98bd1be3235f2068d9c68db72ab", "score": "0.6363164", "text": "def save_checkpoint(self):\n with DelayedKeyboardInterrupt():\n model_name = type(self.model).__name__\n ckpt_path = os.path.join(self.workspace_path, 'checkpoints')\n file_path = f\"{ckpt_path}/{model_name}_ep{self.epoch:04d}.pth.tar\"\n best_path = f\"{ckpt_path}/{model_name}_best.pth.tar\"\n os.makedirs(ckpt_path, exist_ok=True)\n\n self.stats[\"Checkpoints\"].append(file_path)\n\n if len(self.stats[\"Checkpoints\"]) > self.max_keep_ckpt:\n old_ckpt = self.stats[\"Checkpoints\"].pop(0)\n if os.path.exists(old_ckpt):\n os.remove(old_ckpt)\n self.log.info(f\"Removed old checkpoint {old_ckpt}\")\n\n state = {\n 'epoch': self.epoch,\n 'global_step': self.global_step,\n 'model_name': model_name,\n 'model': self.model.state_dict(),\n 'optimizer' : self.optimizer.state_dict(),\n 'lr_scheduler': self.lr_scheduler.state_dict(),\n 'stats' : self.stats,\n }\n\n if self.stats[\"BestResult\"] is None or self.metrics[0].better(self.stats[\"EvalResults\"][-1], self.stats[\"BestResult\"]):\n self.stats[\"BestResult\"] = self.stats[\"EvalResults\"][-1]\n torch.save(state, best_path)\n self.log.info(f\"Saved Best checkpoint.\")\n \n torch.save(state, file_path)\n self.log.info(f\"Saved checkpoint {self.epoch} successfully.\")", "title": "" }, { "docid": "2e0397bc37e31b3c10aeb7eb9ea3ea51", "score": "0.6346842", "text": "def save_at_exit(self, name=\"checkpoint_end\", **kwargs):\n\n if not name.endswith(\".pth.tar\"):\n name += \".pth.tar\"\n\n def save_fnc():\n self.save_checkpoint(name, **kwargs)\n print(\"Checkpoint saved securely... 
=)\")\n\n atexit.register(save_fnc)", "title": "" }, { "docid": "cd9c32a50ddf4d6071371a55769c240a", "score": "0.63406205", "text": "def save(self):\n save_dir = os.path.join(\".\", *self.config[\"checkpoint_dir\"], self.config[\"env_name\"])\n helper.mkdir(save_dir)\n current_date_time = helper.get_current_date_time()\n current_date_time = current_date_time.replace(\" \", \"__\").replace(\"/\", \"_\").replace(\":\", \"_\")\n\n torch.save(self.model.state_dict(), os.path.join(save_dir, \"ckpt_\" + current_date_time))", "title": "" }, { "docid": "c9810d03aec11a0c7f4bed1d29f1261e", "score": "0.6339612", "text": "def save_checkpoint(self, out_dir):\n filepath = osp.join(out_dir, self.model.name)\n # save model\n self.model.save_weights(filepath, save_format='tf')\n # save optimizer state\n opt_state = self.optimizer.get_weights()\n np.save('{}.opt'.format(filepath), opt_state)\n # save runner state\n with open('{}.runner.json'.format(filepath), 'w') as f:\n json.dump({'epoch':'{}'.format(self.epoch+1), 'iter':'{}'.format(self.iter)}, f)\n self.logger.info('Saved checkpoint at: {}'.format(filepath))", "title": "" }, { "docid": "5ce8c95523a4d2ac18c630e6fd625b61", "score": "0.63086545", "text": "def save_checkpoint(self, config, population, species_set, generation):\n self.filename_prefix = osp.join(\n self.output_dir,\n 'checkpoints',\n 'generation_{:04d}'.format(generation),\n 'checkpoint'\n )\n Path(self.filename_prefix).parent.mkdir(parents=True, exist_ok=True)\n super().save_checkpoint(config, population, species_set, generation)", "title": "" }, { "docid": "d4158f6f658bd79439e1e15eed7dc73e", "score": "0.6284704", "text": "def save_checkpoint(self, state, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\" % (self.data_set)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n\n # shutil.copyfile(filename, 'runs/%s/' % self.data_set + 'model_best.pth.tar')", "title": "" }, { "docid": "9552c31a68f4f8e832fe078796e8de87", "score": "0.6262841", "text": "def save_checkpoint(self, model):\n torch.save(model.state_dict(), self.filename)", "title": "" }, { "docid": "9c16eb4585eb3d127aade127715fad54", "score": "0.62581104", "text": "def export_checkpoint(self, export_dir):\n raise NotImplementedError", "title": "" }, { "docid": "31b41b4ae7e473c8eb69da038e254f00", "score": "0.62530535", "text": "def checkpoint(self):\n pass", "title": "" }, { "docid": "1d4474d95fe9ae51a4dc2dd0280b5d4e", "score": "0.62440187", "text": "def try_checkpoint(self):\n\n should_checkpoint, icheck = self.should_checkpoint()\n\n if should_checkpoint:\n self.write_checkpoint()\n self.checkpointed[icheck]=True", "title": "" }, { "docid": "d1426866d9e0165c3d12f7341cd6683f", "score": "0.6231865", "text": "def save_checkpoint(self):\n if not self.params.is_master:\n return\n\n # huggingface saves (more useful in our case for finetuning)\n\n logger.info(f\"Saving epoch {self.epoch} ...\")\n path = os.path.join(self.params.dump_path, f\"paddle-{self.epoch}\")\n if not os.path.exists(path): os.makedirs(path)\n model_to_save = self.model._layers if hasattr(self.model, '_layers') else self.model\n # model_to_save = self.model._layers if isinstance(\n # self.model, paddle.DataParallel) else self.model\n model_to_save.save_pretrained(path)\n self.tokenizer.save_pretrained(path)", "title": "" }, { "docid": "71d602afb650635654c894fe42963ed0", "score": "0.6202308", "text": "def save_checkpoint(self, epoch, info='', test_reward=None):\n state = {\n 'info': info,\n 
'epoch': epoch,\n 'agent_policy': self.model.agent_policy.state_dict(),\n 'agent_value': self.model.agent_value.state_dict(),\n 'optimizer_policy': self.optimizer_policy.state_dict(),\n 'optimizer_value': self.optimizer_value.state_dict(),\n 'test_reward': test_reward\n }\n os.makedirs(self.checkpoint_dir, exist_ok=True)\n ckp_name = 'best-checkpoint.pth' if info == 'best' else f'checkpoint-epoch{epoch}_{info}.pth'\n filename = os.path.join(self.checkpoint_dir, ckp_name)\n torch.save(state, filename)", "title": "" }, { "docid": "3c2df4fb1ed53fa5afa0490efd8702ab", "score": "0.61705345", "text": "def write(self):\n\n new_bookmark = self.new_bookmark + timedelta(seconds=1)\n new_bookmark = new_bookmark.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n try:\n with open(self.bm_file, \"w\") as open_file:\n open_file.write(new_bookmark)\n open_file.flush()\n logging.debug(\"Updated bookmark file: %s\", new_bookmark)\n except OSError:\n logging.error(\"Bookmark file could not be written\")", "title": "" }, { "docid": "b16621d3c2077e5d9b778d59cef38d9c", "score": "0.61675155", "text": "def save_checkpoint(self, weights=None, state=None, slots=None):\n if self._output_dir is None:\n _log('Did not save checkpoint as output_dir is None', stdout=False)\n return\n weights = self._model_in_training.weights if weights is None else weights\n state = self._model_in_training.state if state is None else state\n slots = self._task.optimizer.slots if slots is None else slots\n flat_weights, flat_state = tl.flatten_weights_and_state(weights, state)\n d = {\n 'step': self.current_step,\n 'flat_weights': flat_weights,\n 'flat_state': flat_state,\n 'slots': slots,\n 'input_signature': self._batch_signature,\n 'version_timestamp': 'Jun-29-2020' # To update in the future if needed.\n }\n ckpt_file = os.path.join(self._output_dir, 'model.pkl.gz')\n pickle_to_file(d, ckpt_file, gzip=True)", "title": "" }, { "docid": "7b949290029b9e75e3ff45718e110c12", "score": "0.6149599", "text": "def save_checkpoint(checkpoints_dir, saved_fn, model_state_dict, utils_state_dict, epoch):\n model_save_path = os.path.join(checkpoints_dir, 'Model_{}_epoch_{}.pth'.format(saved_fn, epoch))\n utils_save_path = os.path.join(checkpoints_dir, 'Utils_{}_epoch_{}.pth'.format(saved_fn, epoch))\n\n torch.save(model_state_dict, model_save_path)\n torch.save(utils_state_dict, utils_save_path)\n\n print('save a checkpoint at {}'.format(model_save_path))", "title": "" }, { "docid": "9e5bae613f9293dd85aba7cc71f8c447", "score": "0.61347246", "text": "def save_checkpoint(state, is_best, checkpoint_dir, logger=None):\n\n def log_info(message):\n if logger is not None:\n logger.info(message)\n\n if not os.path.exists(checkpoint_dir):\n log_info(\n f\"Checkpoint directory does not exists. 
Creating {checkpoint_dir}\")\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n log_info(f\"Saving last checkpoint to '{last_file_path}'\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n log_info(f\"Saving best checkpoint to '{best_file_path}'\")\n shutil.copyfile(last_file_path, best_file_path)", "title": "" }, { "docid": "1a0f252f69cc40725d089d7858a02d24", "score": "0.61279327", "text": "def _save_checkpoint_record(self, checkpoint_dir: str, iteration: int):\n checkpoint_record_latest = os.path.join(checkpoint_dir,\n \"checkpoint_latest\")\n checkpoint_record_best = os.path.join(checkpoint_dir, \"checkpoint_best\")\n\n with open(checkpoint_record_best, \"w\") as handle:\n for i in self.best_records.keys():\n handle.write(\"model_checkpoint_path:{}\\n\".format(i))\n with open(checkpoint_record_latest, \"w\") as handle:\n for i in self.latest_records:\n handle.write(\"model_checkpoint_path:{}\\n\".format(i))", "title": "" }, { "docid": "0a162dc697945d0e0837dd928ed5573a", "score": "0.6124216", "text": "def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):\n filename = os.path.join(self.experiment_dir, filename)\n torch.save(state, filename)\n acc_log = state['acc_log']\n acc_log.to_csv(os.path.join(self.experiment_dir, 'accuracy_log.txt'),index=False)\n if is_best:\n best_pred = state['best_pred']\n with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'w') as f:\n f.write(str(best_pred))\n shutil.copyfile(filename, os.path.join(self.experiment_dir, 'best_model.pth.tar'))", "title": "" }, { "docid": "43800c863ee3bc35bd3ff10598b51ccf", "score": "0.61106896", "text": "def save_checkpoint(state, args, is_best, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/%s/%s/\"%(args.dataset, args.model, args.checkname)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, directory + 'model_best.pth.tar')", "title": "" }, { "docid": "783e5843834d1dd9c803531b98d65aeb", "score": "0.610768", "text": "def save(self, checkpoint_dir):\n self._save_replay_pool(checkpoint_dir)\n self._save_sampler(checkpoint_dir)\n self._save_value_functions(checkpoint_dir)\n self._save_policy(checkpoint_dir)\n self._save_algorithm(checkpoint_dir)\n\n return os.path.join(checkpoint_dir, '')", "title": "" }, { "docid": "a1461cf9b479a1ce139422da35ef6fb3", "score": "0.6104264", "text": "def save_checkpoint(self):\n if not self.params.is_master:\n return\n\n # huggingface saves (more useful in our case for finetuning)\n\n logger.info(f\"Saving epoch {self.epoch} ...\")\n path = os.path.join(self.params.dump_path, f\"huggingface-{self.epoch}\")\n if not os.path.exists(path): os.makedirs(path)\n model_to_save = self.model.module if hasattr(self.model, 'module') else self.model\n model_to_save.save_pretrained(path)\n self.tokenizer.save_pretrained(path)", "title": "" }, { "docid": "a4f4a5d295f0c5caeb3e51c5ecb37d57", "score": "0.60852444", "text": "def save_checkpoint(self, label: str = None) -> None:\n\n checkpoint_dir = pathlib.Path(self._checkpoint_dir)\n\n # Determine path to checkpoint file\n if label is not None:\n # Label explicitly specified!\n\n # Numerical labels are reserved for step counts (see below)\n label = cast(str, label)\n assert not label.isdigit()\n\n experiment_name = cast(\"Buddy\", self)._experiment_name\n path = checkpoint_dir / 
f\"{experiment_name}-{label}.ckpt\"\n else:\n # Automatically decide label using global step count\n optimizer_steps = cast(\"_BuddyOptimizer\", self).optimizer_steps\n path = (\n checkpoint_dir\n / f\"{cast('Buddy', self)._experiment_name}-{optimizer_steps:016d}.ckpt\"\n )\n\n if (\n self._checkpoint_unlabeled_files\n and path == self._checkpoint_unlabeled_files[-1]\n ):\n cast(\"Buddy\", self)._print(\"Skipping redundant checkpoint save\")\n return\n\n # Create directory if it doesn't exist yet\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir(parents=True)\n\n # Create state to save. This includes:\n # > Model state\n # > Optimizers\n # > Training steps\n # > Buddy configuration\n optimizer_states = {}\n\n for name, optimizer in cast(\"_BuddyOptimizer\", self)._optimizer_dict.items():\n optimizer_states[name] = optimizer.state_dict()\n\n state = {\n \"optimizer_config\": dataclasses.asdict(\n cast(\"_BuddyOptimizer\", self)._optimizer_config\n ),\n \"optimizer_states\": optimizer_states,\n \"state_dict\": cast(\"Buddy\", self).model.state_dict(),\n }\n\n # Ignore SIGINT (eg ctrl+c) events while we save to disk...\n try:\n orig_handler = signal.getsignal(signal.SIGINT)\n signal.signal(signal.SIGINT, lambda _sig, _frame: None)\n except ValueError as e: # pragma: no cover\n # signal throws a ValueError if we're not in the main thread\n cast(\"Buddy\", self)._print(\"Error while attaching SIGINT handler:\", e)\n orig_handler = None\n\n # Checkpoint saving\n # > rename is POSIX-compliant and atomic\n tmp_path = checkpoint_dir / f\"tmp-{np.random.randint(1e10)}.ckpt\"\n torch.save(state, tmp_path, pickle_module=dill)\n tmp_path.rename(path)\n cast(\"Buddy\", self)._print(\"Saved checkpoint to path:\", path)\n\n # Restore SIGINT handler\n if orig_handler is not None:\n signal.signal(signal.SIGINT, orig_handler)\n\n # If unlabeled, add to list\n if label is None:\n self._checkpoint_unlabeled_files.append(path)\n\n # Prune checkpoint files\n while len(self._checkpoint_unlabeled_files) > self._checkpoint_max_to_keep:\n self._checkpoint_unlabeled_files.pop(0).unlink()", "title": "" }, { "docid": "f01e87c3b7755f5e02161be04015d55d", "score": "0.6083777", "text": "def checkpoint(self, ckpt_folder: str, max_ckpt: Optional[int] = None):\n\n # Get checkpoints before saving the new one (is torch.save synchronous?)\n filenames, _ = checkpoints_in_folder(ckpt_folder)\n\n # Save checkpoint\n path = os.path.join(ckpt_folder, \"model_{}.pt\".format(self.global_step))\n torch.save(self.state_dict(), path)\n\n # Return if we're supposed to keep all checkpoints\n if max_ckpt is None or max_ckpt == -1:\n return\n\n # Delete all old checkpoints except for the last max_ckpt-1\n if len(filenames) < max_ckpt - 1:\n return\n for i in range(len(filenames) - max_ckpt + 1):\n path = os.path.join(ckpt_folder, filenames[i])\n try:\n os.remove(path)\n except OSError:\n pass", "title": "" }, { "docid": "efba7d589964ee680aca38da3544dd98", "score": "0.6064613", "text": "def save_checkpoint(state, is_best, checkpoint_dir):\n\n if not os.path.exists(checkpoint_dir):\n print(f\"Checkpoint directory does not exists. 
Creating {checkpoint_dir}\")\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n print(f\"Saving last checkpoint to '{last_file_path}'\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n print(f\"Saving best checkpoint to '{best_file_path}'\")\n shutil.copyfile(last_file_path, best_file_path)", "title": "" }, { "docid": "6e418826e9485c85c0b279f9d9e8facc", "score": "0.60597503", "text": "def save_checkpoint(self, checkpoint: Dict[str, Any], path, storage_options: Optional[Any] = None) -> None:\n if storage_options is not None:\n raise TypeError(\n \"`Trainer.save_checkpoint(..., storage_options=...)` with `storage_options` arg\"\n f\" is not supported for `{self.__class__.__name__}`.\"\n )\n\n if \"state_dict\" in checkpoint:\n if self.trainable_param_names:\n updated_params = {}\n for name, param in checkpoint[\"state_dict\"].items():\n adjusted_name = name.replace(\"model.\", \"\", 1)\n if adjusted_name in self.model_name_to_id and self.model_name_to_id[adjusted_name] == 0:\n updated_params[name] = param\n if any(\n [re.match(trainable_param_name, name) for trainable_param_name in self.trainable_param_names]\n ):\n updated_params[name] = param\n else:\n updated_params = checkpoint[\"state_dict\"]\n\n checkpoint[\"state_dict\"] = updated_params\n\n fs = get_filesystem(path)\n fs.makedirs(os.path.dirname(path), exist_ok=True)\n try:\n # write the checkpoint dictionary on the file\n _atomic_save(checkpoint, path)\n except AttributeError as err:\n # todo (sean): is this try catch necessary still?\n # https://github.com/Lightning-AI/lightning/pull/431\n key = pl.LightningModule.CHECKPOINT_HYPER_PARAMS_KEY\n checkpoint.pop(key, None)\n rank_zero_warn(f\"Warning, `{key}` dropped from checkpoint. 
An attribute is not picklable: {err}\")\n _atomic_save(checkpoint, path)", "title": "" }, { "docid": "672461fc9b993c548bcbbf90eab612be", "score": "0.6039078", "text": "def save_checkpoint(self, path: str, **kwargs):\n if self.distributed:\n encoder = self.online_net.module.encoder\n projector = self.online_net.module.projector\n predictor = self.online_predictor.module # XXX: check\n else:\n encoder = self.online_net.encoder\n projector = self.online_net.projector\n predictor = self.online_predictor\n\n ckpt = {\n 'encoder': encoder.state_dict(),\n 'projector': projector.state_dict(),\n 'predictor': predictor.state_dict(),\n 'target_net': self.target_net.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'scheduler': self.scheduler.state_dict(),\n }\n if kwargs:\n ckpt.update(kwargs)\n torch.save(ckpt, path)", "title": "" }, { "docid": "8ff157dcc61bcb4ecd30decda4fb8d8e", "score": "0.60293645", "text": "def save(self, file: str):\n CheckpointCallback.save(file_path=file, pl_module=self.method, update_dict=self.get_checkpoint_update_dict())", "title": "" }, { "docid": "419d260826a342c340cfc0c856be7b97", "score": "0.60279113", "text": "def save_checkpoint(state, filename, is_best):\n if is_best:\n print(\"=> Saving new checkpoint\")\n torch.save(state, filename)\n else:\n print(\"=> Validation Accuracy did not improve\")", "title": "" }, { "docid": "1e79f49a6839078647b7a010b4a15037", "score": "0.6016968", "text": "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)", "title": "" }, { "docid": "686e562a48f746918536a8405887dfa0", "score": "0.60123426", "text": "def save_checkpoint(state, is_best, checkpoint):\n filepath = os.path.join(checkpoint, f\"epoch{state['epoch']}.pth.tar\")\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n else:\n print(\"Checkpoint Directory exists! \")\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))", "title": "" }, { "docid": "54bccd85eb4e43c89005a19a63cac82b", "score": "0.60111845", "text": "def save_checkpoint(state, is_best, file_name='/output/checkpoint.pth.tar'):\n if is_best:\n print (\"=> Saving a new best\")\n torch.save(state, file_name) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "title": "" }, { "docid": "76eb7bac3ad0cd8271feb9767f92cc05", "score": "0.5999684", "text": "def save_checkpoint(state, is_best, checkpoint):\n filepath = os.path.join(checkpoint, 'last.pth.tar')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n else:\n print(\"Checkpoint Directory exists! 
\")\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))", "title": "" }, { "docid": "85f6a301b078221ab0af2411ae100a2d", "score": "0.5986551", "text": "def checkpoint(self):\n self.logger.critical('Checkpointing nested sampling')\n with open(self.resume_file,\"wb\") as f:\n pickle.dump(self, f)", "title": "" }, { "docid": "0d0b11dc83539c6a53b2c9f4d428e1e4", "score": "0.5984946", "text": "def save_to_file(self) -> None:\n pass", "title": "" }, { "docid": "db3d9231bf47ca8bb377b636db53a5d7", "score": "0.59820044", "text": "def save_checkpoint(state, is_best, file_path, file_name='checkpoint.pth.tar'):\n\n save_path = file_path + '/' + file_name\n torch.save(state, save_path)\n if is_best:\n shutil.copyfile(save_path, file_path + '/model_best.pth.tar')", "title": "" }, { "docid": "05d1f9d6d95aebcf1519ed829f71ec1f", "score": "0.5975684", "text": "def save_checkpoint(self, folder, filename_no):\n filename = f\"no{filename_no}.neural.data\"\n filepath = os.path.join(folder, filename)\n if not os.path.exists(folder):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(folder))\n os.mkdir(folder)\n else:\n print(\"Checkpoint Directory exists! \")\n self.model.save_weights(filepath)", "title": "" }, { "docid": "1ccc658ecf90b086bd867063ff0dd798", "score": "0.5960568", "text": "def save(\n self, version, model, is_eval_checkpoint, shard_index=0, shard_num=1\n ):\n filename = self._get_checkpoint_file(\n version, is_eval_checkpoint, shard_index, shard_num\n )\n save_pb_to_file(model, filename)\n if not is_eval_checkpoint:\n self._checkpoint_dir_list.append(os.path.dirname(filename))\n if self._max_versions:\n self._delete_old_checkpoints_if_needed()", "title": "" }, { "docid": "2daac18524eb39db388d5643ec71980b", "score": "0.59511596", "text": "def write(self, filename):\n pass", "title": "" }, { "docid": "ee74b8ede69688c3cba4a0bb14df12d1", "score": "0.59499353", "text": "def _create_checkpoint(cls,checkpoint):\n if not os.path.exists(checkpoint):\n os.makedirs(checkpoint)", "title": "" }, { "docid": "b21a0789b3a6bbf2dc6ce6ebcdd69823", "score": "0.59462816", "text": "def save_checkpoint(self, name, n_iter=None, iter_format=\"{:05d}\", prefix=False, **kwargs):\n\n if n_iter is not None:\n name = name_and_iter_to_filename(name,\n n_iter,\n \".pth.tar\",\n iter_format=iter_format,\n prefix=prefix)\n\n if not name.endswith(\".pth.tar\"):\n name += \".pth.tar\"\n\n self.save_checkpoint_static(self.checkpoint_dir, name=name, **kwargs)", "title": "" }, { "docid": "22ede438d5d2e5a0243f2b1c856cc7b0", "score": "0.59273714", "text": "def save_checkpoint(self, epoch_idx: int, best_val_loss: int = sys.maxsize):\n\n ckpt = self.version.create_checkpoint(epoch=epoch_idx, step=self.step)\n state = {\n 'model': self.model.state_dict(),\n 'optim': self.optim.state_dict(),\n 'step': self.step,\n 'epoch': epoch_idx,\n 'best_val_loss': best_val_loss,\n 'label_map': self.label_map,\n 'features': self.extractor.features,\n }\n torch.save(state, ckpt.file_path)\n\n # Also save JSON for inference UI.\n with open(self.version.info_path, 'w', encoding='utf-8') as f:\n json.dump(self.get_info(), f, indent=2, sort_keys=True)\n\n return ckpt", "title": "" }, { "docid": "1fdab0cafc52f868c03c3d8c4e2b3f45", "score": "0.5924588", "text": "def save_checkpoint(self, config, population, species_set, generation):\n filename = '{0}{1}'.format(self.filename_prefix,generation)\n print(\"Saving checkpoint to {0}\".format(filename))\n\n winnerfile = 
'{0}{1}'.format('neat-winner', generation)\n winner = population.best_fit_genome(-1)\n\n with gzip.open(filename, 'w', compresslevel=5) as f:\n data = (generation, config, population, species_set, random.getstate())\n pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n with gzip.open(winnerfile, 'w', compresslevel=5) as f:\n pickle.dump(winner, f, protocol=pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "7a36b1977def4dab401c2de9697bc3c2", "score": "0.5918861", "text": "def write_checkpoint_model_items(self, chkp_file_path, chkp_model_list):\n if not os.path.exists(chkp_file_path):\n raise ValueError('checkpoint file path is wrong.')\n with open(chkp_file_path, 'w') as fp:\n fp.write('model_checkpoint_path: \"' + chkp_model_list[0] + '\"\\n')\n for idx in range(1, len(chkp_model_list)):\n fp.write('all_model_checkpoint_paths: \"' +\n chkp_model_list[idx] + '\"\\n')", "title": "" }, { "docid": "2aa8014d05429748595d0e0d57440ef8", "score": "0.59172714", "text": "def save_checkpoint(state, is_best, name, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\" % name\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/' % name + 'model_best.pth.tar')", "title": "" }, { "docid": "247513e3737037b9a7c514eddcbb684f", "score": "0.5897915", "text": "def _write_checklist_to_disk(self):\n with open(self.config[\"checklist file\"], \"wt\") as f:\n yaml.dump(self.checklist, f)", "title": "" }, { "docid": "79049b5262e6184bb9df8152ab420ab9", "score": "0.5895203", "text": "def save_checkpoint_to_gdrive(model_name, run=None, checkpoint=None, drive_name=\"My Drive\"):\n drive.mount('/content/gdrive')\n model_dir = \"/content/gdrive/%s/Colab Checkpoints/%s/\" % (drive_name, model_name)\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n if run is None:\n runs = sorted(os.listdir(os.path.join(model_name, \"checkpoints\")))\n run = runs[-1]\n if not os.path.exists(model_dir + run):\n os.mkdir(model_dir + run)\n if checkpoint is None:\n checkpoint_dir = os.path.join(model_name, \"checkpoints\", run)\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)\n else:\n checkpoint_file = os.path.join(\n model_name, \"checkpoints\", run, checkpoint)\n files = [f for f in os.listdir(checkpoint_dir) if f.startswith(\n checkpoint_file.split(\"/\")[-1])]\n for file in files:\n src = os.path.join(model_name, \"checkpoints\", run, file)\n dest = os.path.join(model_dir, run, file)\n print(\"writing checkpoint to %s\" % dest)\n copyfile(src, dest)", "title": "" }, { "docid": "9e2c32a167f87f3088f149fc2043064b", "score": "0.58868486", "text": "def set_checkpoint(self, path, over_write=True):\n callZooFunc(self.bigdl_type, \"zooSetCheckpoint\",\n self.value,\n path,\n over_write)", "title": "" }, { "docid": "c68bc5e58d7cdcbc39b1f9481dbd3ade", "score": "0.58823013", "text": "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n\tdirectory = \"runs/%s/\"%(args.name)\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\tif filename=='checkpoint.pth.tar':\n\t\tfilename = 'checkpoint_'+str(state[\"epoch\"])+'.pth.tar'\n\n\tfilename = directory + filename\n\ttorch.save(state, filename)\n\tif is_best:\n\t\tshutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')", "title": "" }, { "docid": "547c24ab83c3e5cafd4a48050df9002c", "score": "0.5878466", "text": "def save(self, model: torch.nn.Module, optimizer: Optimizer, step: int, score: float = 
0.0):\n if self._save_path is None:\n raise AssertionError('Checkpoint manager must be initialized with save path for save().')\n\n self._save_checkpoint(step, model, optimizer, score)\n self._remove_old_checkpoints()\n self._update_checkpoints_file()", "title": "" }, { "docid": "b8086b9ee70bb9655d8869050a5ab10a", "score": "0.586644", "text": "def save_checkpoint(optimizer, model, epoch, filename):\n checkpoint_dict = {\n 'optimizer' : optimizer.state_dict(),\n 'model' : model.state_dict(),\n 'epoch' : epoch\n }\n torch.save(checkpoint_dict, filename)", "title": "" }, { "docid": "ec72d20e7c25ee7fc9a1058befdc5efa", "score": "0.58571273", "text": "def write(self):\n logging.debug(\"Writing bookmark.\")\n try:\n with open(self.cachefile, \"wb\") as open_cache:\n pickle.dump(self.cache, open_cache)\n logging.debug(\n \"Cache file entries written (filename:cnt): %s:%s\",\n self.cachefile,\n len(self.cachefile),\n )\n except OSError:\n logging.error(\"Cache file could not be written: %s\", self.cachefile)\n else:\n logging.info(\"Caching disabled. Touching file: %s\", self.cachefile)\n touch(self.cachefile)", "title": "" }, { "docid": "9d79c34f128b962c4ad5634c28a94590", "score": "0.58521146", "text": "def write_file(self):\n pass", "title": "" }, { "docid": "7c76c0fcc49e9401017303e24b8b0ba1", "score": "0.58324957", "text": "def save_checkpoint(\n epoch: int,\n model: nn.Module,\n model_name: str,\n optimizer: optim.Optimizer,\n dataset_name: str,\n word_map: Dict[str, int],\n checkpoint_path: str,\n checkpoint_basename: str = 'checkpoint'\n) -> None:\n state = {\n 'epoch': epoch,\n 'model': model,\n 'model_name': model_name,\n 'optimizer': optimizer,\n 'dataset_name': dataset_name,\n 'word_map': word_map\n }\n save_path = os.path.join(checkpoint_path, checkpoint_basename + '.pth.tar')\n torch.save(state, save_path)", "title": "" }, { "docid": "b58f7267d5a632fa392d8e051d19c89b", "score": "0.5827474", "text": "def save_checkpoint(state, save, path, filename):\n if save:\n print (\"=> Saving a new model\")\n if not os.path.exists(path):\n os.makedirs(path)\n torch.save(state, filename)\n print('=> saved model to {}'.format(filename))\n else:\n print (\"=> Validation Accuracy did not improve\")", "title": "" }, { "docid": "94a60f47f7a633cb1fd1fb3c3e03f2f1", "score": "0.58254796", "text": "def save_checkpoint(self, state, is_best, filename):\n ''' \n usage:\n Training_aux.save_checkpoint(\n state = {\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best = is_best)\n '''\n directory = \"%s/\"%(self.fsave)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, '%s/'%(self.fsave) + 'modelBest.pth.tar')\n self.write_err_to_file(epoch = 0, top1 = 0, top5 = 0, trn_loss = 0, mode = 'best')\n return", "title": "" }, { "docid": "5ff47a47bfbe71bc33b57747788f722b", "score": "0.5816837", "text": "def saveModel(self, checkpointpath):\n self._saveModel(checkpointpath)", "title": "" }, { "docid": "4b539fe40f96b3ce7a48aad44b721f43", "score": "0.58147013", "text": "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\" % (args.name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/' %\n (args.name) + 'model_best.pth.tar')", "title": "" }, { "docid": 
"8d98228d7bf794ef9ae3bd02197eaec8", "score": "0.5813748", "text": "def save_checkpoint(state, directory):\n directory = dir_path + \"/\" + directory + \"/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + \"/\" + str(state[\"iterations\"]) + \".pth\"\n torch.save(state, filename)", "title": "" }, { "docid": "c9da9fea29ad59dafa1cd3d2ec443c21", "score": "0.5809087", "text": "def checkpoint(self):\n torch.save(self.local_critic.state_dict(), 'checkpoint_critic_{}.pth'.format(self.agent_index))\n torch.save(self.local_actor.state_dict(), 'checkpoint_actor_{}.pth'.format(self.agent_index))\n torch.save(self.critic_optimizer.state_dict(), 'checkpoint_critic_{}_optimizer.pth'.format(self.agent_index))\n torch.save(self.actor_optimizer.state_dict(), 'checkpoint_actor_{}_optimizer.pth'.format(self.agent_index))", "title": "" }, { "docid": "ba3649cce350474b139cdcffc24b991c", "score": "0.577088", "text": "def save_checkpoint(state, is_best, checkpoint):\n filepath = os.path.join(checkpoint, 'last.pth.tar')\n safe_makedir(checkpoint)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))", "title": "" }, { "docid": "40f85c0ca3c994a628a95f96df06df32", "score": "0.576446", "text": "def save_checkpoint_current(save_path, dispnet_state, exp_pose_state, epoch, filename='checkpoint.pth.tar'):\n file_prefixes = ['dispnet', 'posenet']\n states = [dispnet_state, exp_pose_state]\n for (prefix, state) in zip(file_prefixes, states):\n torch.save(state, os.path.join(save_path, '{}_{}_{}'.format(prefix, epoch, filename)))", "title": "" }, { "docid": "ef45a1a8d1d80e26dadd0050fbb0468b", "score": "0.5757824", "text": "def save(self, filename=None, dirpath=\".\", checkpoint=True):\n if checkpoint:\n # Get the name and other relevant information\n model_name = self.checkpoint['model_name']\n epoch = self.checkpoint['epoch']\n filename = f\"model_{model_name}_epoch{epoch}.pt\" if filename is None else filename\n # Save in the appropriate directory, and create it if it doesn't exists\n Path(dirpath).mkdir(parents=True, exist_ok=True)\n # Save the best model\n path = os.path.join(dirpath, filename)\n torch.save(self.best_model, path)\n # Save its checkpoint\n checkname = f\"checkpoint_{filename.split('.')[-2].split('_')[1]}_epoch{epoch}.pt\"\n checkpath = os.path.join(dirpath, checkname)\n torch.save(self.checkpoint, checkpath)\n else:\n model_name = self.checkpoint['model_name']\n filename = f\"model_{model_name}.pt\" if filename is None else filename\n torch.save(self.model, filename)", "title": "" }, { "docid": "747d9b40501e6f2263d2199a01f7c467", "score": "0.57531255", "text": "def save_checkpoint(self, f1_score):\n model_path = '{}/epoch_{}-f1_{}.pt'.format(\n self.checkpoint_directory,\n self.cur_epoch,\n f1_score)\n\n checkpoint = {\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self._optim.state_dict()\n }\n torch.save(checkpoint, model_path)", "title": "" }, { "docid": "84eeb4dda04ae91ae7249cd2a562d2a3", "score": "0.57378733", "text": "def save_checkpoint(self, filename='checkpoint.pth.tar', is_best=0):\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iteration,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n }\n # Save the state\n torch.save(state, self.config.checkpoint_dir + filename)\n # If it is the best copy it to another file 'model_best.pth.tar'\n if is_best:\n shutil.copyfile(self.config.checkpoint_dir + filename,\n 
self.config.checkpoint_dir + 'model_best.pth.tar')", "title": "" }, { "docid": "43ad23710b2d6acb0ac1968d0c148fac", "score": "0.57336307", "text": "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', *args):\n directory = \"runs/%s/\"%(args.name)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n filename = directory + filename\n torch.save(state, filename)\n\n if is_best:\n shutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')", "title": "" }, { "docid": "831e0ac871be82cdda8fe364f85b4f09", "score": "0.5729638", "text": "def make_checkpoint(self):", "title": "" }, { "docid": "029eb9a62f9e54cba9729b6bda42e38c", "score": "0.5726356", "text": "def save_to_file(self):\n filename = self.FILENAME[\"PREFIX\"] + self.FILENAME[\"TSTAMP\"] + \\\n self.FILENAME[\"SUFFIX\"]\n self.classifier.save(datetime.now().strftime(filename))", "title": "" }, { "docid": "8d72a0548164d3a4ed0bd321de01a4df", "score": "0.57230484", "text": "def save_file(self):\n if self.rank == 0:\n #We should make sure we have loaded all lazy-loaded things first.\n self._load_all_multihash(self.tau_obs, \"tau_obs\")\n self._load_all_multihash(self.tau, \"tau\")\n self._load_all_multihash(self.colden, \"colden\")\n try:\n self._load_all_multihash(self.velocity, \"velocity\")\n except IOError:\n pass\n #Make sure the directory exists\n if not path.exists(path.dirname(self.savefile)):\n os.mkdir(path.dirname(self.savefile))\n #Make a backup.\n if path.exists(self.savefile):\n shutil.move(self.savefile, self.savefile+\".backup\")\n try:\n f = h5py.File(self.savefile, 'w')\n except IOError as io:\n raise IOError(\"Could not open \", self.savefile, \" for writing\") from io\n self._save_file(f)", "title": "" }, { "docid": "6f7b884e0ae84bc5dfecc08de639950c", "score": "0.5716533", "text": "def save_checkpoint(self, state, is_best):\n # print(\"[*] Saving model to {}\".format(self.ckpt_dir))\n\n filename = self.model_name + '_ckpt.pth.tar'\n ckpt_path = os.path.join(self.ckpt_dir, filename)\n torch.save(state, ckpt_path)\n\n if is_best:\n filename = self.model_name + '_model_best.pth.tar'\n shutil.copyfile(\n ckpt_path, os.path.join(self.ckpt_dir, filename)\n )", "title": "" }, { "docid": "f74190dfdc210784ff5e822a1ae472c2", "score": "0.57079244", "text": "def save_checkpoint(state, is_best, path, prefix, filename=\"checkpoint.pth.tar\"):\n if not os.path.exists(path):\n os.mkdir(path)\n prefix_save = os.path.join(path, prefix)\n name = \"_\".join([prefix_save, filename])\n torch.save(state, name)\n if is_best:\n torch.save(state, path + \"/model_best.pth.tar\")", "title": "" } ]
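Read together, the save_checkpoint variants in the passages above all reduce to one pattern: bundle the training state into a dict, torch.save it to a rolling 'last' file, and copy that file to a 'best' file when the tracked metric improves. A minimal sketch of that shared pattern follows; the function name, the file names, and the exact keys in state are illustrative assumptions, not taken from any single passage.

import os
import shutil

import torch


def save_checkpoint(state: dict, is_best: bool, checkpoint_dir: str) -> None:
    # `state` typically holds 'epoch', 'state_dict', and 'optimizer',
    # mirroring the snippets above (assumed keys, not required ones).
    os.makedirs(checkpoint_dir, exist_ok=True)
    last_path = os.path.join(checkpoint_dir, 'last.pth.tar')
    torch.save(state, last_path)  # always persist the most recent state
    if is_best:
        # Copy instead of re-saving so 'best' stays byte-identical to 'last'.
        shutil.copyfile(last_path, os.path.join(checkpoint_dir, 'best.pth.tar'))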
4ac2e54590f993f25d4c93be27d37de1
An object to perform Mean Pooling that ignores PAD-token representations
[ { "docid": "fce1104ec07a2850aa4a0b1b96958266", "score": "0.6255539", "text": "def __init__(self):\n super(MeanMaskedPooling, self).__init__()", "title": "" } ]
[ { "docid": "ddf59ab9f023d2a3c12019cd0472996b", "score": "0.5281446", "text": "def __init__(\n self,\n pools: list[LiquidityPair],\n token_in: Token,\n token_out: Token,\n ):\n self.pools: LiquidityPair\n\n tokens = [token_in]\n for pair in pools:\n if tokens[-1] == pair.tokens[0]:\n tokens.append(pair.tokens[1])\n else:\n tokens.append(pair.tokens[0])\n\n super().__init__(pools, tokens)", "title": "" }, { "docid": "f8ece946cfd0cf474012931ba2a0cdcd", "score": "0.52588207", "text": "def cls_pooling(model_output):\n return model_output[0][:, 0] # 1st token is the [CLS] token", "title": "" }, { "docid": "5e1a650754a3d2c1809fdcf0fbf55708", "score": "0.521926", "text": "def mean_pooling(self, model_output, attention_mask):\n token_embeddings = model_output[0] #First element of model_output contains all token embeddings\n input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)\n sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n return sum_embeddings / sum_mask", "title": "" }, { "docid": "a319e6be58e54206068925e1e35e34e4", "score": "0.51791686", "text": "def __init__(self, network, use_shortlist=True, exclude_unk=False,\n profile=False):\n\n self._vocabulary = network.vocabulary\n self._unk_id = self._vocabulary.word_to_id['<unk>']\n\n # The functions take as input a mini-batch of word IDs and class IDs,\n # and slice input and target IDs for the network.\n batch_word_ids = tensor.matrix('textscorer/batch_word_ids',\n dtype='int64')\n batch_word_ids.tag.test_value = test_value(\n size=(21, 4), high=self._vocabulary.num_words())\n batch_class_ids = tensor.matrix('textscorer/batch_class_ids',\n dtype='int64')\n batch_class_ids.tag.test_value = test_value(\n size=(21, 4), high=self._vocabulary.num_classes())\n membership_probs = tensor.matrix('textscorer/membership_probs',\n dtype=theano.config.floatX)\n membership_probs.tag.test_value = test_value(\n size=(20, 4), high=1.0)\n\n # Convert out-of-shortlist words to <unk> in input.\n shortlist_size = self._vocabulary.num_shortlist_words()\n input_word_ids = batch_word_ids[:-1]\n oos_indices = tensor.ge(input_word_ids, shortlist_size).nonzero()\n input_word_ids = tensor.set_subtensor(input_word_ids[oos_indices],\n self._unk_id)\n # Out-of-shortlist words are already in <unk> class, because they don't\n # have own classes.\n input_class_ids = batch_class_ids[:-1]\n target_class_ids = batch_class_ids[1:]\n # Target word IDs are not used by the network. 
We need them to compute\n # probabilities for out-of-shortlist word.\n target_word_ids = batch_word_ids[1:]\n\n logprobs = tensor.log(network.target_probs())\n # Add logprobs from the class membership of the predicted word.\n logprobs += tensor.log(membership_probs)\n\n mask = network.mask\n if use_shortlist and network.oos_logprobs is not None:\n # The probability of out-of-shortlist words (which is the <unk>\n # probability) is multiplied by the fraction of the actual word\n # within the set of OOS words.\n logprobs += network.oos_logprobs[target_word_ids]\n # Always exclude OOV words when using a shortlist - No probability\n # mass is left for them.\n mask *= tensor.neq(target_word_ids, self._unk_id)\n elif exclude_unk:\n # If requested, ignore OOS and OOV probabilities.\n mask *= tensor.neq(target_word_ids, self._unk_id)\n mask *= tensor.lt(target_word_ids, shortlist_size)\n\n # Ignore unused input variables, because is_training is only used by\n # dropout layer.\n masked_logprobs = logprobs * tensor.cast(mask, theano.config.floatX)\n self._target_logprobs_function = theano.function(\n [batch_word_ids, batch_class_ids, membership_probs, network.mask],\n [masked_logprobs, mask],\n givens=[(network.input_word_ids, input_word_ids),\n (network.input_class_ids, input_class_ids),\n (network.target_class_ids, target_class_ids),\n (network.is_training, numpy.int8(0))],\n name='target_logprobs',\n on_unused_input='ignore',\n profile=profile)\n\n # If some word is not in the training data, its class membership\n # probability will be zero. We want to ignore those words. Multiplying\n # by the mask is not possible, because those logprobs will be -inf.\n mask *= tensor.neq(membership_probs, 0.0)\n masked_logprobs = tensor.switch(mask, logprobs, 0.0)\n self._total_logprob_function = theano.function(\n [batch_word_ids, batch_class_ids, membership_probs, network.mask],\n [masked_logprobs.sum(), mask.sum()],\n givens=[(network.input_word_ids, input_word_ids),\n (network.input_class_ids, input_class_ids),\n (network.target_class_ids, target_class_ids),\n (network.is_training, numpy.int8(0))],\n name='total_logprob',\n on_unused_input='ignore',\n profile=profile)\n\n # These are updated by score_line().\n self.num_words = 0\n self.num_unks = 0", "title": "" }, { "docid": "19bd68d8f0ac87688e9a9d25b19904bf", "score": "0.5167621", "text": "def __init__(self):\n model_name = 'bert-large-uncased-whole-word-masking-finetuned-squad'\n self.tokenizer = BertTokenizer.from_pretrained(model_name)\n self.model = BertForQuestionAnswering.from_pretrained(model_name) \n self.model.eval() \n self.model.to('cpu')", "title": "" }, { "docid": "932900bedb6691657ede311547f3e90a", "score": "0.51538", "text": "def __init__(self, token, label, weight, multivalent_tokens=False):\n self._token = token\n self._label = label\n self._weight = weight\n self._multivalent_tokens = multivalent_tokens\n self._fill_with_defaults()", "title": "" }, { "docid": "bf920e63ba58ac4a5bb3a6d35ad6e154", "score": "0.51338893", "text": "def forward(self, **inputs):\n\n # Run through transformers model\n tokens = super().forward(**inputs)\n mask = inputs[\"attention_mask\"]\n\n # Mean pooling\n # pylint: disable=E1101\n mask = mask.unsqueeze(-1).expand(tokens.size()).float()\n return torch.sum(tokens * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)", "title": "" }, { "docid": "9f55fe5c284ee2062c8de21b1a6845c6", "score": "0.4987951", "text": "def nat():\n pass", "title": "" }, { "docid": "85dd9095387738f97f74bb2b4db54faa", "score": "0.4954867", "text": 
"def make_instances( tokenizer, ann_files, text_files, corenlp_files, outdir, max_len = 512, is_training = False, use_sys_ners = False):\n\n # first collect all valid entity for predictions. this is instance-based, which only relies on character offset. And it only extracts spans that cooccur with target in a sentence\n gold_spanins = [] # stores entities that are in a gold relation in annotation. used for evaluation \n\n seen_goldids = set()\n for text_file, ann_file, corenlp_file in zip(text_files, ann_files, corenlp_files):\n\n intrasent_gold_relations = [(e1, e2, relation) for e1, e2, relation in extract_intrasent_goldrelations_from_ann(ann_file, corenlp_file = corenlp_file) if relation == 'Contains' and e1['label'] == 'Target' and e2['label'] in ['Element', 'Mineral']]\n\n for e1, e2, relation in intrasent_gold_relations:\n\n span2 = Span_Instance(e2['venue'], e2['year'], e2['docname'], e2['doc_start_char'], e2['doc_end_char'], e2['text'], 'Component') # specifically assign component as the label \n span2.relation_label = 'Contains'\n \n if e2['label'] in ['Element', 'Mineral', 'Component'] and span2.span_id not in seen_goldids:\n seen_goldids.add(span2.span_id)\n gold_spanins.append(span2)\n\n\n spanins = []\n seen_spanids = set()\n exceed_len_cases = 0 \n added_extra = 0 \n pseudo_positive_training = []\n for text_file, ann_file, corenlp_file in zip(text_files, ann_files, corenlp_files):\n\n doc = json.load(open(corenlp_file))\n\n for e1, e2 in extract_intrasent_entitypairs_from_text_file(text_file, ann_file, doc = doc, use_sys_ners = use_sys_ners, use_component = True):\n\n sentid = e1['sentid']\n if e1['label'] != 'Target' or e2['label'] not in ['Element', 'Mineral', 'Component']:\n continue\n\n sent_toks = [token['word'] for token in doc['sentences'][sentid]['tokens']]\n \n\n span2 = Span_Instance(e2['venue'], e2['year'], e2['docname'], e2['doc_start_char'], e2['doc_end_char'], e2['text'], 'Component', sent_toks = deepcopy(sent_toks), sentid = sentid, sent_start_idx = e2['sent_start_idx'], sent_end_idx = e2['sent_end_idx'])\n\n \n if e2['label'] in ['Element', 'Mineral', 'Component'] and span2.span_id not in seen_spanids :\n exceed = span2.insert_type_markers(tokenizer, max_len = max_len)\n span2.relation_label = 'Contains' if span2.span_id in seen_goldids else 'O'\n spanins.append(span2)\n seen_spanids.add(span2.span_id)\n exceed_len_cases += exceed\n\n\n\n \n if is_training:\n posins_ids = set([(s.venue, s.year, s.docname, s.sentid, s.std_text) for s in spanins if s.relation_label != 'O'])\n \n for s in spanins:\n if (s.venue, s.year, s.docname, s.sentid, s.std_text) in posins_ids:\n\n if s.relation_label == 'O':\n pseudo_positive_training.append(s)\n s.relation_label = 'Contains'\n\n\n\n print(f\"Generated {len(spanins)} extracted instances with {len([s for s in spanins if s.relation_label != 'O'])} positive, and {exceed_len_cases} of these exceed max_len\")\n print(f\"Generated {len(gold_spanins)} gold instances\")\n\n intersection = len(set([s.span_id for s in spanins]).intersection(seen_goldids))/len(seen_goldids)\n print(f\"{intersection*100:.2f}% gold spans are matched in the extracted spans\")\n \n if not exists(outdir):\n os.makedirs(outdir)\n\n outfile = join(outdir, f\"spanins.pkl\")\n print(f\"Saving to {outfile}\")\n with open(outfile, \"wb\") as f:\n pickle.dump(spanins, f)\n\n outfile = join(outdir, f\"gold_spanins.pkl\")\n print(f\"Saving the evaluation set to {outfile}\")\n with open(outfile, \"wb\") as f:\n pickle.dump(gold_spanins, f)\n\n print()", "title": "" 
}, { "docid": "8bf58e1b29cc6f152da54c44e75532de", "score": "0.49119505", "text": "def prepare_minibatch(mb: List[Example],\n tokenizer: BertTokenizer,\n max_length: int=512,\n device: str=None):\n cls_token_id = tokenizer.cls_token_id\n sep_token_id = tokenizer.sep_token_id\n pad_token_id = tokenizer.pad_token_id\n cls_token = torch.tensor([cls_token_id])#.to(device=device)\n sep_token = torch.tensor([sep_token_id])#.to(device=device)\n inputs = []\n exps = []\n labels = []\n position_ids = []\n for inst in mb:\n q = inst.query\n d = inst.tokens\n exp = inst.token_labels\n labels.append(inst.label)\n if len(q) + len(d) + 2 > max_length:\n # d = torch.Tensor(d[:(max_length - len(q) - 2)]).type_as(cls_token)\n # exp = torch.Tensor(exp[:(max_length - len(q) - 2)])\n d = d[:(max_length - len(q) - 2)]\n exp = exp[:(max_length - len(q) - 2)]\n # q = torch.Tensor(q).type_as(cls_token)\n # print(cls_token.__class__, q.__class__, exp.__class__)\n # print(cls_token.type(), q.type(), exp.type())\n inputs.append(torch.cat([cls_token, q, sep_token, d]))\n exps.append(torch.cat([torch.Tensor([0] * (len(q) + 2)), exp]))\n position_ids.append(torch.tensor(list(range(0, len(q) + 1)) + list(range(0, len(d) + 1)))) # tokens\n # positions are counted from 1, the two 0s are for [cls] and [sep], [pad]s are also nominated as pos 0\n\n inputs = PaddedSequence.autopad(inputs, batch_first=True, padding_value=pad_token_id, device=device)\n positions = PaddedSequence.autopad(position_ids, batch_first=True, padding_value=0, device=device)\n exps = PaddedSequence.autopad(exps, batch_first=True, padding_value=0, device=device)\n attention_masks = inputs.mask(on=1., off=-1000.).type(torch.float).to(device=device)\n padding_masks = inputs.mask(on=1., off=0.).type(torch.bool).to(device=device)\n labels = torch.LongTensor(labels).to(device=device)\n return inputs, exps, labels, positions, attention_masks, padding_masks\n\n\n #\n #\n # queries, documents, exps, labels = [], [], [], []\n # for inst in mb:\n # q = inst.query\n # queries.append(torch.Tensor(q))\n # documents.append(torch.Tensor(inst.tokens))\n # exps.append(torch.cat(torch.Tensor([0] * (len(q) + 2))inst.token_labels))\n # labels.append(torch.Tensor([inst.label]))\n #\n # return queries, documents, exps, labels", "title": "" }, { "docid": "ca3ed559d01bba92a07f02e3769812aa", "score": "0.49012294", "text": "def __init__(self):\n self.max_word_id = 3\n self.wmap_len = 0\n self.key2id = {}\n self.synchronize()\n self.reserved_keys = {'<unk> ': utils.UNK_ID,\n '<eps> ': utils.UNK_ID,\n '<epsilon> ': utils.UNK_ID,\n '<s> ': utils.GO_ID,\n '</s> ': utils.EOS_ID}", "title": "" }, { "docid": "4aec7d31aeb6500516ec2a8532b66f5b", "score": "0.4880331", "text": "def __init__(self, features, gcn=False, num_list=None, start_end_dict=None, pass_pseudo_id=False):\n\t\t\n\t\tsuper(MeanAggregator, self).__init__()\n\t\t\n\t\tself.features = features\n\t\tself.gcn = gcn\n\t\tself.num_list = torch.as_tensor(num_list)\n\t\tself.mask = None\n\t\tself.start_end_dict = start_end_dict\n\t\t\n\t\t# If the feature function comes from a graphsage encoder, use the cell_id * (bin_num+1) + bin_id as the bin_id\n\t\tself.pass_pseudo_id =pass_pseudo_id\n\t\t\n\t\tprint (\"pass_pseudo_id\", self.pass_pseudo_id)", "title": "" }, { "docid": "926567d1f3343fe5dd2cea120f920747", "score": "0.4857709", "text": "def __init__(self, corpus):\n self.unigramcount = collections.defaultdict(lambda: 0)\n self.biagramcount = collections.defaultdict(lambda: 0)\n self.totals = 0 # Number of tokens\n 
self.train(corpus)", "title": "" }, { "docid": "3c07d26ce211d6201989b40a999b4741", "score": "0.4846744", "text": "def __init__(self, input_file, tokenizer, dataset_name, num_classes):\n # TODO: read the input file line by line and put the lines in a list.\n\n # TODO: split the whole file (including both training and validation\n # data) into words and create the corresponding vocab dictionary.\n\n # TODO: create inputs and labels for both training and validation data\n # and make sure you pad your inputs.\n\n # Hint: remember to add start and pad to create inputs and labels\n self.lengths=[]\n self.labels=[]\n self.tokenizer=tokenizer\n self.pad_token=tokenizer(\"<pad>\")[\"input_ids\"][1]\n\n # Seems to already be inserting the cls token (0) and sep token (2)\n # during tokenization\n self.cls_token=tokenizer(\"<s>\")[\"input_ids\"][1]\n self.sep_token=tokenizer(\"</s>\")[\"input_ids\"][1]\n\n self.tokens=[]\n self.texts=[]\n multi_class_counts = [0] * 8\n binary_class_counts = [0] * num_classes\n\n with open(input_file,'r') as f:\n \n while True:\n line=f.readline()\n \n if not line:\n break\n\n tabbed = line.split(\"\\t\")\n text = tabbed[0]\n line_labels = tabbed[1]\n toke=self.tokenizer(text)['input_ids']\n num_emotions = 8\n prepped_label = [0] * num_emotions\n\n for i in range(num_emotions):\n if str(i+1) in line_labels:\n prepped_label[i] = 1\n multi_class_counts[i] += 1\n\n if (num_classes == 1):\n sentiment=convert_to_binary(prepped_label)\n if sentiment==None:\n #print(\"Throw this line out\")\n #print(prepped_label)\n continue\n else:\n self.labels.append([sentiment])\n binary_class_counts[0] += sentiment\n else:\n self.labels.append(prepped_label)\n self.texts.append(text)\n \n self.tokens.append(torch.as_tensor(toke))\n self.lengths.append(len(toke))\n \n self.labels=torch.as_tensor(self.labels)\n self.tokens=pad_sequence(self.tokens,batch_first=True,padding_value=self.pad_token)\n num_examples = len(self.tokens)\n if (num_classes == 1):\n class_counts = binary_class_counts\n else: \n class_counts = multi_class_counts\n\n self.pos_weights = calculate_pos_weights(class_counts, num_examples)\n # print(\"Size of tokens: \" + str(self.tokens.size()))\n # print(\"Size of labels: \" + str(self.labels.size()))\n # print(\"Size of lengths: \" + str(len(self.lengths)))\n # print(\"Some tokens\")\n # print(self.tokens[:5])\n # print(\"Some labels\")\n # print(self.labels[:5])\n # print(\"Some lengths\")\n # print(self.lengths[:5])\n # print(max(self.lengths))\n # self.masks=pad_sequence(self.masks,batch_first=True,padding_value=padding_value)", "title": "" }, { "docid": "589fb0c1bf1e8ffe0fb7731225fc697d", "score": "0.48186275", "text": "def __init__(self, features, sampler, am, is_softgate=True, cuda=False, gcn=False):\n\n super(MeanAggregator, self).__init__()\n\n self.features = features\n self.sampler = sampler\n self.is_softgate = is_softgate\n self.cuda = cuda\n self.gcn = gcn\n self.am = am", "title": "" }, { "docid": "dd71d189a57fbb8dd7b9b3a28b348d0c", "score": "0.47999626", "text": "def __init__(self,\n queries: Iterator[str],\n targets: Iterator[str],\n preprocessor: Tokenizer_nltk,\n batch_size: int = 64,\n num_negatives: int =100,\n reuse_negatives: bool = True,\n negativepool: Iterator[str] = np.array([]),\n init_word_to_index: Dict[str, int] = None,\n embedder_type: str = 'index'):\n self.preprocessor = preprocessor\n self.embeddertype = embedder_type\n self.batch_size = batch_size\n self.num_negatives = num_negatives\n self.reuse_negatives = reuse_negatives\n 
self.word_to_index = init_word_to_index.copy() if init_word_to_index is not None else {}\n\n # Extract and preprocess queries and targets\n self.queries_text = list(queries)\n self.targets_text = list(targets)\n\n assert len(queries) == len(targets)\n self.queries = self._preprocess(self.queries_text)\n self.targets = self._preprocess(self.targets_text)\n\n if len(negativepool) == 0:\n self.negative_text = list(set(self.targets_text))\n self.negativepool = self._preprocess(self.negative_text)\n else:\n self.negative_text = list(negativepool)\n self.negativepool = self._preprocess(self.negative_text)\n print('There are {} faq to do negative sampling'.format(len(self.negativepool)))\n\n # Initialize variables and create dataset by shuffling positives and sampling negatives\n self.num_examples = len(self.queries)\n self.num_batches = math.ceil(self.num_examples / self.batch_size)\n self.position = 0\n self.seed = 0\n self.negatives = []\n\n #self._recreate_dataset()", "title": "" }, { "docid": "834443481be35ea4730e0122e900eb98", "score": "0.47950938", "text": "def forward(self, tokens, lengths):\n tokens = tokens[:, : lengths.max()]\n # When using just one GPU this should not change behavior\n # but when splitting batches across GPU the tokens have padding\n # from the entire original batch\n mask = lengths_to_mask(lengths, device=tokens.device)\n\n # Run BERT model.\n word_embeddings = self.bert(tokens, mask)[0]\n \n # Average Pooling\n word_embeddings = mask_fill(\n 0.0, tokens, word_embeddings, self.tokenizer.padding_index\n )\n sentemb = torch.sum(word_embeddings, 1)\n sum_mask = mask.unsqueeze(-1).expand(word_embeddings.size()).float().sum(1)\n sentemb = sentemb / sum_mask\n \n return {\"logits\": self.classification_head(sentemb)}", "title": "" }, { "docid": "9b2e986ee774062676c2d8de95f3fd16", "score": "0.479204", "text": "def __init__(self,\n token_dict,\n bpe_rank):\n self.token_dict = token_dict\n self.token_dict_inv = {v: k for k, v in self.token_dict.items()}\n self.bpe_rank = bpe_rank\n self.byte_encoder = self.init_byte_encoder()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n self.token_pattern = re.compile(r\"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+\")\n self.cache = {}", "title": "" }, { "docid": "c3f885723fffb82cdf80f552fde0abd2", "score": "0.47532955", "text": "def __init__(self, supports_len, batch_size, num_nodes, his_len, d_in,\r\n max_diffusion_step, d_out, filter_type, bias_start=0.0, adpt_type=\"pam\"):\r\n super().__init__()\r\n if adpt_type == \"no_adpt\":\r\n self.num_matrices = supports_len * max_diffusion_step + 1\r\n else: # \"pam\" or \"random_embedding\"\r\n self.num_matrices = supports_len*max_diffusion_step + 2\r\n self.batch_size = batch_size\r\n self.his_len = his_len\r\n self.batch_size_t = batch_size * his_len\r\n self.d_in = d_in\r\n self.d_out = d_out\r\n self._num_nodes = num_nodes\r\n self._max_diffusion_step = max_diffusion_step\r\n self.weight = nn.Parameter(torch.FloatTensor(size=(d_in*self.num_matrices, d_out)))\r\n self.biases = nn.Parameter(torch.FloatTensor(size=(d_out,)))\r\n nn.init.xavier_normal_(self.weight.data, gain=1.414)\r\n nn.init.constant_(self.biases.data, val=bias_start)\r\n self.filter_type = filter_type\r\n self.adpt_type = adpt_type", "title": "" }, { "docid": "3a902b10cd71b6f4d33dd1a2aacf4dbd", "score": "0.4750279", "text": "def parallel_tokenizer(df):\n pool = mp.Pool(processes=4)\n df['tokenized_abs'] = pool.map(_tokenize_abstract, df['Abstract'])\n 
pool.terminate()\n return df", "title": "" }, { "docid": "80827826c1ab508f7964b58c2e54328e", "score": "0.47444448", "text": "def __init__(self):\n self._ngrams = {}", "title": "" }, { "docid": "58ee36ff98c470766416024c0c885771", "score": "0.473262", "text": "def filtering(pool,embedding='bert', model='bert_base_cased', max_seq_length=128,pooling='reduce_mean'):\n en = Encoder(embedding, model, max_seq_length) #load encoder\n result = dict()\n for key,value in pool.items():\n vector1 = en.encode([key], pooling)\n # a = vector1.reshape(1,-1)\n paraphrases = []\n for candidate in value:\n vector2 = en.encode([candidate], pooling)\n # b = vector2.reshape(1,-1)\n # cos_sim = arccos_similarity(vector1,vector2)\n cos_sim = get_similarity(vector1,vector2)\n print(cos_sim)\n if cos_sim > 0.5:\n paraphrases.append(candidate)\n result[key] = paraphrases\n return result", "title": "" }, { "docid": "74a89668d670b6068e4e8e50334f6ff4", "score": "0.47262183", "text": "def test_create_meanparallel(self):\n strategy = Strategy('MeanParallelStrategy')\n assert isinstance(strategy, MeanParallelStrategy)", "title": "" }, { "docid": "617e88e24fcb4eaa108af6b4a54fc698", "score": "0.47230762", "text": "def __init__(\n self, batch, idx_to_token, target_start_token_idx=27, target_end_token_idx=28\n ):\n self.batch = batch\n self.target_start_token_idx = target_start_token_idx\n self.target_end_token_idx = target_end_token_idx\n self.idx_to_char = idx_to_token", "title": "" }, { "docid": "e2bfdeb292898674691e1dfdad86bdad", "score": "0.47109714", "text": "def __init__(self, **kwargs):\n\n super(NUPATNATPool, self).__init__()\n\n # Read/Write Attributes\n \n self._ip_type = None\n self._name = None\n self._last_updated_by = None\n self._last_updated_date = None\n self._address_range = None\n self._default_patip = None\n self._permitted_action = None\n self._description = None\n self._embedded_metadata = None\n self._end_address_range = None\n self._end_source_address = None\n self._entity_scope = None\n self._creation_date = None\n self._use_uplink_ip = None\n self._associated_gateway_id = None\n self._associated_gateway_type = None\n self._associated_subnet_id = None\n self._associated_vlan_id = None\n self._start_address_range = None\n self._start_source_address = None\n self._owner = None\n self._external_id = None\n self._dynamic_source_enabled = None\n \n self.expose_attribute(local_name=\"ip_type\", remote_name=\"IPType\", attribute_type=str, is_required=False, is_unique=False, choices=[u'DUALSTACK', u'IPV4', u'IPV6'])\n self.expose_attribute(local_name=\"name\", remote_name=\"name\", attribute_type=str, is_required=True, is_unique=False)\n self.expose_attribute(local_name=\"last_updated_by\", remote_name=\"lastUpdatedBy\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"last_updated_date\", remote_name=\"lastUpdatedDate\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"address_range\", remote_name=\"addressRange\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"default_patip\", remote_name=\"defaultPATIP\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"permitted_action\", remote_name=\"permittedAction\", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE'])\n self.expose_attribute(local_name=\"description\", remote_name=\"description\", attribute_type=str, 
is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"embedded_metadata\", remote_name=\"embeddedMetadata\", attribute_type=list, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"end_address_range\", remote_name=\"endAddressRange\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"end_source_address\", remote_name=\"endSourceAddress\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"entity_scope\", remote_name=\"entityScope\", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])\n self.expose_attribute(local_name=\"creation_date\", remote_name=\"creationDate\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"use_uplink_ip\", remote_name=\"useUplinkIP\", attribute_type=bool, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"associated_gateway_id\", remote_name=\"associatedGatewayId\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"associated_gateway_type\", remote_name=\"associatedGatewayType\", attribute_type=str, is_required=False, is_unique=False, choices=[u'AUTO_DISC_GATEWAY', u'GATEWAY', u'IKE_GATEWAY', u'NSGATEWAY'])\n self.expose_attribute(local_name=\"associated_subnet_id\", remote_name=\"associatedSubnetId\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"associated_vlan_id\", remote_name=\"associatedVlanId\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"start_address_range\", remote_name=\"startAddressRange\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"start_source_address\", remote_name=\"startSourceAddress\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"owner\", remote_name=\"owner\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"external_id\", remote_name=\"externalID\", attribute_type=str, is_required=False, is_unique=True)\n self.expose_attribute(local_name=\"dynamic_source_enabled\", remote_name=\"dynamicSourceEnabled\", attribute_type=bool, is_required=False, is_unique=False)\n \n\n # Fetchers\n \n \n self.nat_map_entries = NUNATMapEntriesFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.address_maps = NUAddressMapsFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.enterprise_permissions = NUEnterprisePermissionsFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.statistics = NUStatisticsFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.statistics_policies = NUStatisticsPoliciesFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n \n self.bulk_statistics = NUBulkStatisticsFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n\n self._compute_args(**kwargs)", "title": "" }, { "docid": "4e5fd978cd01678c27901ae4fef2b4b7", "score": "0.4701279", 
"text": "def __init__(self, tokens, ctx):\n assert len(tokens) == 1\n self.patt = tokens[0].content\n self.patt = re.sub(' +', ' ', self.patt)\n self.patt = re.sub('\\n+', '\\n', self.patt)\n self.patt = re.sub('\\n $', '\\n', self.patt)\n self.patt = re.sub('^ \\n', '\\n', self.patt)\n self.patt = re.escape(self.patt)\n self.patt = re.sub('\\\\\\n\\\\\\ ', '\\n *', self.patt)\n self.patt = re.sub('[^\\n] ', ' +', self.patt)\n self.patt = re.sub('^\\\\\\\\\\n', '\\n*', self.patt)\n self.patt = re.sub('\\\\\\\\\\n$', '\\n*', self.patt)\n self.patt = re.sub('(.)\\\\\\\\\\n(.)', lambda x: x.group(1) + '\\n+' + x.group(2), self.patt)", "title": "" }, { "docid": "bbfcef63310231f2931de0af9bd81bd3", "score": "0.47010666", "text": "def assemble(self, prediction):\n\n raise NotImplementedError()", "title": "" }, { "docid": "07dbb2b846acaced4abb111f5807316f", "score": "0.47007063", "text": "def __init__(self, config: GPTConfig):\n super().__init__()\n self.config = config\n tp = config.execution.tensor_parallel_1\n dp = config.execution.data_parallel\n self.replica_grouping = popxl.gcg().ir.replica_grouping(stride=tp, group_size=dp)\n # identical\n self.ln_f = LayerNorm()", "title": "" }, { "docid": "94888b1327d94726d69fa54d027a9d03", "score": "0.46920502", "text": "def __init__(self, num_classes:int, num_points:int, use_input_transform:bool,\n use_feature_transform:bool):\n super(PointNetSemanticSegmentation, self).__init__()\n\n self.encoder = PointNetExtractor([64,64],[64,128,1024],\n use_input_transform, \n use_feature_transform,\n return_transform_features=[1])\n\n conv = nn.Conv1d(128, num_classes, 1)\n nn.init.zeros_(conv.bias)\n nn.init.xavier_uniform_(conv.weight)\n self.decoder = nn.Sequential(\n Conv1DModule(1088, 512),\n Conv1DModule(512, 256),\n Conv1DModule(256, 128),\n Conv1DModule(128, 128),\n conv\n )\n\n self.num_classes = num_classes\n self.num_points = num_points", "title": "" }, { "docid": "59c6a821c7797de71861e754e9d1891e", "score": "0.46915516", "text": "def __init__(self, order=1, fullyConnected=False, nIterations=100, topK=1):\n BaseTokenClassifier.__init__(self, 'mallet', topK)\n self.simpleTagger = 'java -Xmx2g -cp ' + self.classpath \\\n + ' cc.mallet.fst.SimpleTagger'\n self.crfOrder = order\n if fullyConnected:\n self.connectedOption = 'true'\n else:\n self.connectedOption = 'false'\n self.nIterations = nIterations", "title": "" }, { "docid": "d6cd302ab52bed31386b59b13f3c6b8d", "score": "0.4683276", "text": "def __init__(self):\n self.words = []\n self.idx_words = []\n self.label = []\n self.sent_leng = 0", "title": "" }, { "docid": "12ab544fbee116687e9c550b4f512f19", "score": "0.46806967", "text": "def __init__(self):\n super(NaiveAverage, self).__init__()", "title": "" }, { "docid": "9e3610f5c614a7d5a1c4a49696ff50b1", "score": "0.4661992", "text": "def ocl_run(self):\n self.input_offset.unmap() # we will use input_offset\n return super(GDMaxPooling, self).ocl_run()", "title": "" }, { "docid": "d8a635d51775d2ec9e46ef624cc1e621", "score": "0.46529335", "text": "def _add_normalization(valindex, token, norm_map):\n AnnToken = collections.namedtuple(\"AnnToken\", \"words,units,normalization,valindex\")\n cur_words = []\n cur_norms = []\n for w in token.words:\n if w in norm_map:\n cur_norms.append(w)\n else:\n cur_words.append(w)\n return AnnToken(cur_words, token.units, cur_norms, valindex)", "title": "" }, { "docid": "4768e51695cbc60812144adf1411682b", "score": "0.46512276", "text": "def nltk_annotate(self, text, mode):\n if mode == 'word_tokenize':\n return 
nltk.word_tokenize(text)\n elif mode == 'pos_tag':\n return nltk.pos_tag(text)\n elif mode == 'ner':\n return nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(text)))", "title": "" }, { "docid": "aa0492c58a7a359e07b076a11e623995", "score": "0.46509948", "text": "def preprocess(self):", "title": "" }, { "docid": "417f160968b5ee4f2d9059f1fb4fb65c", "score": "0.46447995", "text": "def __init__(self):\n self.text_normalizator = TEXT_NORM_FUNC", "title": "" }, { "docid": "9c6bc9be62a65bd183e2e5c0563baf91", "score": "0.4640304", "text": "def __call__(self,chain,n_token=None):\n \n # TODO: inputs longer than self.model.config.max_position_embeddings yield an error. The workaround here is to truncate the input with [:,:self.model.config.max_position_embeddings], but then not all tokens receive their paradigm. A more consistent solution is needed.\n\n if isinstance(n_token,int):\n if n_token>=min(chain.len, self.model.config.max_position_embeddings):\n raise Exception(f\"The toke number should be within the range of the chain length (< {min(chain.len, self.model.config.max_position_embeddings)})\")\n sent_mask = chain.mask(n_token)\n input = self.bert_tokenizer(sent_mask)\n outputs = self.model(input[\"input_ids\"][:,:self.model.config.max_position_embeddings])\n parad_logits = outputs.logits[0, n_token]\n logits_positif = tf.nn.relu(parad_logits)\n probs, norms = tf.linalg.normalize(logits_positif, ord=1)\n\n non_zeroes = tf.math.count_nonzero(probs)\n\n parad_data = tf.math.top_k(probs, k = non_zeroes.numpy(), sorted=True)\n\n parad = Paradigm(\n parad_data.indices,\n parad_data.values,\n non_zeroes,self.decoder, chain.semiotic)\n\n return parad\n\n else:\n sent_mask = [chain.mask(n) for n in range(chain.len)]\n input = self.bert_tokenizer(sent_mask)\n outputs = self.model(input[\"input_ids\"][:,:self.model.config.max_position_embeddings])\n parad_logits = tf.gather_nd(outputs.logits, indices=[[i,i] for i in range(chain.len)])\n logits_positif = tf.nn.relu(parad_logits)\n probs, norms = tf.linalg.normalize(logits_positif, ord=1, axis=1)\n \n # Maybe its cheaper to compute top k for k = non_zero, which would differ from row to row, and hence not yeld a tensor. 
To be tested.\n\n non_zeroes = tf.math.count_nonzero(probs,axis=1)\n max_non_zeroes = max(non_zeroes).numpy()\n\n parad_data = tf.math.top_k(probs, k = max_non_zeroes, sorted=True)\n\n parads = [Paradigm(ids,values,nonzeroes,self.decoder, chain.semiotic) for ids,values,nonzeroes in zip(parad_data.indices,parad_data.values,non_zeroes)]\n\n for token,parad in zip(chain,parads):\n token.paradigm = parad", "title": "" }, { "docid": "e25bc043c8a3ec1a257f850b80fda762", "score": "0.46391967", "text": "def __init__(self, example_list, hps, vocab, score):\n self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences\n\n self.init_encoder_seq(example_list, hps) # initialize the input to the encoder'''\n self.init_decoder_seq(example_list, hps) # initialize the input and targets for the decoder\n self.store_orig_strings(example_list) # store the original strings\n self.score = score", "title": "" }, { "docid": "d5aa92f2d32696e2edd77853c86c4342", "score": "0.46380487", "text": "def __call__(self, results: Dict[str, Any]):\n dataset_item = results.pop(\"dataset_item\") # Prevent unnessary deepcopy\n labels = results[\"ann_info\"][\"labels\"]\n\n ann_info = get_annotation_mmseg_format(dataset_item, labels, self.use_otx_adapter)\n\n results[\"gt_semantic_seg\"] = ann_info[\"gt_semantic_seg\"]\n results[\"seg_fields\"].append(\"gt_semantic_seg\")\n\n return results", "title": "" }, { "docid": "fc1cd46d61db435a9beca6475b83849d", "score": "0.46343052", "text": "def train(self, corpus): \n for sentence in corpus.corpus: # iterate over sentences in the corpus\n ngram = ['</s>'] * (self.N-1) # list with N-1 previous tokens\n for token in sentence: # iterate over tokens in the sentence\n ngram.append(token)\n #prefix, suffix = ngram[:-1], ngram[1:] # N-1 prefix and suffix list of tokens\n for n in range(self.N) :\n key = ' '.join(ngram[self.N-n-1:]) # construct k for ngram counting\n self.ngrams[n][key] += 1\n self.total += 1\n ngram = ngram[1:] # list with last N-1 tokens\n \n self.V = len(self.ngrams[0]) # vocabulary size \n \n #probability calculations, ngram\n if self.N == 1: #special case for unigrams\n den = math.log10(self.total + self.V) \n for ug,count in self.ngrams[0].items():\n \t\tself.f[0][ug] = math.log10(count + 1) - den \n \tself.f[0][\"<UNK>\"] = - math.log10(self.total+self.V) # OOV modeling \t \n else:\n \tfor key,count in self.ngrams[self.N-2].items(): # prefixes\n \t\tself.f[self.N-2][key] = - math.log10(count+self.V)\n \tfor ng,count in self.ngrams[self.N-1].items(): \n \tprefix = ' '.join(ng.split()[:-1])\n \t\tself.f[self.N-1][ng] = math.log10(count+1) + self.f[self.N-2][prefix] \n \tself.f[self.N-1][\"<UNK>\"] = - math.log10(self.V) # OOV modeling \t\t\t\t", "title": "" }, { "docid": "43d82c48e396355e9557ed215d58150f", "score": "0.46337268", "text": "def get_algorithm_name():\n return \"NN\"", "title": "" }, { "docid": "eb5a8ec8f272f68a7e197d4be2134f17", "score": "0.46330968", "text": "def __init__(\n self,\n metric,\n note_tokens: Sequence[Sequence[Mapping[str, Union[str, int]]]],\n note_spans: Sequence[Sequence[Mapping[str, Union[str, int]]]],\n label_mapper_list: Sequence,\n post_processor,\n note_level_aggregator,\n notation: str,\n mode: str,\n confusion_matrix: bool = False,\n format_results: bool = True\n ) -> NoReturn:\n self._metric = metric\n self._note_tokens = note_tokens\n self._note_spans = note_spans\n self._label_mapper_list = label_mapper_list\n self._note_level_aggregator = note_level_aggregator\n self._notation = notation\n self._scheme = 
MetricsCompute.get_scheme(self._notation)\n self._mode = mode\n self._post_processor = post_processor\n self._confusion_matrix = confusion_matrix\n self._format_results = format_results", "title": "" }, { "docid": "a46eb939bbcaa918e37789c8e18a86f1", "score": "0.4632452", "text": "def __init__(self, entityType, sentenceFilter):\n BaseMentionClusterer.__init__(self, entityType, sentenceFilter, useDetected=False)", "title": "" }, { "docid": "940d7838d2d5441f7d1cfa8461c34fef", "score": "0.46267042", "text": "def preprocess_corpus(train_sents):\r\n# global st\r\n# nretagsdict = defaultdict(list)\r\n# st = StanfordNERTagger(\r\n# 'C:/Users/Srishti/Desktop/nlp/cse538-assignment-2 (2018-Fall)_V2/Assignment2_for_students/stanford-ner-2018-02-27/classifiers/english.all.3class.distsim.crf.ser.gz',\r\n# 'C:/Users/Srishti/Desktop/nlp/cse538-assignment-2 (2018-Fall)_V2/Assignment2_for_students/stanford-ner-2018-02-27/stanford-ner.jar',\r\n# encoding='utf-8')\r\n#\r\n# for train_sent in train_sents:\r\n# sentence = construct_sentence(train_sent).decode('utf-8').replace(\" \\'\", \"\\'\")\r\n# tokenized_text = word_tokenize(sentence)\r\n# classified_text = st.tag(tokenized_text)\r\n# nretagsdict[sentence] = classified_text\r\n# with open('nertags.pickle', 'wb') as handle:\r\n# pickle.dump(nretagsdict, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n global n\r\n n = 35\r\n with open(\"exout.txt\") as f:\r\n content = f.readlines()\r\n for x in content:\r\n c = x.split()\r\n clustdict[c[0]] = c[1]\r\n# #wordfreq[c[0]] = c[2]\r\n \r\n pass", "title": "" }, { "docid": "dfdb6b49b4f97a93108f848cff39001d", "score": "0.4620878", "text": "def __init__(self,toks, boost, group_name='Unknown group'):\n\n ### Instantiate, and set to None, attribute placeholder for which\n ### attacking group is targeting this group.\n ###\n ### N.B. this attribuite may be set during the target selection\n ### phase, and cleared at the end of the attacking phase\n self.clear_attacker()\n\n ### Save original line, assume toks are single-spaced\n ### Save group name\n self.original = ' '.join(toks)\n self.group_name = group_name\n\n ### Parse toks (tokens) list from back end using .pop() e.g.\n ### ... with an attack that does 8 cold damage at initiative 10\n\n self.initiative = int(toks.pop())\n assert 'initiative' == toks.pop()\n assert 'at' == toks.pop()\n assert 'damage' ==toks.pop()\n self.attack_type = toks.pop()\n assert self.attack_type in GROUP.st_attacks\n self.unit_attack_damage = int(toks.pop()) + boost\n assert 'does' == toks.pop()\n assert 'that' == toks.pop()\n assert 'attack' == toks.pop()\n assert 'an' == toks.pop()\n assert 'with' == toks.pop()\n\n ### Reverse tokens to parse from front end using .pop() e.g.\n ### 18 units each with 729 hit points ...\n\n toks.reverse()\n\n self.units = int(toks.pop())\n assert 'units' == toks.pop()\n assert 'each' == toks.pop()\n assert 'with' == toks.pop()\n self.hp = int(toks.pop())\n assert 'hit' == toks.pop()\n assert 'points' == toks.pop()\n\n ### Initialize all damage multipliers to 1\n self.dt_iw_multiples = dict([(s,1,) for s in GROUP.st_attacks])\n\n ### Parse middle section immunities and/or weaknesses\n ### .. (weak to fire; immune to cold, slashing) ...\n ###\n ### N.B. 
immunity and weakness keywords tokens are always trailed\n ### by a single character that is not part of the keyword e.g\n ### \"fire;\" or \"cold,\" or \"slashing)\"\n\n ### Set immunity/weakness multiple to None to ensure parsed data\n ### syntax and grammar is correct\n iw_multiple = None\n\n ### Loop over reversed tokens i.e. from end of list, which is also\n ### the start of the tokens originally supplied\n while toks:\n\n tok = toks.pop()\n\n if tok.endswith('weak'):\n\n ### - Weaknesses double the damage of the given attack types\n\n assert iw_multiple is None\n iw_multiple = 2\n assert 'to' == toks.pop()\n continue\n\n if tok.endswith('immune'):\n\n ### - Immunities avoid all damage of the given attack types\n\n assert iw_multiple is None\n iw_multiple = 0\n assert 'to' == toks.pop()\n continue\n\n ### - Attack types (st_attacks); set multiple per last [immune to]\n ### or [weak to] tokens parsed\n\n assert tok[:-1] in GROUP.st_attacks\n assert not (iw_multiple is None)\n self.dt_iw_multiples[tok[:-1]] = iw_multiple\n\n ### - Clear iw multiple if a semi-colon is encountered\n if tok.endswith(';'): iw_multiple = None\n\n ### End of [while toks:] loop", "title": "" }, { "docid": "54962cbb5c7d46562823a3010edda6be", "score": "0.46145827", "text": "def __init__(self, tokens):\n self.tokens = tokens", "title": "" }, { "docid": "84ee5cfba5b293f2028b131e1a2eaa4e", "score": "0.46112132", "text": "def __init__(self, **kwargs):\n\n super(NUCSNATPool, self).__init__()\n\n # Read/Write Attributes\n \n self._name = None\n self._end_address = None\n self._start_address = None\n \n self.expose_attribute(local_name=\"name\", remote_name=\"name\", attribute_type=str, is_required=True, is_unique=False)\n self.expose_attribute(local_name=\"end_address\", remote_name=\"endAddress\", attribute_type=str, is_required=True, is_unique=False)\n self.expose_attribute(local_name=\"start_address\", remote_name=\"startAddress\", attribute_type=str, is_required=True, is_unique=False)\n \n\n # Fetchers\n \n \n self.c_translation_maps = NUCTranslationMapsFetcher.fetcher_with_object(parent_object=self, relationship=\"child\")\n \n\n self._compute_args(**kwargs)", "title": "" }, { "docid": "1a6e0c7b0e9a54727ca66df4b39fe201", "score": "0.4608508", "text": "def __init__(self, config: DollyConfig):\n super().__init__()\n self.config = config\n tp = config.execution.tensor_parallel\n dp = config.execution.data_parallel\n self.replica_grouping = popxl.gcg().ir.replica_grouping(stride=tp, group_size=dp)\n # identical\n self.ln_f = LayerNorm()\n shard_size = ceil(self.config.model.embedding.vocab_size / tp)\n self.head = Linear(shard_size, replica_grouping=self.replica_grouping, bias=False)", "title": "" }, { "docid": "4b64c7f4a9e97fecbf9284d470efb26f", "score": "0.46074674", "text": "def __init__(self) :\n\t\trospy.init_node(\"DWM1001_POS_Calculator_{}\".format(random.randint(0,100000)), anonymous=False)\n\t\t#Number of anchors in the network\n\t\tself.num_anchors = 4\n\t\t#Anchors' positions\n\t\tself.anchor_pos = [[47.0,-257.0],[47.0,195.0],[-598.0,197.0],[-598.0,-234.0]]\n\t\tself.anchor_pos = np.array(self.anchor_pos)\n\t\t\n\t\t#Vectors to store tag position mean and distance to each anchor\n\t\tself.mean_pos = [0,0]\n\t\tself.anchor2tag_dist = [0,0,0,0]\n\n\t\t# Empty dictionary to store topics being published\n\t\tself.topics = {}\n\n\t\t## Get raw distances to anchors\n\t\tself.d1_sub = message_filters.Subscriber(\"/utuTIERS/tag/0/to/anchor/1/distance\",Float64)\n\t\tself.d2_sub = 
message_filters.Subscriber(\"/utuTIERS/tag/0/to/anchor/2/distance\",Float64)\n\t\tself.d3_sub = message_filters.Subscriber(\"/utuTIERS/tag/0/to/anchor/3/distance\",Float64)\n\t\tself.d4_sub = message_filters.Subscriber(\"/utuTIERS/tag/0/to/anchor/4/distance\",Float64)\n\t\tats = message_filters.ApproximateTimeSynchronizer([self.d1_sub,self.d2_sub,self.d3_sub,self.d4_sub], queue_size=10, slop=0.1,allow_headerless=True)\n\t\tats.registerCallback(self.dist_cb)", "title": "" }, { "docid": "150093d4bf01de119e1fd5b73aeedb0e", "score": "0.46040753", "text": "def _align_nn(self, model_path, source_kb, target_kb, candidate_selector, cuda_device, batch_size=128):\n from emma.allennlp_classes.ontoemma_dataset_reader import OntologyMatchingDatasetReader\n from emma.allennlp_classes.ontoemma_model import OntoEmmaNN\n from emma.allennlp_classes.ontoemma_predictor import OntoEmmaPredictor\n\n alignment, s_ent_ids, t_ent_ids = self._align_string_equiv(source_kb, target_kb, candidate_selector)\n sys.stdout.write(\"%i alignments with string equivalence\\n\" % len(alignment))\n\n if cuda_device > 0:\n with device(cuda_device):\n archive = load_archive(model_path, cuda_device=cuda_device)\n else:\n archive = load_archive(model_path, cuda_device=cuda_device)\n\n predictor = Predictor.from_archive(archive, 'ontoemma-predictor')\n\n sys.stdout.write(\"Making predictions...\\n\")\n s_ent_tqdm = tqdm.tqdm(s_ent_ids,\n total=len(s_ent_ids))\n sim_scores = dict()\n\n if cuda_device > 0:\n with device(cuda_device):\n batch_json_data = []\n\n for s_ent_id in s_ent_tqdm:\n s_ent = source_kb.get_entity_by_research_entity_id(s_ent_id)\n for t_ent_id in candidate_selector.select_candidates(s_ent_id)[:constants.KEEP_TOP_K_CANDIDATES]:\n t_ent = target_kb.get_entity_by_research_entity_id(t_ent_id)\n json_data = {\n 'source_ent': s_ent.form_json(),\n 'target_ent': t_ent.form_json(),\n 'label': 0\n }\n batch_json_data.append(json_data)\n\n if len(batch_json_data) == batch_size:\n results = predictor.predict_batch_json(batch_json_data, cuda_device)\n for model_input, output in zip(batch_json_data, results):\n sim_scores[(\n model_input['source_ent']['research_entity_id'],\n model_input['target_ent']['research_entity_id']\n )] = output['score'][0]\n batch_json_data = []\n\n # finish last batch\n if batch_json_data:\n results = predictor.predict_batch_json(batch_json_data, cuda_device)\n for model_input, output in zip(batch_json_data, results):\n sim_scores[(\n model_input['source_ent']['research_entity_id'],\n model_input['target_ent']['research_entity_id']\n )] = output['score'][0]\n else:\n for s_ent_id in s_ent_tqdm:\n s_ent = source_kb.get_entity_by_research_entity_id(s_ent_id)\n for t_ent_id in candidate_selector.select_candidates(s_ent_id)[:constants.KEEP_TOP_K_CANDIDATES]:\n t_ent = target_kb.get_entity_by_research_entity_id(t_ent_id)\n json_data = {\n 'source_ent': s_ent.form_json(),\n 'target_ent': t_ent.form_json(),\n 'label': 0\n }\n output = predictor.predict_json(json_data, cuda_device)\n sim_scores[(\n json_data['source_ent']['research_entity_id'],\n json_data['target_ent']['research_entity_id']\n )] = output['score'][0]\n\n return sim_scores", "title": "" }, { "docid": "dcaa5347ab1de09e860cbcc64fc620dc", "score": "0.4601753", "text": "def __init__(self, s1, s2, labels, allpostags, allners):\n\n self.s1 = s1\n self.s2 = s2\n self.labels = labels\n self.allpostags = allpostags\n self.allners = allners\n\n # Setting the token variables\n self.s1tokens = s1.tokens\n self.s2tokens = s2.tokens\n\n # Get the word 
mapping features\n self.get_word_mapping_features()\n\n # Get the word deletion featurse for sent1\n\n self.get_word_del_featuers() # deletion features for both the sentences are handled here\n # Get edge mapping features\n\n\n # Get edge deletion featurs for sent1\n\n\n # Get edge deletion featurse for sent 2", "title": "" }, { "docid": "a69c9461e4c41f9ab92d25447dffb689", "score": "0.45994312", "text": "def _dynamic_padding(self, batch_data):\n\n\t\tpad_id_t = self.token_vocab.get_id(self.token_vocab.pad_token)\n\t\tpad_id_f = self.flag_vocab.get_id(self.flag_vocab.pad_token)\n\t\tpad_id_e = self.flag_vocab.get_id(self.elmo_vocab.pad_token)\n\t\tpad_id_c = self.char_vocab.get_id(self.char_vocab.pad_token)\n\t\tpad_p_len = min(self.p_max_tokens_len, max(batch_data['article_tokens_len']))\n\t\tpad_q_len = min(self.q_max_tokens_len, max(batch_data['question_tokens_len']))\n\n\t\tbatch_data['article_token_ids'] = [(ids + [pad_id_t] * (pad_p_len - len(ids)))[: pad_p_len]\n\t\t\t\t\t\t\t\t\t\t for ids in batch_data['article_token_ids']]\n\t\tbatch_data['question_token_ids'] = [(ids + [pad_id_t] * (pad_q_len - len(ids)))[: pad_q_len]\n\t\t\t\t\t\t\t\t\t\t\tfor ids in batch_data['question_token_ids']]\n\n\t\tbatch_data['article_flag_ids'] = [(ids + [pad_id_f] * (pad_p_len - len(ids)))[: pad_p_len]\n\t\t\t\t\t\t\t\t\t\t for ids in batch_data['article_flag_ids']]\n\t\tbatch_data['question_flag_ids'] = [(ids + [pad_id_f] * (pad_q_len - len(ids)))[: pad_q_len]\n\t\t\t\t\t\t\t\t\t\t for ids in batch_data['question_flag_ids']]\n\n\t\tbatch_data['article_elmo_ids'] = [(ids + [pad_id_e] * (pad_p_len - len(ids)))[: pad_p_len]\n\t\t\t\t\t\t\t\t\t\t for ids in batch_data['article_elmo_ids']]\n\t\tbatch_data['question_elmo_ids'] = [(ids + [pad_id_e] * (pad_q_len - len(ids)))[: pad_q_len]\n\t\t\t\t\t\t\t\t\t\t for ids in batch_data['question_elmo_ids']]\n\t\tpad_p_token_len = self.p_token_max_len\n\t\tpad_q_token_len = self.q_token_max_len\n\t\tif self.use_char_emb:\n\t\t\tbatch_data['article_c_len'] = []\n\t\t\tfor article in batch_data['article_char_ids']:\n\t\t\t\tbatch_data['article_c_len'] += [len(token) for token in article] + [0] * (pad_p_len - len(article))\n\n\t\t\tbatch_data['question_c_len'] = []\n\t\t\tfor question in batch_data['question_char_ids']:\n\t\t\t\tbatch_data['question_c_len'] += [len(token) for token in question] + [0] * (pad_q_len - len(question))\n\n\t\t\tpad_p_token_len = min(self.p_token_max_len, max(batch_data['article_c_len']))\n\t\t\tpad_q_token_len = min(self.q_token_max_len, max(batch_data['question_c_len']))\n\n\t\t\tbatch_data['article_char_ids'] = [\n\t\t\t\t([(ids + [pad_id_c] * (pad_p_token_len - len(ids)))[:pad_p_token_len] for ids in tokens] + [\n\t\t\t\t\t[pad_id_c] * pad_p_token_len] * (pad_p_len - len(tokens)))[:pad_p_len] for tokens\n\t\t\t\tin batch_data['article_char_ids']]\n\n\t\t\tbatch_data['question_char_ids'] = [\n\t\t\t\t([(ids + [pad_id_c] * (pad_q_token_len - len(ids)))[:pad_q_token_len] for ids in tokens] + [\n\t\t\t\t\t[pad_id_c] * pad_q_token_len] * (pad_p_len - len(tokens)))[:pad_q_len] for tokens\n\t\t\t\tin batch_data['question_char_ids']]\n\t\t# print(len(batch_data))\n\t\treturn batch_data, pad_p_len, pad_q_len, pad_p_token_len, pad_q_token_len", "title": "" }, { "docid": "b940d5524e3667dea1612569aa56ade5", "score": "0.4591235", "text": "def convert_pooling(node, **kwargs):\n from onnx.helper import make_node\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n kernel = convert_string_to_list(attrs.get('kernel', '()'))\n pool_type = 
attrs.get('pool_type', 'max')\n global_pool = attrs.get('global_pool', 'False')\n global_pool = global_pool in ['True', '1']\n _ = attrs.get('cudnn_off', 'False')\n pooling_convention = attrs.get('pooling_convention', 'valid')\n stride = convert_string_to_list(attrs.get('stride', '()'))\n pad = convert_string_to_list(attrs.get('pad', '()'))\n p_value = attrs.get('p_value', '0')\n if p_value != 'None':\n p_value = int(p_value)\n count_include_pad = attrs.get('count_include_pad', 'True')\n layout = attrs.get('layout', 'NCHW')\n\n if pooling_convention == 'same':\n raise NotImplementedError('Pooling currently does not support '\n 'pooling_convention==\\'same\\'')\n if pool_type == 'sum':\n raise NotImplementedError('Pooling currently does not support pool_type==\\'sum\\'')\n if pool_type == 'lp' and not global_pool and pooling_convention != 'valid':\n raise NotImplementedError('Pooling currently does not support '\n 'pooling_convention!=\\'valid\\' when pool_type==\\'lp\\' and global_pool==False')\n\n if layout not in ['NCHW', 'NCDHW']:\n raise NotImplementedError('Pooling currently does not support layout not in '\n '[\\'NCHW\\', \\'NCDHW\\']')\n\n kwargs_ = {}\n if kernel:\n kwargs_['kernel_shape'] = tuple(kernel)\n if pad:\n kwargs_['pads'] = tuple(pad) + tuple(pad)\n if stride:\n kwargs_['strides'] = stride\n\n ceil_mode = 1 if pooling_convention == 'full' else 0\n count_include_pad = 1 if count_include_pad == 'True' else 0\n\n nodes = []\n if pool_type == 'avg' and not global_pool:\n nodes += [\n make_node('AveragePool', [input_nodes[0]], [name], ceil_mode=ceil_mode,\n count_include_pad=count_include_pad, **kwargs_)\n ]\n elif pool_type == 'max' and not global_pool:\n nodes += [\n make_node('MaxPool', [input_nodes[0]], [name], ceil_mode=ceil_mode, **kwargs_)\n ]\n elif pool_type == 'lp' and not global_pool:\n nodes += [\n make_node('LpPool', [input_nodes[0]], [name], p=p_value, **kwargs_)\n ]\n elif pool_type == 'avg' and global_pool:\n nodes += [\n make_node('GlobalAveragePool', [input_nodes[0]], [name])\n ]\n elif pool_type == 'max' and global_pool:\n nodes += [\n make_node('GlobalMaxPool', [input_nodes[0]], [name])\n ]\n elif pool_type == 'lp' and global_pool:\n nodes += [\n make_node('GlobalLpPool', [input_nodes[0]], [name], p=p_value)\n ]\n else:\n raise NotImplementedError('Unknown pool_type in Pooling')\n\n return nodes", "title": "" }, { "docid": "7fd9ddaebbff6017fdf42f2948bde6be", "score": "0.45896894", "text": "def model_api(self, input_data):\n output = \"\"\n for sentence in sent_tokenize(input_data):\n if self.char_emb:\n tokens = self._char_emb_format(sentence)\n else:\n tokens = sentence # word-level emb\n \n normalized = self.normalizer.predict(tokens)\n\n if self.char_emb:\n normalized = normalized.replace(' ', '') \\\n .replace('<space>', ' ')\n\n output += normalized\n return output", "title": "" }, { "docid": "e414954912c06f0c73894f9928c19f78", "score": "0.45874992", "text": "def __init__(self):\n\t\t#change 3rd param\n\t\tself.neural = ANN(48*48,7,2,1200,10,0.5)", "title": "" }, { "docid": "dd310285571d2ab849206a677dff0521", "score": "0.45871276", "text": "def __init__(self, features, sampler, is_softgate=True, cuda=True, gcn=False):\n\n super(MeanAggregator, self).__init__()\n\n self.features = features\n self.sampler = sampler\n self.is_softgate = True\n self.cuda = cuda\n self.gcn = gcn", "title": "" }, { "docid": "fb1942e236ff314f6463433f8f136275", "score": "0.45804596", "text": "def encode(\r\n self, sentence: str, *addl_sentences, no_separator=False\r\n ) -> 
torch.LongTensor:\r\n bpe_sentence = \"<s> \" + self.bpe.encode(sentence) + \" </s>\"\r\n for s in addl_sentences:\r\n bpe_sentence += \" </s>\" if not no_separator else \"\"\r\n bpe_sentence += \" \" + self.bpe.encode(s) + \" </s>\"\r\n tokens = self.task.source_dictionary.encode_line(\r\n bpe_sentence, append_eos=False, add_if_not_exist=False\r\n )\r\n return tokens.long()", "title": "" }, { "docid": "958bc495d0d6b09bf89c91cccbfe9a72", "score": "0.45758593", "text": "def __init__(self, raw):\n self.raw = raw\n self.token = list()\n self.slot = list()\n self.alignment = list()\n self.s = -1\n\n self.tag = list()\n self.mat = np.zeros(0)\n self.matches = list()\n\n self.pred = list()\n\n self.scenario_idx = 0\n self.sentence_idx = 0", "title": "" }, { "docid": "37a7003611ef71ca0c25aba78e6c64fd", "score": "0.45735148", "text": "def __init__(self, tagged_sents):\n # WORK HERE!!\n # COLLECT REQUIRED STATISTICS INTO DICTIONARIES.\n self.tagged_sents = tagged_sents\n self.word_tags = defaultdict(lambda: defaultdict(int))\n self.tag_words = defaultdict(lambda: defaultdict(int))\n self.freq_tag = defaultdict(int)\n self.freq_word = defaultdict(int)\n nsents = 0\n token = 0\n for sent in self.tagged_sents:\n nsents += 1\n for word, tag in sent:\n self.word_tags[word][tag] += 1 \n self.tag_words[tag][word] += 1\n self.freq_word[word] += 1\n self.freq_tag[tag] += 1\n token += 1\n self._scount = nsents\n self._tkcount = token\n self._wcount = len(self.word_tags)\n self._tcount = len(self.tag_words)\n self.word_tags = dict(self.word_tags)\n self.tag_words = dict(self.tag_words)\n self.freq_tag = dict(self.freq_tag)\n self.freq_word = dict(self.freq_word)", "title": "" }, { "docid": "d5149faf52b591570273cdeeb67199eb", "score": "0.45631132", "text": "def preprocess(self,raw_query):\n tmp = pseg.cut(raw_query)\n words=[]\n pos=[]\n for word, flag in tmp:\n words.append(word)\n pos.append(flag)\n inst={}\n inst['tag']=pos\n inst['word']=words\n del words\n del pos\n inst['query']=raw_query\n return inst", "title": "" }, { "docid": "bed70000cc7f68fa1c0140e5c75c99de", "score": "0.4560323", "text": "def learn(self,tokenizer):\n lastN = tuple([None] * self.n)\n for token in tokenizer:\n counts = self.db.get(lastN,{})\n counts[token] = counts.get(token,0)+1\n self.db[lastN] = counts\n\n lastN = list(lastN)\n lastN.pop(0)\n lastN.append(token)\n lastN = tuple(lastN)", "title": "" }, { "docid": "947c9086aeeb7d7eb65784581e3ba744", "score": "0.45597887", "text": "def __call__(self, batch: List[Dict[str, Any]]):\n batch_sentences = [sample['sentences'] for sample in batch]\n # all the sentences in all samples of batch\n # len = number of all sentences in batch (sum of sentences of each sample of whole batch)\n all_sentences = [sent for sample in batch_sentences for sent in sample]\n # (N_batch_sent x N_sent_tok x d_language_model)\n encoded_batch = self.tokenizer.batch_encode_plus(\n all_sentences,\n max_length=self.max_length,\n add_special_tokens=True,\n padding=True,\n truncation=True,\n return_token_type_ids=False,\n return_attention_mask=True,\n return_special_tokens_mask=True,\n return_tensors='pt'\n )\n\n # for each sample, a list of all the start indices (token index) of its sentences\n sentences_start_positions_batch: List[List[int]] = []\n # for each sample, a list of all lengths (number of tokens) of its sentences\n sentences_lengths_batch: List[List[int]] = []\n\n start_index = 0\n sentence_lengths = encoded_batch['attention_mask'].sum(-1).int() # (N_batch_sent)\n for sample in batch:\n num_sentences = 
len(sample['sentences'])\n\n sample_sentence_lengths = sentence_lengths[start_index:start_index + num_sentences] # (num_sent)\n sentences_lengths_batch.append(sample_sentence_lengths.tolist())\n sample_start_indices = sample_sentence_lengths.cumsum(dim=0) - sample_sentence_lengths[0]\n sentences_start_positions_batch.append(sample_start_indices.tolist())\n\n start_index += num_sentences\n\n encoded_batch['sentence_splits'] = BatchSentenceSplitsInfo.compute_for_batch(sentences_start_positions_batch,\n sentences_lengths_batch)\n return encoded_batch", "title": "" }, { "docid": "574d005957f111be356992ae8ddea91e", "score": "0.4545944", "text": "def __init__(self):\n super(GlobalAvgPool2d, self).__init__()", "title": "" }, { "docid": "574d005957f111be356992ae8ddea91e", "score": "0.4545944", "text": "def __init__(self):\n super(GlobalAvgPool2d, self).__init__()", "title": "" }, { "docid": "574d005957f111be356992ae8ddea91e", "score": "0.4545944", "text": "def __init__(self):\n super(GlobalAvgPool2d, self).__init__()", "title": "" }, { "docid": "dd5151ca82df34e1c042b60c6d5d8021", "score": "0.454545", "text": "def __init__(self,\n name,\n is_lazy,\n lazy_directory,\n debug,\n encoding):\n\n super(MEltPreProcessor, self).__init__(name,\n is_lazy,\n lazy_directory,\n debug,\n encoding,\n \"/\")\n\n self.set_sentence_tokenizer(PunktSentenceTokenizer())", "title": "" }, { "docid": "52cb075f9bffe41c29b45e929c98f9ef", "score": "0.45438153", "text": "def get_word_word_attention(token_token_attention, words_to_tokens, mode=\"mean\"):\n\n #print(token_token_attention)\n #print(words_to_tokens)\n\n word_word_attention = np.array(token_token_attention)\n not_word_starts = []\n for word in words_to_tokens:\n not_word_starts += word[1:]\n\n # sum up the attentions for all tokens in a word that has been split\n for word in words_to_tokens:\n #print(word)\n word_word_attention[:, word[0]] = word_word_attention[:, word].sum(axis=-1)\n word_word_attention = np.delete(word_word_attention, not_word_starts, -1)\n\n # several options for combining attention maps for words that have been split\n # we use \"mean\" in the paper\n for word in words_to_tokens:\n if mode == \"first\":\n pass\n elif mode == \"mean\":\n word_word_attention[word[0]] = np.mean(word_word_attention[word], axis=0)\n elif mode == \"max\":\n word_word_attention[word[0]] = np.max(word_word_attention[word], axis=0)\n word_word_attention[word[0]] /= word_word_attention[word[0]].sum()\n else:\n raise ValueError(\"Unknown aggregation mode\", mode)\n word_word_attention = np.delete(word_word_attention, not_word_starts, 0)\n\n return word_word_attention", "title": "" }, { "docid": "66a641ee487f228b828ab75115f10c2b", "score": "0.4541561", "text": "def __init__(self) -> None:\n self.tokenizer = lambda x: list(x)", "title": "" }, { "docid": "68396556a624a5229a2c18335bf56924", "score": "0.45271772", "text": "def __init__(self):\n # those are not used here\n # self.chunk_id, self.ground_truth, self.ground_truth_len, self.begin_gm, self.end_gm\n # self.cand_entities_labels,\n\n self.sample = None\n self.empty = True", "title": "" }, { "docid": "00555cb31f7dd1af3adcbb7161971608", "score": "0.45247", "text": "def __init__(self, num_sample=None, pooling_type='sum', gcn=False):\n super(Aggregator, self).__init__()\n self.num_sample = num_sample\n self.pooling_type = pooling_type\n self.gcn = gcn\n assert pooling_type in ['sum', 'average', 'max']", "title": "" }, { "docid": "c627b5cf1637ff457455ffa1cbe2f0d7", "score": "0.45240444", "text": "def __call__(self, 
query):\n\n # Add prefix, if necessary\n if self.prefix:\n query = f\"{self.prefix}{query}\"\n\n # Tokenize and generate text using model\n features = self.tokenizer([query], return_tensors=\"pt\")\n output = self.model.generate(input_ids=features[\"input_ids\"], attention_mask=features[\"attention_mask\"], max_length=self.maxlength)\n\n # Decode tokens to text\n result = self.tokenizer.decode(output[0], skip_special_tokens=True)\n\n # Clean and return generated text\n return self.clean(result)", "title": "" }, { "docid": "9ad2c28911740a1bbd00e2aee19252a1", "score": "0.45193005", "text": "def __init__(self, poolname: str) -> None:\n\n super().__init__(poolname, PoolResourcesMetricsDimensions.USAGE)", "title": "" }, { "docid": "db64dd07d188b05aad449054374d6371", "score": "0.45170882", "text": "def __call__(self, batch: List[Dict[str, Any]]):\n batch_sentences = [sample['sentences'] for sample in batch]\n # all the sentences in all samples of batch\n # len = number of all sentences in batch (sum of sentences of each sample of whole batch)\n all_sentences = [sent for sample in batch_sentences for sent in sample]\n # for each batch the related slice of all_sentences\n sentences_slices_of_samples = []\n start_index = 0\n for sample_sentences in batch_sentences:\n num_sentences = len(sample_sentences)\n sentences_slices_of_samples.append(slice(start_index, start_index + num_sentences))\n start_index += num_sentences\n max_content_length = self.max_length - self.tokenizer.num_special_tokens_to_add(pair=False)\n # concatenate_sentences\n # 1. encode all sentences (without special tokens or padding)\n all_sentences = self.tokenizer.batch_encode_plus(\n all_sentences,\n max_length=max_content_length,\n add_special_tokens=False,\n padding=False,\n truncation=True,\n return_token_type_ids=False,\n return_attention_mask=False\n )\n # 2. 
concatenate encoded sentences and add special tokens (and store sentence split positions)\n # for each sample, a list of all the start indices (token index) of its sentences\n sentences_start_positions_batch: List[List[int]] = []\n # for each sample, a list of all lengths (number of tokens) of its sentences\n sentences_lengths_batch: List[List[int]] = []\n # batch of all inputs ids\n input_ids_batch: List[List[int]] = []\n special_tokens_mask_batch: List[List[int]] = []\n for sentence_slice_of_sample in sentences_slices_of_samples:\n # list of the sentences (input_ids of each sentence) of this sample\n sentences_of_sample: List[List[int]] = all_sentences['input_ids'][sentence_slice_of_sample]\n\n input_ids_of_sample: List[int] = []\n current_start_index = 0\n sentences_start_positions: List[int] = []\n sentences_lengths: List[int] = []\n for sentence in sentences_of_sample:\n if current_start_index + len(sentence) <= max_content_length:\n input_ids_of_sample.extend(sentence)\n sentences_start_positions.append(current_start_index)\n sentences_lengths.append(len(sentence))\n current_start_index += len(sentence)\n elif current_start_index == 0:\n # no sentence added\n input_ids_of_sample.extend(sentence[:max_content_length])\n sentences_start_positions.append(0)\n sentences_lengths.append(max_content_length)\n break\n else:\n break\n input_ids_with_special_tokens = self.tokenizer.build_inputs_with_special_tokens(input_ids_of_sample)\n input_ids_batch.append(input_ids_with_special_tokens)\n special_tokens_mask_batch.append(self.tokenizer.get_special_tokens_mask(input_ids_with_special_tokens,\n already_has_special_tokens=True))\n sentences_start_positions_batch.append(sentences_start_positions)\n sentences_lengths_batch.append(sentences_lengths)\n # 3. pad and convert to PyTorch\n batch_sentences = self.tokenizer.pad(\n {'input_ids': input_ids_batch, 'special_tokens_mask': special_tokens_mask_batch},\n return_attention_mask=True,\n return_tensors='pt'\n )\n batch_sentences['sentence_splits'] = BatchSentenceSplitsInfo.compute_for_batch(sentences_start_positions_batch,\n sentences_lengths_batch)\n return batch_sentences", "title": "" }, { "docid": "d7641f4dc7bdb0f5cea6d7ff51ab9189", "score": "0.45155045", "text": "def __init__(self, pool: dict = None):\n self.pool = pool or {}", "title": "" }, { "docid": "fff9db2b78afdfd5a4d8e7886e48ee17", "score": "0.45139164", "text": "def __init__(self, ae, name=''):\n Operation.__init__(self, ae, name)", "title": "" }, { "docid": "fff9db2b78afdfd5a4d8e7886e48ee17", "score": "0.45139164", "text": "def __init__(self, ae, name=''):\n Operation.__init__(self, ae, name)", "title": "" }, { "docid": "e20659c7b3118b030de16ab32e9f8a02", "score": "0.45086312", "text": "def acc_sg(self):\n return super(Noun, self).add_m()", "title": "" }, { "docid": "c2c177980b39525b990e4d86602efd46", "score": "0.4506157", "text": "def __init__(self, vocab_size, d_model):\n super(TokenEmbedding, self).__init__(vocab_size, d_model, padding_idx=1)", "title": "" }, { "docid": "639671e3e4f64b45f03cb96e5fd73a64", "score": "0.45020097", "text": "def __init__(self, train_set, tokenisation_type=\"unigram\", lowercase=True):\n self.lowercase = lowercase\n\n # stores word counts for each label.\n label_to_word_freq_dict = {\n \"NOT ENOUGH INFO\": defaultdict(float),\n \"SUPPORTS\": defaultdict(float),\n \"REFUTES\": defaultdict(float)\n }\n\n # total proportions for each label\n label_proportions = defaultdict(int)\n\n # determine which feature computation to use (default unigram)\n 
self.tokenisation_type = tokenisation_type\n self.token_splitter = self.get_token_splitter(self.tokenisation_type)\n\n # loop over train set\n for example in train_set:\n claim = example['claim']\n\n if self.lowercase:\n claim = claim.lower()\n\n tokenized_claim = self.token_splitter(claim)\n label = example['label']\n label_proportions[label] +=1\n\n # loop over words in claim\n for token in tokenized_claim:\n # add token count\n label_to_word_freq_dict[label][token] += 1.0\n\n self.label_proportions = label_proportions\n self.labels = sorted(self.label_proportions.keys())\n\n # normalise by total label frequency\n for label in label_to_word_freq_dict.keys():\n label_proportion = label_proportions[label]\n for word in label_to_word_freq_dict[label].keys():\n label_to_word_freq_dict[label][word] = label_to_word_freq_dict[label][word] / label_proportion\n\n self.label_to_word_freq_dict = label_to_word_freq_dict\n\n # optional\n self.get_token_entropies()\n print(\"Done fitting Naive Bayes.\")", "title": "" }, { "docid": "8a62c00277e5b74e0868d5cf93ad63fc", "score": "0.45004335", "text": "def do_nsmls(self, a, buffer=None):\n _ = self.nsmls(a,buffer)", "title": "" }, { "docid": "0374fe20511fd95f5f4beb8d1cf310e4", "score": "0.4499713", "text": "def __init__(self, train=train_sents):\n\t\tself._chunkParser = NEChunkParser(train)", "title": "" }, { "docid": "e49b110b3aa74290e8080dea760b4352", "score": "0.44990462", "text": "def random_word(tokens, tokenizer, inference_mode: bool = False):\n output_label = []\n\n mask_id = tokenizer.all_special_ids[tokenizer.all_special_tokens.index(tokenizer.special_tokens_map['mask_token'])]\n\n for i in range(len(tokens)):\n prob = random.random()\n # mask token with 15% probability\n if prob < 0.15 and not inference_mode:\n prob /= 0.15\n\n # 80% randomly change token to mask token\n if prob < 0.8:\n #token = '[MASK]'\n token = mask_id\n # 10% randomly change token to random token\n elif prob < 0.9:\n token = random.choice(list(tokenizer.get_vocab().items()))[1]\n #token = random.choice(list(tokenizer.token_to_idx.items()))[0]\n #while (token in tokenizer.symbols) or (token == tokens[i]):\n while (token in tokenizer.all_special_ids) or (token == tokens[i]):\n token = random.choice(list(tokenizer.get_vocab().items()))[1]\n #token = random.choice(list(tokenizer.token_to_idx.items()))[0]\n # -> rest 10% randomly keep current token\n else:\n token = tokens[i]\n\n # set the replace token and append token to output (we will predict these later)\n try:\n #output_label.append(tokenizer.token_to_idx[tokens[i]])\n output_label.append(token)\n tokens[i] = token\n except KeyError:\n # For unknown words (should not occur with BPE vocab)\n output_label.append(tokenizer.token_to_idx['[UNK]'])\n logger.warning('Cannot find token \"{}\" in token_to_idx. 
Using [UNK] instead'.format(tokens[i]))\n else:\n # no masking token (will be ignored by loss function later)\n output_label.append(-1)\n\n return tokens, output_label", "title": "" }, { "docid": "3f33464a11f92d0155cf8c1ecf21baaf", "score": "0.44983798", "text": "def test_multiprocess_entity_data(self):\n max_seq_len = 7\n max_ent_len = 10\n self.args.data_config.max_seq_len = max_seq_len\n self.args.data_config.max_ent_len = max_ent_len\n input_data = [\n {\n \"aliases\": [\"alias1\", \"multi word alias2\"],\n \"qids\": [\"Q1\", \"Q4\"],\n \"sent_idx_unq\": 0,\n \"sentence\": \"alias1 or multi word alias2\",\n \"spans\": [[0, 1], [2, 5]],\n \"gold\": [True, True],\n }\n ]\n\n # THERE ARE NO DESCRIPTIONS\n X_entity_dict = self.tokenizer(\n [\n \"[SEP]\",\n \"alias1 [ent_type] T1 [ent_type] T2\",\n \"multi alias2 [ent_type] T3\",\n \"word alias3 [ent_type]\",\n \"nonalias4 [ent_type] T2\",\n \"[SEP]\",\n ],\n max_length=max_ent_len,\n padding=\"max_length\",\n truncation=True,\n add_special_tokens=True,\n )\n gold_entity_to_mask = [\n [0 for _ in range(len(inp))] for inp in X_entity_dict[\"input_ids\"]\n ]\n gold_entity_to_mask[1][1:3] = [1, 1]\n gold_entity_to_mask[2][1:4] = [1, 1, 1]\n gold_entity_to_mask[3][1:4] = [1, 1, 1]\n gold_entity_to_mask[4][1:5] = [1, 1, 1, 1]\n utils.write_jsonl(self.temp_file_name, input_data)\n dataset = BootlegDataset(\n self.args,\n name=\"Bootleg_test\",\n dataset=self.temp_file_name,\n use_weak_label=True,\n load_entity_data=True,\n tokenizer=self.tokenizer,\n entity_symbols=self.entity_symbols,\n dataset_threads=3,\n split=\"train\",\n is_bert=True,\n )\n self.assertListEqual(\n X_entity_dict[\"input_ids\"],\n dataset.X_entity_dict[\"entity_input_ids\"].tolist(),\n )\n self.assertListEqual(\n X_entity_dict[\"token_type_ids\"],\n dataset.X_entity_dict[\"entity_token_type_ids\"].tolist(),\n )\n self.assertListEqual(\n X_entity_dict[\"attention_mask\"],\n dataset.X_entity_dict[\"entity_attention_mask\"].tolist(),\n )\n self.assertListEqual(\n gold_entity_to_mask,\n dataset.X_entity_dict[\"entity_to_mask\"].tolist(),\n )", "title": "" }, { "docid": "ae8a949357da42ca5415897db63ba1de", "score": "0.4491799", "text": "def postprocessing(algorithm, size, T_clustered, T_structure,\n T_postprocessed, T_max_vals=None, T_min_vals=None):\n\n idxs_merged = list() # Already visited groups\n groups_merged = list() # Resulting merged groups\n structure_merged = list() # Updated group labels\n\n\n # 1. 
Find the two candidate groups\n # print(\"T clustered is \", T_clustered)\n \"\"\" print(\"T_structure is \", T_structure) \"\"\"\n for idx, bad_group in enumerate(T_clustered):\n bad_g_size = len(bad_group)\n if bad_g_size < size: # For any bad group\n bad_group_vals = list(bad_group.values())\n \"\"\" print(\"idx is \", idx) \"\"\"\n label = T_structure[idx]\n\n # 1.a Find its nearest neighbour (NN) - 1st candidate group\n\n # Search the group's NN as the one\n # with the most similar label\n idx_nn = -1\n found_nn = False\n metric_nn = float('inf')\n\n # print(\"T_structure is \", T_structure)\n # print(\"Bad group label is \", label)\n for other_idx, other_label in enumerate(T_structure):\n # Per label construction, if the two labels\n # bar the last char are equal, it means the two groups\n # come from the same parent; hence they are the respective NN\n # print(\"other label \", other_label)\n if label[:-1] == other_label[:-1]: \n if idx == other_idx:\n continue\n\n # If the group hasn't already been merged\n # with another group, mark it as a valid NN\n if other_idx not in idxs_merged:\n found_nn = True\n idx_nn = other_idx\n break\n # print(\"bad group index \", idx)\n # print(\"indexes merged \", idxs_merged)\n\n merge_with_other_group = False\n if found_nn:\n group_nn = T_clustered[idx_nn]\n elif idx_nn !=idx:\n if idx - 1 > 0:\n idx_nn = idx - 1\n elif idx + 1 < len(T_structure) - 1:\n idx_nn = idx + 1 \n group_nn = T_clustered[idx_nn]\n merge_with_other_group = True\n\n if found_nn or merge_with_other_group:\n group_merged_nn = bad_group_vals\n\n \n group_merged_nn = group_merged_nn \\\n + list(group_nn.values())\n \n if algorithm == 'naive':\n metric_nn = normalized_certainty_penalty(group_merged_nn,\n T_max_vals, T_min_vals)\n elif algorithm == 'kapra':\n metric_nn = instant_value_loss(group_merged_nn)\n\n # Redefine group_merged_nn as dict\n group_merged_nn = dict()\n group_merged_nn.update(bad_group)\n group_merged_nn.update(group_nn)\n\n # 1.b Find the most appropriate large group (>= 2*size -|G|) - 2nd candidate group\n metric_large_g = float('inf')\n idx_large_g = -1\n\n for other_idx, other_group in enumerate(T_clustered):\n # If the group is large enough\n if len(other_group) >= 2*size - bad_g_size: # 2*size - |G|\n # print(\"dentro if large group metric\")\n if other_idx not in idxs_merged:\n group_merged_large_g = bad_group.copy()\n group_large_g_vals = list(group_merged_large_g.values())\n\n # Select the size - |G| records from the large group that minimize\n # the intra-NCP or VL metric with the original group\n for j in range(size - bad_g_size): # size - |G|\n tmp_metric = float('inf')\n\n best_record = {}\n best_row = []\n\n # Select the best record to merge\n # at the j-th iteration\n for ridx, row in other_group.items():\n if ridx not in group_merged_large_g.keys():\n if algorithm == 'naive':\n metric = normalized_certainty_penalty(group_large_g_vals + [ row ],\n T_max_vals, T_min_vals)\n elif algorithm == 'kapra':\n metric = instant_value_loss(group_large_g_vals + [ row ])\n\n if metric < tmp_metric: # Update min metric\n best_record = { ridx : row }\n tmp_metric = metric\n best_row = row\n \n group_merged_large_g.update(best_record)\n group_large_g_vals.append(best_row)\n\n # Check if the current candidate large group\n # is better than any previous ones\n if tmp_metric < metric_large_g:\n metric_large_g = tmp_metric\n idx_large_g = other_idx\n\n # Isolate the records that are kept from\n # the original (2*size - |G|) large group\n leftover_group_large_g = { k 
: val for (k, val)\n in other_group.items()\n if k not in group_merged_large_g.keys() }\n # print(\"group_merged_large_g \\n\\n\", group_merged_large_g)\n \"\"\" print(\"Metric nn: \", str(metric_nn))\n print(\"Metric large group: \", str(metric_large_g))\n print(\"Found nn: \", str(found_nn))\n # print(\"group merged nn \", group_merged_nn)\n print(\"Bad group label: \", label)\n print(\"Bad group: \", bad_group) \"\"\"\n # 1.c Choose which of the two candidate\n # groups is best to merge with\n \"\"\" if math.isinf(metric_nn) and math.isinf(metric_large_g):\n continue \"\"\"\n if metric_nn < metric_large_g: \n idxs_merged.append(idx_nn)\n groups_merged.append(group_merged_nn)\n structure_merged.append(label[:-1]) \n # print(\"NN CASE\")\n else:\n # print(\"dentro else\")\n idxs_merged.append(idx_large_g)\n # Add both groups to merge\n groups_merged.append(group_merged_large_g)\n groups_merged.append(leftover_group_large_g)\n # print(\"LARGE GROUP CASE\")\n structure_merged.append('') # Add empty label for new group\n\n # Add the currently processed group Id\n # to already visited groups Ids\n idxs_merged.append(idx)\n\n # 2. Re-assest outer data structures\n T_clustered = [ group for (idx, group)\n in enumerate(T_clustered)\n if idx not in idxs_merged ]\n T_clustered += groups_merged \n\n T_structure = [ label for (idx, label)\n in enumerate(T_structure)\n if idx not in idxs_merged]\n T_structure += structure_merged\n\n T_postprocessed += T_clustered\n\n # 3. Check if there are any more bad groups\n bad_groups_cnt = 0\n\n for group in T_clustered:\n if len(group) < size:\n bad_groups_cnt +=1\n\n # print(\"Number of bad groups before a possible recursive call: \", str(bad_groups_cnt))\n\n# def postprocessing(algorithm, size, T_clustered, T_structure, T_postprocessed, T_max_vals=None, T_min_vals=None):\n if bad_groups_cnt > 0: # Call recursively if any left\n postprocessing(algorithm, size, T_clustered, T_structure,\n T_postprocessed, T_max_vals, T_min_vals)", "title": "" }, { "docid": "d1b38f519cf83972e4e6bd0b8d565adc", "score": "0.44888708", "text": "def __augment_pad(__image, __label):\n image_shape = tf.shape(__image)\n image_height = image_shape[0]\n image_width = image_shape[1]\n target_height = image_height + tf.maximum(self.crop_height - image_height, 0)\n target_width = image_width + tf.maximum(self.crop_width - image_width, 0)\n __image = image_process_util.pad_to_bounding_box(\n image=__image,\n offset_height=0,\n offset_width=0,\n target_height=target_height,\n target_width=target_width,\n pad_value=MEAN_RGB)\n __label = image_process_util.pad_to_bounding_box(\n image=__label,\n offset_height=0,\n offset_width=0,\n target_height=target_height,\n target_width=target_width,\n pad_value=self.segmentation_ignore_value)\n return __image, __label", "title": "" }, { "docid": "660b65d8f91d5c449fed3eff67d481a2", "score": "0.44872338", "text": "def main(unused_argv):\n if not FLAGS.submission_file:\n raise ValueError(\"You must input submission file.\")\n eval_labels = read_labels(\n FLAGS.eval_data_pattern, cache_path=FLAGS.label_cache)\n tf.logging.info(\"Total rated segments: %d.\" % len(eval_labels.labels))\n positive_counter = {}\n for k, v in eval_labels.labels.items():\n _, label_id = k\n if v > 0:\n positive_counter[label_id] = positive_counter.get(label_id, 0) + 1\n\n seg_preds = read_segment_predictions(\n FLAGS.submission_file, eval_labels, top_n=FLAGS.top_n)\n map_cal = map_calculator.MeanAveragePrecisionCalculator(len(seg_preds))\n seg_labels = []\n seg_scored_preds = []\n 
num_positives = []\n for label_id in sorted(seg_preds):\n class_preds = seg_preds[label_id]\n seg_label = [eval_labels.labels[(pred, label_id)] for pred in class_preds]\n seg_labels.append(seg_label)\n seg_scored_pred = []\n if class_preds:\n seg_scored_pred = [\n float(x) / len(class_preds) for x in range(len(class_preds), 0, -1)\n ]\n seg_scored_preds.append(seg_scored_pred)\n num_positives.append(positive_counter.get(label_id, 0))\n map_cal.accumulate(seg_scored_preds, seg_labels, num_positives)\n map_at_n = np.mean(map_cal.peek_map_at_n())\n tf.logging.info(\"Num classes: %d | mAP@%d: %.6f\" %\n (len(seg_preds), FLAGS.top_n, map_at_n))", "title": "" }, { "docid": "d80ef0ea9cf43949b8e0e4ce8234b5da", "score": "0.44838348", "text": "def __init__(self, tokens, pred_state):\n self.tokens = tokens\n self.pred_state = pred_state\n self.score = 0.0\n self.score_pos = 0", "title": "" }, { "docid": "072f2ac7fd21c7a0796e35004363cba5", "score": "0.4481153", "text": "def weighted_pooling(raw_representation, positional_weighting, tokens):\n positional_weighting_non_zero = non_zero_tokens(tf.to_float(tokens)) * positional_weighting\n pooled_representation = tf.matmul(\n tf.reshape(positional_weighting_non_zero, [-1, 1, tf.shape(positional_weighting)[1]]),\n raw_representation\n )\n return tf.reshape(pooled_representation, [-1, tf.shape(raw_representation)[2]])", "title": "" }, { "docid": "d47bc0d62b2bb282a79065ed2106d9a9", "score": "0.44773206", "text": "def operations(self):", "title": "" }, { "docid": "c4c2e016006c56bea5218a13c3a0f628", "score": "0.44756314", "text": "def __init__(self, n_clusters = 8, random_state = None, algo = \"full\"):\n self.model = KMeans(n_clusters=n_clusters, random_state=random_state,algorithm=algo)", "title": "" }, { "docid": "720d2a2202db7e5ecde95b90d12fa2fc", "score": "0.44754562", "text": "def __init__(self, op, alpha):\n self._op = op\n self._alpha = alpha\n\n super(ScaledBlockedOperator, self).__init__()", "title": "" }, { "docid": "73c30d4e552b44e3f223a8cf0cde0368", "score": "0.44730443", "text": "def __init__(\n self,\n *args,\n **kwargs,\n ):\n super(CustomGATConv, self).__init__(*args)", "title": "" }, { "docid": "9e921b81569b4f2b761e8e769fdc19b3", "score": "0.44635713", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # dynamic add padding based on the kernel_size\n self.padding = (self.kernel_size[0] // 2, self.kernel_size[1] // 2)", "title": "" }, { "docid": "57c0506a64fff25d253bbdd972e2be31", "score": "0.44626525", "text": "def unpadder(self):", "title": "" }, { "docid": "8979150a6787af8f993bb3b6253e8dc1", "score": "0.44611022", "text": "def __init__(self, prefix: str, checkpoint_name: str='microsoft/deberta-v3-base', num_classes: Optional[int]=0, pooling_mode: Optional[str]='cls', gradient_checkpointing: Optional[bool]=False, low_cpu_mem_usage: Optional[bool]=False, pretrained: Optional[bool]=True):\n super().__init__(prefix=prefix, checkpoint_name=checkpoint_name, num_classes=num_classes, pooling_mode=pooling_mode, gradient_checkpointing=gradient_checkpointing, low_cpu_mem_usage=low_cpu_mem_usage, pretrained=pretrained)\n logger.debug(f'initializing {checkpoint_name}')\n if self.config.model_type in {'gpt2', 'roberta'}:\n self.tokenizer = AutoTokenizer.from_pretrained(checkpoint_name, add_prefix_space=True)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(checkpoint_name)\n if hasattr(self.model.config, 'max_position_embeddings'):\n self.tokenizer.model_max_length = self.model.config.max_position_embeddings\n if 
hasattr(self.model.config, 'n_positions'):\n self.tokenizer.model_max_length = self.model.config.n_positions", "title": "" }, { "docid": "eda4ccffe6258fcaf34b72183f0958da", "score": "0.44604722", "text": "def narration(self) -> str:", "title": "" }, { "docid": "480ea1f7ac7c6d91db2f2dfbdb00ecec", "score": "0.44594306", "text": "def fine_tune_pooler_output(nr_class, *, exclusive_classes=True, **cfg):\n return chain(\n get_pytt_pooler_output,\n flatten_add_lengths,\n with_getitem(0, Softmax(nr_class, cfg[\"token_vector_width\"])),\n Pooling(mean_pool),\n )", "title": "" }, { "docid": "4a8c48a924c4bd440cb6c8a7dabd74af", "score": "0.44590542", "text": "def __init__(self, boosting_p):\n self.process = boosting_p\n self.pb = boosting_p.pb\n\n \"\"\"Allocate memory for intermediate data matrices\"\"\"\n self.__index = self.pb.index_matrix\n self.__err = np.zeros(shape = self.__index.shape,dtype =\"float32\")\n self.__bout = np.zeros(self.__index.shape[1],dtype=\"bool\")\n self.__dt = np.zeros(shape = self.process.label.shape,dtype =\"float32\")\n self.__not_label = np.logical_not(self.process.label)\n self.__label = np.logical_not(self.__not_label)", "title": "" }, { "docid": "872cc04c53d6540ac853beb583e1fb3d", "score": "0.44576496", "text": "def mask_token(self, idx, tokens, types, vocab_words, rng):\n label = tokens[idx]\n if rng.random() < 0.8:\n new_label = self.tokenizer.get_command('MASK').Id\n else:\n if rng.random() < 0.5:\n new_label = label\n else:\n new_label = rng.choice(vocab_words)\n\n tokens[idx] = new_label\n\n return label", "title": "" } ]
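The mask_token passage that closes the example above implements the BERT-style masked-language-model replacement rule: of the positions chosen for masking, 80% become the mask token, 10% become a random vocabulary token, and 10% keep the original token. A minimal self-contained sketch of that distribution, with illustrative names only (token_id, vocab_words, and mask_id are assumptions, not taken from the passage):

    import random

    def bert_mask(token_id, vocab_words, mask_id, rng=random):
        # Draw once and split the unit interval 0.8 / 0.1 / 0.1.
        r = rng.random()
        if r < 0.8:
            return mask_id                  # 80%: replace with the mask token
        if r < 0.9:
            return rng.choice(vocab_words)  # 10%: replace with a random token
        return token_id                     # 10%: keep the original token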
c85978c1d384804b672ed6f512e49e4a
Creates a CROD card
[ { "docid": "4d20a4bfeacf4d99e100688379ffb27a", "score": "0.0", "text": "def __init__(self, eid, pid, nids, comment=''):\n RodElement.__init__(self)\n if comment:\n self.comment = comment\n self.eid = eid\n self.pid = pid\n self.nodes = self.prepare_node_ids(nids)\n assert len(self.nodes) == 2\n self.nodes_ref = None\n self.pid_ref = None", "title": "" } ]
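The positive passage is the constructor of pyNastran's CROD element, a two-node rod that carries axial and torsional loads and references a PROD property; the underlying Nastran bulk-data entry is the fixed-field card CROD, EID, PID, G1, G2. A minimal usage sketch, assuming pyNastran's BDF card-helper API (add_grid, add_mat1, add_prod, add_crod); all field values are illustrative:

    from pyNastran.bdf.bdf import BDF

    model = BDF()
    model.add_grid(1, [0., 0., 0.])                 # G1
    model.add_grid(2, [10., 0., 0.])                # G2
    model.add_mat1(mid=1, E=3.0e7, G=None, nu=0.3)  # isotropic material
    model.add_prod(pid=100, mid=1, A=2.0)           # rod property the CROD references
    model.add_crod(eid=10, pid=100, nids=[1, 2])    # the CROD card itself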
[ { "docid": "befd41ff71f86542ff2345867d5668d9", "score": "0.74394673", "text": "def create_card(self, data: dict) -> Any:\n\n return self._post('/create/card', data)", "title": "" }, { "docid": "8cb4c0b4b6afd4be477ea060678d534c", "score": "0.74331784", "text": "def test_create_credit_card(self):\n CreditCard('visa', '1234123412341234', '01', '2018', '000', 'John', 'Smith')", "title": "" }, { "docid": "bd6d250a18564213fa90d08857e055c8", "score": "0.69959027", "text": "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n nids = [integer(card, 3, 'n1'),\n integer(card, 4, 'n2')]\n assert len(card) == 5, 'len(CROD card) = %i\\ncard=%s' % (len(card), str(card))\n return CROD(eid, pid, nids, comment=comment)", "title": "" }, { "docid": "4706799074ffa9b93eaed19152a4478e", "score": "0.6520387", "text": "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n nids = [integer(card, 2, 'n1'),\n integer(card, 3, 'n2')]\n mid = integer(card, 4, 'mid')\n A = double_or_blank(card, 5, 'A', 0.0)\n j = double_or_blank(card, 6, 'j', 0.0)\n c = double_or_blank(card, 7, 'c', 0.0)\n nsm = double_or_blank(card, 8, 'nsm', 0.0)\n assert len(card) <= 9, 'len(CONROD card) = %i\\ncard=%s' % (len(card), str(card))\n return CONROD(eid, mid, nids, A, j, c, nsm, comment=comment)", "title": "" }, { "docid": "8002b4e3bf0e1dcc7e87f299070b2342", "score": "0.64519346", "text": "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n nids = [integer(card, 3, 'n1'),\n integer(card, 4, 'n2')]\n assert len(card) == 5, f'len(CTUBE card) = {len(card):d}\\ncard={card}'\n return CTUBE(eid, pid, nids, comment=comment)", "title": "" }, { "docid": "d099621afd629db15abec5277c9493de", "score": "0.6450271", "text": "def create_deck():\n numeric_cards = range(2,11)\n value_court_cards = 10\n n_court_cards = 3\n value_ace = 11\n\n cards_in_a_suit = list(numeric_cards) + [value_court_cards]*n_court_cards + [value_ace]\n deck = 4 * cards_in_a_suit\n\n return deck", "title": "" }, { "docid": "ba2b7cd2006ca198496705270385a447", "score": "0.6407448", "text": "def CreateCard(self, barcode, member, credit):\n with self.connection as cursor:\n if cursor.execute(\"\"\"SELECT * FROM `creditcard` WHERE `barcode` = ?\"\"\",\n (barcode, )):\n return False\n with self.connection as cursor:\n cursor.execute(\"\"\"\n INSERT INTO `creditcard` (`datetime`, `barcode`, `member`, `credit`)\n VALUES (?, ?, ?, ?) 
\"\"\",\n (datetime.datetime.now().strftime('%F %T'), barcode, member, credit))\n return True", "title": "" }, { "docid": "41855d998cb174234dff8d92ecbca44f", "score": "0.63402456", "text": "def __init__(self, card):\r\n self.card = card", "title": "" }, { "docid": "46c5ea8805829c2997869e96bc3638b7", "score": "0.63210565", "text": "def createTrapCard(self):\n card_dict = {}\n card_dict['name'] = self.card_info['card']['name']\n card_dict['id'] = self.card_info['card']['number']\n card_dict['type'] = self.card_info['card']['type']\n card_dict['mon_type'] = self.card_info['card']['monster_types']\n card_dict['species'] = self.card_info['card']['species']\n card_dict['attr'] = self.card_info['card']['attribute']\n card_dict['level'] = self.card_info['card']['stars']\n card_dict['need_materials'] = self.card_info['card']['has_materials']\n card_dict['need_name'] = self.card_info['card']['has_name_condition']\n card_dict['desc'] = self.card_info['card']['text']\n card_dict['atk'] = self.card_info['card']['attack']\n card_dict['def'] = self.card_info['card']['defense']\n card_dict['desc'] = self.card_info['card']['text']\n card_dict['legality'] = self.card_info['card']['legality']\n card_dict['releases'] = self.card_info['card']['releases']\n card_dict['img'] = self.card_info['card']['image_path']\n card_dict['thumb'] = self.card['card']['thumbnail_path']\n # card_dict['is_extra_deck'] = self.card_info['card']['is_extra_deck']\n # card_dict['is_fusion'] = self.card_info['card']['is_fusion']\n # card_dict['pendulum'] = self.card_info['card']['is_illegal']\n # card_dict['is_link'] = self.card_info['card']['is_link']\n\n return card_dict", "title": "" }, { "docid": "2de376335cb341f9df3f2bc1952e9978", "score": "0.627174", "text": "def create_charge(self, card, amount_in_dollars, description):\n amount_in_cents = int((amount_in_dollars * Decimal('100')).quantize(Decimal(10)))\n transaction_record = stripe.Charge.create(\n card=card,\n customer=self.customer,\n amount=amount_in_cents,\n currency=settings.DEFAULT_CURRENCY,\n description=description,\n )\n return transaction_record.id", "title": "" }, { "docid": "d3fe7854d7c85d36475abbbceb76d6a8", "score": "0.62400025", "text": "def test_create_credit_card(self):\n self.c.force_authenticate(user=self.user)\n value = {\n \"first_name\": \"carlos\",\n \"last_name\": \"olivero\",\n \"number_card\": \"12345678912345675\",\n \"type_card\": \"mastercard\",\n \"date_expiration\": \"2022-02-01\"\n }\n response = self.c.post('/payment/card/', value, format='json')\n self.assertEqual(response.status_code, 201, \"error al crear la tarjeta\")\n credit_card = paymet_models.CreditCard.objects.filter(user=self.user.id).values()\n print(\"valores del test para agregar la tarjeta: \", credit_card)", "title": "" }, { "docid": "eadbbbb1a2eaf5c099f59aace1e9cd8f", "score": "0.6225554", "text": "def generate_cards():\n cards = []\n for color in [\"red\", \"blue\", \"yellow\", \"green\"]:\n cards.append(Card(number=0, symbol=color))\n for n in list(range(1, 10)) + [\"+2\", \"skip\"]:\n for i in range(2):\n cards.append(Card(number=n, symbol=color))\n for i in range(4):\n cards.append(Card(symbol=\"rainbow\", number=\"0\"))\n cards.append(Card(symbol=\"rainbow\", number=\"+4\"))\n return cards", "title": "" }, { "docid": "fcca2ea0410f88fcf7fcdfb9340f2fc9", "score": "0.6148847", "text": "def create_card(self, lane_id, title, params={}):\n return self.__get_parent__().create_card(\n column_id = self.id,\n lane_id = lane_id,\n title = title,\n params = params,\n )", "title": "" }, 
{ "docid": "4a9359b78ebaa1a93453b98d78e87d1c", "score": "0.6142437", "text": "def create_card(self, board_id, column_id, lane_id, title, params={}):\n params['space_id'] = self.id\n params['board_id'] = board_id\n params['column_id'] = column_id\n params['lane_id'] = lane_id\n params['title'] = title\n\n return self.__create_item__('/cards', 'Card', params)", "title": "" }, { "docid": "a9c534fcd1a8b971b2ee1ac41286aaab", "score": "0.6138161", "text": "def build_cc_deck():\n # community chest is Go, Jail, then money\n CC_Deck = []\n for i in range(14):\n c = Card('CC', 'COL', i, 0, 0, str(i))\n CC_Deck.append(c) \n go = Card('CC', 'MOV', 0, GO_SPACE, 0, \"go to go\")\n jail = Card('CC', 'MOV', 0, JAIL_SPACE, 0, \"go to jail\")\n jail = Card('CC', 'MOV', 0, JAIL_SPACE, 0, \"go to jail\")\n CC_Deck += [go, jail]\n random.shuffle(CC_Deck)\n CC_Deck = Deck(CC_Deck)\n if DECK_DEBUG:\n CC_Deck.print_deck()\n return CC_Deck", "title": "" }, { "docid": "dee33e35fbe80b99bdf31612f43f658e", "score": "0.61150646", "text": "def construct_card(name, database, template, basic=False):\n if name not in database:\n print(\"Cannot find \" + name + \" in local database\")\n return False\n \n if basic:\n template.executeBasic(database[name])\n else:\n template.execute(database[name])\n return True", "title": "" }, { "docid": "6f51b7f816812240441680a2e45104e0", "score": "0.61049336", "text": "def create_new_card(list_id):\n try:\n card = domain.Card(\n list_id=list_id,\n translation_id=request.form['translation_id']\n )\n except KeyError:\n app.logger.error('Insufficient parameters: %r', request.form)\n abort(400)\n\n card = CardlistRepo().add_card(card)\n translation = TranslationRepo().get(card.translation_id)\n return jsonify(card=card.dto(), translation=translation.dto_autocomplete())", "title": "" }, { "docid": "281401c4a3b4401b683b5d65ef9e00ae", "score": "0.60903335", "text": "def new_card(self, token, client=None):\n return self._api_call(\"https://api.paymill.com/v2/payments\",\n dict_without_none(token=str(token), client=str(client)), return_type=Payment)", "title": "" }, { "docid": "01f3f31d0041c1e965b2020d1bfb4a11", "score": "0.60848117", "text": "def tocard(dict_card):\n redas = [toreda(r) for r in dict_card['redactions']]\n return models.Card(dict_card['name'], dict_card['number'], redactions=redas)", "title": "" }, { "docid": "c663de9e83fe90edd8afdfaddbb67f2c", "score": "0.60817796", "text": "def charge_card(self, tag):\n \n if tag == 'param_card':\n tag = 'slha'\n elif tag == 'run_card':\n tag = 'mgruncard' \n elif tag == 'proc_card':\n tag = 'mg5proccard' \n elif tag == 'shower_card':\n tag = 'mgshowercard'\n elif tag == 'FO_analyse_card':\n tag = 'foanalyse'\n\n assert tag in ['slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse'], 'invalid card %s' % tag\n \n if tag == 'slha':\n param_card = self[tag].split('\\n')\n self.param_card = param_card_reader.ParamCard(param_card)\n return self.param_card\n elif tag == 'mgruncard':\n run_card = self[tag].split('\\n') \n if 'parton_shower' in self[tag]:\n self.run_card = RunCardNLO(run_card)\n else:\n self.run_card = RunCard(run_card)\n return self.run_card\n elif tag == 'mg5proccard':\n proc_card = self[tag].split('\\n')\n self.proc_card = ProcCard(proc_card)\n return self.proc_card\n elif tag =='mgshowercard':\n shower_content = self[tag] \n if MADEVENT:\n import internal.shower_card as shower_card\n else:\n import madgraph.various.shower_card as shower_card\n self.shower_card = shower_card.ShowerCard(shower_content, True)\n # set testing to 
false (testing = true allow to init using \n # the card content instead of the card path\"\n self.shower_card.testing = False\n return self.shower_card\n elif tag =='foanalyse':\n analyse_content = self[tag] \n if MADEVENT:\n import internal.FO_analyse_card as FO_analyse_card\n else:\n import madgraph.various.FO_analyse_card as FO_analyse_card\n # set testing to false (testing = true allow to init using \n # the card content instead of the card path\"\n self.FOanalyse_card = FO_analyse_card.FOAnalyseCard(analyse_content, True)\n self.FOanalyse_card.testing = False\n return self.FOanalyse_card", "title": "" }, { "docid": "c69337feb504738191c7e6b606b0c46d", "score": "0.6069044", "text": "def create(cls, **kwargs):\n transformed_args = dict(card=kwargs)\n return _as_object(\n cls._request('post',\n cls._collection_path(),\n transformed_args))", "title": "" }, { "docid": "5174fdb22c3e039761ce4f2f8011c7c4", "score": "0.6015293", "text": "def insert_bankcard(self, request):\n random_str = ipara.private_key + request['userId'] + request['cardOwnerName']\n random_str += request['cardNumber'] + request['cardExpireMonth'] + request['cardExpireYear']\n random_str += request['clientIp'] + self.get_transaction_date()\n return self.api_request('POST', '/bankcard/create', random_str, 'json', request)", "title": "" }, { "docid": "a6523bbbdf1da1d2844dc22c0ddde431", "score": "0.6000521", "text": "def add_card(cls, card, comment=''):\n key = string(card, 1, 'key')\n\n n = 1\n value = None\n if key == 'ACOUT':\n value = string_or_blank(card, 2, 'value', 'PEAK')\n elif key == 'ACOWEAK':\n value = string_or_blank(card, 2, 'value', 'NO')\n elif key == 'ADJMETH':\n value = integer_or_blank(card, 2, 'value', 0)\n elif key == 'ADPCON':\n value = double_or_blank(card, 2, 'value', 1.0)\n #elif key == 'ADMPOST':\n #value = string_or_blank(card, 2, 'value', 0) ## TODO: 0 is not a string\n elif key == 'ADSDISC':\n value = double_or_blank(card, 2, 'value', 1e-8)\n elif key == 'AESMAXIT':\n value = integer_or_blank(card, 2, 'value', 15)\n elif key == 'AESMETH':\n value = string_or_blank(card, 2, 'value', 'SELECT')\n assert value in ['SELECT', 'AUTO', 'DIRECT', 'RITZ', 'ITER'], 'value=%s' % value\n elif key == 'AESTOL':\n value = double_or_blank(card, 2, 'value', 1e-10)\n elif key in ['ALPHA1FL', 'ALPHA2FL']: # check alpha1/alpha1FL\n value1 = double_or_blank(card, 2, 'value1', 0.0)\n value2 = double_or_blank(card, 3, 'value2', 0.0)\n n = 2\n elif key == 'COMPMATT':\n #('COMPMATT', 'NO', ['YES', 'NO', 'NONSMEAR']), # MSC only: 'NONSMEAR'\n value = string_or_blank(card, 2, 'value1', 'NO')\n if value == 'NONS': # assume\n value = 'NONSMEAR'\n if value == 'SMEAR': # assume\n value = 'YES'\n assert value in ['YES', 'NO', 'NONSMEAR'], 'value=%r' % value\n\n elif key == 'POST':\n value = integer_or_blank(card, 2, 'value', 1)\n elif key == 'UNITSYS':\n value = string(card, 2, 'value')\n\n #-------------------------------------------------------------\n # strings; has defaults\n elif key in string_params:\n default, allowed_values = string_params[key]\n value = string_or_blank(card, 2, 'value', default=default)\n assert value in allowed_values, f'value={value} allowed={allowed_values}'\n\n # ints; has defaults\n elif key in int_params:\n default=int_params[key]\n value = integer_or_blank(card, 2, 'value', default=default)\n elif key in int_params_allowed:\n default, allowed_values = int_params_allowed[key]\n value = integer_or_blank(card, 2, 'value', default=default)\n assert value in allowed_values, f'value={value} 
allowed={allowed_values}'\n\n # floats; has defaults\n elif key in float_params:\n default=float_params[key]\n value = double_or_blank(card, 2, 'value', default=default)\n elif key in float2_params:\n defaults = float2_params[key]\n value = double_or_blank(card, 2, 'value', default=defaults[0])\n value = double_or_blank(card, 2, 'value', default=defaults[1])\n n = 2\n\n # unchecked catch all\n elif key in STR_WORDS_1:\n value = string(card, 2, 'value')\n elif key in INT_WORDS_1:\n value = integer(card, 2, 'value')\n elif key in FLOAT_PARAMS:\n value = double(card, 2, 'value')\n elif key in FLOAT2_PARAMS:\n value1 = double(card, 2, 'value1')\n value2 = double(card, 3, 'value2')\n values = [value1, value2]\n n = 2\n\n #-------------------------------------------------------------\n else:\n #raise NotImplementedError(card)\n n = 2\n value1 = integer_double_string_or_blank(card, 2, 'value1')\n value2 = integer_double_string_or_blank(card, 3, 'value2')\n if value2 is None:\n value = value1\n n = 1\n\n if value is None:\n # n=2 or blank\n if isinstance(value1, str):\n assert ' ' not in value1, f'PARAM value1={value1!r}'\n if isinstance(value2, str):\n assert ' ' not in value2, f'PARAM value2={value2!r}'\n values = [value1, value2]\n else:\n # n=1\n if isinstance(value, str):\n assert ' ' not in value, f'PARAM value={value!r}'\n values = [value]\n\n if n == 1:\n assert len(card) <= 3, f'len(PARAM card)={len(card):d} card={card!r}'\n else:\n assert len(card) <= 4, f'len(PARAM card)={len(card):d} card={card!r}'\n return PARAM(key, values, comment=comment)", "title": "" }, { "docid": "1407ed0176cca2a43bcd4080acca4e6b", "score": "0.5967349", "text": "async def card(self, ctx, card=None):\n if card is None:\n await send_cmd_help(ctx)\n\n card = self.get_card_name(card)\n\n if card is None:\n await self.bot.say(\"Card name is not valid.\")\n return\n\n data = discord.Embed(\n title=self.card_to_str(card),\n description=self.get_card_description(card),\n color=self.get_random_color())\n data.set_thumbnail(url=self.get_card_image_url(card))\n data.add_field(\n name=\"Elixir\",\n value=self.crdata[\"Cards\"][card][\"elixir\"])\n data.add_field(\n name=\"Rarity\",\n value=string.capwords(self.crdata[\"Cards\"][card][\"rarity\"]))\n\n # for id in range(cardpop_range_min, cardpop_range_max):\n # data.add_field(\n # name=\"Snapshot {}\".format(id),\n # value=self.get_cardpop(card, id))\n\n try:\n await self.bot.type()\n await self.bot.say(embed=data)\n except discord.HTTPException:\n await self.bot.say(\"I need the `Embed links` permission \"\n \"to send this\")\n\n # Display card trend of the card\n await ctx.invoke(Card.cardtrend, card)\n\n # Display top decks\n await ctx.invoke(Card.decks, card)", "title": "" }, { "docid": "8e07e570c127d3964634ef7f50be8621", "score": "0.5965065", "text": "def newcard(self, token, client=None):\n p = []\n if client is not None:\n p += [(\"client\", client)]\n p += [(\"token\", token)]\n return self._apicall(\"https://api.paymill.de/v2/payments\", tuple(p))", "title": "" }, { "docid": "6f9301ea62361ee3576ac0d5e0df1b96", "score": "0.5892781", "text": "def create_deck(self):\n for s in self.suits:\n for p in self.points:\n self.cards.append(Card(s, p))", "title": "" }, { "docid": "f56486daafcbfb293293d30bd448581b", "score": "0.58859134", "text": "def create_card_type(self, letter, name, color):\n return self.__create_item__(\n '/card-types',\n 'CardType',\n { 'letter': letter, 'name': name, 'color': color }\n )", "title": "" }, { "docid": "dfe560887e6ee63cc810360946bbfce3", 
"score": "0.5846699", "text": "def add_flash_card():\n\n pass", "title": "" }, { "docid": "d126dd8dc843d797d5f4a188145f30c0", "score": "0.58416414", "text": "def __init__(self, type, card_info):\n self.type = type\n self.card_info = card_info\n if self.type is 'Monster':\n self.card = self.createMonsterCard()\n elif self.type is 'Spell':\n self.card = self.createSpellCard()\n else:\n self.card = self.createTrapCard()", "title": "" }, { "docid": "477c0e5da4a9392f74b794d89e1fabd1", "score": "0.583543", "text": "def createSpellCard(self):\n card_dict = {}\n card_dict['name'] = self.card_info['card']['name']\n card_dict['id'] = self.card_info['card']['number']\n card_dict['type'] = self.card_info['card']['type']\n card_dict['mon_type'] = self.card_info['card']['monster_types']\n card_dict['species'] = self.card_info['card']['species']\n card_dict['attr'] = self.card_info['card']['attribute']\n card_dict['level'] = self.card_info['card']['stars']\n card_dict['need_materials'] = self.card_info['card']['has_materials']\n card_dict['need_name'] = self.card_info['card']['has_name_condition']\n card_dict['desc'] = self.card_info['card']['text']\n card_dict['atk'] = self.card_info['card']['attack']\n card_dict['def'] = self.card_info['card']['defense']\n card_dict['desc'] = self.card_info['card']['text']\n card_dict['legality'] = self.card_info['card']['legality']\n card_dict['releases'] = self.card_info['card']['releases']\n card_dict['img'] = self.card_info['card']['image_path']\n card_dict['thumb'] = self.card['card']['thumbnail_path']\n # card_dict['is_extra_deck'] = self.card_info['card']['is_extra_deck']\n # card_dict['is_fusion'] = self.card_info['card']['is_fusion']\n # card_dict['pendulum'] = self.card_info['card']['is_illegal']\n # card_dict['is_link'] = self.card_info['card']['is_link']\n\n return card_dict", "title": "" }, { "docid": "c621ab13afe969cc50613c0bd47ea1a9", "score": "0.58313805", "text": "def add_card(cls, card, comment=''):\n key = string(card, 1, 'key')\n\n if key == 'AUTOSPC':\n values = [\n integer_or_blank(card, 2, 'value', 0)\n ]\n elif key == 'BETA':\n values = [\n double_or_blank(card, 2, 'value', 0.55)\n ]\n elif key == 'BETAD':\n values = [\n integer_or_blank(card, 2, 'value', 4)\n ]\n elif key == 'COUPMASS':\n values = [\n integer_or_blank(card, 2, 'value', 1)\n ]\n # CTYPE\n elif key == 'CYCIO':\n values = [\n integer_or_blank(card, 2, 'value', 1)\n ]\n elif key == 'CYCSEQ':\n values = [\n integer_or_blank(card, 2, 'value', -1)\n ]\n elif key == 'EPSHT':\n values = [\n double_or_blank(card, 2, 'value', 0.001)\n ]\n elif key == 'ESPIO':\n values = [\n double_or_blank(card, 2, 'value', 1e-5)\n ]\n elif key == 'G':\n values = [\n double(card, 2, 'G')\n ]\n else:\n ifield = 2\n # x\n #[PARAM, AUTOSPC, 2]\n values = []\n while ifield < len(card):\n value = integer_double_string_or_blank(card, ifield, 'field_{i}')\n values.append(value)\n ifield += 1\n return PARAM_NASA95(key, values, comment=comment)", "title": "" }, { "docid": "474def2d265c222fc0256db9b31bc62b", "score": "0.58102024", "text": "def create_deck(self, data):\n tsv_lines = re.split(r\"\\n|\\r|\\r\\n\", data)\n\n # Setup the counters for card requirements\n limits = {\"STATEMENT\": Match._MINIMUM_STATEMENT_CARDS,\n \"OBJECT\": Match._MINIMUM_OBJECT_CARDS,\n \"VERB\": Match._MINIMUM_VERB_CARDS}\n\n # Card ID counters\n card_id_counter = 0\n\n # Read all cards from the source\n left = Match._MAXIMUM_CARDS_IN_DECK\n for line in tsv_lines:\n # Remove whitespace\n line = line.strip()\n if line == \"\":\n 
continue\n\n # Ensure that cards have a TEXT<tab>TYPE format\n tsv = re.split(r\"\\t\", line)\n if len(tsv) != 2:\n return False, \"invalid_format\"\n\n text = escape(tsv[0])\n type = tsv[1]\n if type not in (\"STATEMENT\", \"OBJECT\", \"VERB\"):\n return False, \"invalid_type\"\n\n # Check that the number of gaps fits for the given type\n gaps = text.count(\"_\")\n if gaps > 0:\n if type != \"STATEMENT\":\n # Gaps in a non-statement card are not allowed\n return False, \"illegal_gap\"\n if gaps > 3:\n # More than three gaps are not supported\n return False, \"too_many_gaps\"\n else:\n if type == \"STATEMENT\":\n # Statement card without any gaps\n return False, \"statement_no_gap\"\n\n # Add the card to the deck\n if type not in self._deck:\n self._deck[type] = []\n self._deck[type].append(Card(card_id_counter, type, text))\n card_id_counter += 1\n limits[type] -= 1\n\n # Enforce the card limit\n left -= 1\n if left == 0:\n break\n\n # Ensure that all limits are met\n for num in limits.values():\n if num > 0:\n return False, \"deck_too_small\"\n\n # Create multidecks\n for type in self._deck:\n self._multidecks[type] = MultiDeck[Card, int](self._deck[type])\n\n return True, \"OK\"", "title": "" }, { "docid": "c744da8354ac0122e9ea1de34341e322", "score": "0.58081394", "text": "def create_card(self, board_id, list_id, name, desc, label_names=[], assignee_id=None):\n if self.init_if_needed():\n board = self.client.get_board(board_id)\n trello_list = board.get_list(list_id)\n\n labels = [self.get_label_by_label_name(board_id, label_name) for label_name in label_names]\n assign = [self.client.get_member(assignee_id)] if assignee_id else None\n\n return trello_list.add_card(name=name, desc=desc, labels=labels, assign=assign)", "title": "" }, { "docid": "16fdedaf09eea1aaf59ace7e3dae287e", "score": "0.5801407", "text": "def __init__(self,card,domain = None):\n self.card = card\n self.domain = domain", "title": "" }, { "docid": "5229dc910e0f830e847915f45d10b836", "score": "0.5794167", "text": "def test_add_card(self):\n hand = self._hand\n card = BjCard('spades', 'A')\n hand.add_card(card)\n self.assertEqual(len(hand), 1)\n self.assertEqual(hand[0], card)", "title": "" }, { "docid": "8c680b9c07322da9152e38838b805196", "score": "0.57908404", "text": "def create_random_card_id():\n return secrets.token_hex(32)", "title": "" }, { "docid": "1726e7d851352680fab0551addcc5eb7", "score": "0.578265", "text": "def create_turn(player_hand, deck):\n player_hand.cards.append(deck.get_card())", "title": "" }, { "docid": "08f626e8ceecc088d3d280e2628a40e3", "score": "0.5774982", "text": "def create_card(self, column_id, title, params={}):\n return self.__get_parent__().create_card(\n lane_id = self.id,\n column_id = column_id,\n title = title,\n params = params,\n )", "title": "" }, { "docid": "18b5653003e9af2cb2def6ddb9e5dc0e", "score": "0.57745534", "text": "def build_ch_deck():\n # Chance\n CH_Deck = []\n for i in range(6):\n c = Card('CH', 'COL', i, 0, 0, str(i))\n CH_Deck.append(c) \n go = Card('CH', 'MOV', 0, GO_SPACE, 0, \"go to go\")\n illinois = Card('CH', 'MOV', 0, ILL_SPACE, 0, \"go to illinois\")\n charles = Card('CH', 'MOV', 0, SCP_SPACE, 0, \"go to St. 
Charles\")\n util = Card('CH', 'MOV', 0, None, UTIL_REL, \"nearest utility\")\n RR = Card('CH', 'MOV', 0, None, RR_REL, \"nearest RR\")\n RR2 = Card('CH', 'MOV', 0, None, RR_REL, \"nearest RR\")\n back_3 = Card('CH', 'MOV', 0, None, B3_REL, \"back 3\")\n\n jail = Card('CH', 'MOV', 0, JAIL_SPACE, 0, \"go to jail\")\n RR1 = Card('CH', 'MOV', 0, RR1_SPACE, 0, \"reading RR\")\n boardw = Card('CH', 'MOV', 0, BOARDW_SPACE, 0, \"boardwalk\")\n CH_Deck += [go, illinois, charles, util, RR, RR2, back_3, jail, RR1, boardw]\n random.shuffle(CH_Deck)\n CH_Deck = Deck(CH_Deck)\n if DECK_DEBUG:\n CH_Deck.print_deck()\n return CH_Deck", "title": "" }, { "docid": "54b23370332c548e8854484ce8022f94", "score": "0.57244134", "text": "def draw_card(self, card):\n if len(self.hand) <= 13 :\n self.hand.append(Card(card.rank, card.suit, card.isjoker))\n pass", "title": "" }, { "docid": "da5913afb9998d1855f8789401d13e8d", "score": "0.5696578", "text": "def add_card(user_id, bldg_id, card_img, comments):\n\n card = Card(user_id=user_id,\n bldg_id=bldg_id,\n card_img=card_img,\n comments=comments)\n\n db.session.add(card)\n db.session.commit()", "title": "" }, { "docid": "f8ebc93d2a3d2822336bbcb5dc398a20", "score": "0.56773067", "text": "def __init__(self, name):\n self.name = name\n self.cardhand = cardhand.Cardhand()", "title": "" }, { "docid": "dc1c45ad409af387a5ce91d3d724a6ba", "score": "0.56681526", "text": "def cc_gen():\n\n max_amount = 10\n Valid_Cards = []\n\n # Ensure Proper Usage\n if len(sys.argv) > 3:\n sys.exit(\"Usage: cc_gen.py <card_type> <quantity(optional)\")\n if len(sys.argv) == 3:\n given = sys.argv[1]\n max_amount = int(sys.argv[2])\n if max_amount > 1000:\n sys.exit(\"Limit set to 1000 to prevent system hanging\")\n if len(sys.argv) == 2:\n given = sys.argv[1]\n max_amount = max_amount\n if len(sys.argv) == 1:\n given = input(\"Would you like an AMEX, Discover, Visa, or MasterCard: \")\n max_amount = max_amount\n\n # Reject Invalid Card Types\n card_types = [\"visa\", \"amex\", \"discover\", \"mastercard\"]\n if given.lower() not in card_types:\n sys.exit(\"Card Type {} not found\".format(given))\n\n # Generate Visa Card\n if given.lower() == \"visa\":\n card_start = 4\n card_length = 16 # can be set to 13 or 16\n\n # Generate AMEX Card\n if given.lower() == \"amex\":\n card_start = 3\n card_length = 15\n\n # Generate Discover Card\n if given.lower() == \"discover\":\n card_start = 6\n card_length = 16\n\n # Generate MasterCard\n if given.lower() == \"mastercard\":\n card_start = 5\n card_length = 16\n\n # Generate Numbers until Valid_Cards reaches Max_Amount\n while len(Valid_Cards) < max_amount:\n\n random_card = generate_number(card_start, card_length)\n\n # If Card is Valid And not apart of Valid_Cards\n if cc_check(random_card) and random_card not in Valid_Cards:\n print(\"Valid {} Generated: {}\".format(given, random_card))\n Valid_Cards.append(random_card)\n\n # Write out to file - Can be set to \"w\" to reset file or \"a\" to append file each run\n pathlib.Path('results').mkdir(parents=True, exist_ok=True)\n filename = given.lower()+\"_numbers.txt\"\n with open(\"./results/\"+filename, \"a\") as f:\n for number in Valid_Cards:\n print(number, file=f)", "title": "" }, { "docid": "18c4275ef2ceef9da664f6c693b6e661", "score": "0.5657641", "text": "def build_response_card(title, subtitle, options):\n buttons = None\n if options is not None:\n buttons = []\n for i in range(min(5, len(options))):\n buttons.append(options[i])\n\n return {\n 'contentType': 
'application/vnd.amazonaws.card.generic',\n 'version': 1,\n 'genericAttachments': [{\n 'title': title,\n 'subTitle': subtitle,\n 'buttons': buttons\n }]\n }", "title": "" }, { "docid": "88f9f35dc798af680cf2b5174cfbc4b2", "score": "0.5638229", "text": "def create_card(self, column_id, lane_id, title, params={}):\n return self.__get_parent__().create_card(\n board_id = self.id,\n column_id = column_id,\n lane_id = lane_id,\n title = title,\n params = params,\n )", "title": "" }, { "docid": "908e3eb16cade807fab8b701a0a36671", "score": "0.5621438", "text": "def create_deck_with_cards(collection_id, deck_title, card_list):\n collection = Collection.objects.get(id=collection_id)\n deck = create_deck(collection_id, deck_title)\n add_cards_to_deck(deck, card_list)\n return deck", "title": "" }, { "docid": "ed35d53c8e1e9f8930165be5053c99d9", "score": "0.56137466", "text": "def test_create_new_deck(mock_deck):\n mock_deck.draw_card()\n assert len(mock_deck.cards) == 51\n mock_deck.create_new_deck()\n assert len(mock_deck.cards) == 52", "title": "" }, { "docid": "43be9651b3fab42d00881a6b8aeda369", "score": "0.56062186", "text": "def create(cls, shuffle=False):\n cards = [Card(s, r) for r in Card.RANKS for s in Card.SUITS]\n if shuffle:\n random.shuffle(cards)\n return cls(cards)", "title": "" }, { "docid": "9768cf24d938d4e199bc2190072922fc", "score": "0.558752", "text": "def card_builder():\n if request.method == 'POST':\n post_add_note_req()\n\n return render_template(\"card_builder.html\", user=current_user)", "title": "" }, { "docid": "f3934daddaa9507fb57876831b3f7bbf", "score": "0.55781317", "text": "def __init__(self, color, texture, shape, number):\n self.card_params = {\n \"color\": color,\n \"texture\": texture,\n \"number\": number,\n \"shape\": shape\n }", "title": "" }, { "docid": "29fa6205d16cde45c0c6c09665b6f5f2", "score": "0.55780095", "text": "def fill_card(self, target_fields, position):\n new_card = [\"BEGIN:VCARD\", \"VERSION:3.0\",]\n\n # Upper class promotion\n if random.randint(0,100) < frequency_of_noble:\n noble = random.choice([\"Von\", \"Van\", \"Zu\"])\n else:\n noble = ''\n\n # Fill in the fields\n if \"Name\" in target_fields:\n namefield = \"N:\"\n namefield += str(self.last_names[position % len(self.last_names)])\n namefield +=\";\"\n namefield += str(self.first_names[position % len(self.first_names)])\n namefield +=\";\"\n if noble:\n namefield += noble\n namefield +=\";\"\n new_card.append(namefield)\n\n if \"FullName\" in target_fields:\n fnamefield = \"FN:\"\n fnamefield += str(self.first_names[position % len(self.first_names)])\n fnamefield += \" \"\n if noble:\n fnamefield += noble\n fnamefield += \" \"\n fnamefield += str(self.last_names[position % len(self.last_names)])\n new_card.append(fnamefield)\n\n if \"Organization\" in target_fields and random.randint(0,100) < frequency_of_org:\n orgfield = \"ORG:\"\n orgfield += str(self.orgs[position % len(self.orgs)])\n new_card.append(orgfield)\n\n if \"Title\" in target_fields and random.randint(0,100) < frequency_of_title:\n titlefield = \"TITLE:\"\n titlefield += str(self.titles[position % len(self.titles)])\n new_card.append(titlefield)\n\n if \"Photo\" in target_fields and random.randint(0,100) < frequency_of_photo:\n filepath = \"faces/\"+str(random.randint(1,number_of_faces))+\".jpg\"\n with open(filepath, \"rb\") as imagefile:\n base64img = base64.b64encode(imagefile.read())\n photofield = \"PHOTO;TYPE=JPEG;ENCODING=b:\"\n photofield += base64img\n new_card.append(photofield)\n\n if \"Phone\" in 
target_fields:\n phonefield = \"TEL;WORK;VOICE:\"\n if random.randint(0,100) < 25:\n phonefield += \"00 41 \"\n elif random.randint(0,100) < 25:\n phonefield += \"+41 \"\n else:\n phonefield += \"0\"\n phonefield += \"7\"\n phonefield += \"%01d\" % random.randrange(6,9)\n phonefield += \" \"\n phonefield += \"%03d\" % random.randrange(100,999)\n phonefield += \" \"\n phonefield += \"%02d\" % random.randrange(10,99)\n phonefield += \" \"\n phonefield += \"%02d\" % random.randrange(10,99)\n new_card.append(phonefield)\n\n if \"Address\" in target_fields and random.randint(0,100) < frequency_of_address:\n addrfield = \"ADR;WORK:;;\"\n addrfield += str(self.streets[position % len(self.streets)])\n addrfield += \" \"\n addrfield += str(random.randrange(1,250))\n if random.randint(0,100) < 15:\n addrfield += random.choice([\"A\", \"B\", \"C\", \"a\", \"b\", \"c\", \".1\", \".2\", \"-A\", \"-B\", \"-C\"])\n addrfield += \";\"\n addrfield += str(self.city[position % len(self.city)])\n addrfield += \";\"\n addrfield += str(self.canton[position % len(self.canton)])\n addrfield += \";\"\n addrfield += str(self.plz[position % len(self.plz)])\n addrfield += \";Switzerland\"\n new_card.append(addrfield)\n\n if \"Email\" in target_fields:\n emailfield = \"EMAIL;PREF;INTERNET:\"\n special_char_map = {\n ord(u'Ä'): u'Ae',\n ord(u'Ü'): u'Ue',\n ord(u'Ö'): u'Oe',\n ord(u'ä'): u'ae',\n ord(u'ü'): u'ue',\n ord(u'ö'): u'oe',\n }\n firstname = unicode(str.lower(self.first_names[position % len(self.first_names)]), \"utf-8\").translate(special_char_map)\n lastname = unicode(str.lower(self.last_names[position % len(self.last_names)]), \"utf-8\").translate(special_char_map)\n if random.randint(0,100) < 50:\n emailfield += \"{}{}\".format(firstname, lastname)\n elif random.randint(0,100) < 50:\n emailfield += \"{}.{}\".format(firstname[0], lastname)\n elif random.randint(0,100) < 50:\n emailfield += \"{}{}\".format(firstname, str(random.randrange(75,99)))\n else:\n emailfield += lastname\n emailfield += \"@\"\n emailfield += str(self.domains[position % len(self.domains)])\n new_card.append(emailfield)\n\n if add_class_field:\n classfield = \"CLASS:vcard_generator\"\n new_card.append(classfield)\n\n # Create fake revision timestamp in the past up to two years ago\n now = datetime.datetime.now()\n random_rev_time = now - datetime.timedelta(seconds=random.randrange(0,60*60*24*30*12*2))\n new_card.append(\"REV:%s\" % random_rev_time.strftime('%Y%m%dT%H%M%SZ')) # revision date; format: 20140301T221110Z\n new_card.append(\"END:VCARD\")\n return new_card", "title": "" }, { "docid": "716a4387680ee811486085da82cb2e1a", "score": "0.5565083", "text": "def generate_cards(self):\n\t\tfull_deck = []\n\t\tfor num in range(0, 3): #0, 1, 2 for value\n\t\t\tfor size in range(1, 4): # 1, 2,3 for size\n\t\t\t\tfor shape in ['tuple', 'list', 'set']:\n\t\t\t\t\tfull_deck.append(Card(num, size, shape))\n\t\trandom.shuffle(full_deck)\n\t\treturn full_deck", "title": "" }, { "docid": "d2ef9877409d98dcb0395fca62a52675", "score": "0.5552666", "text": "def create(self, values):\n \n result = super(sale_subscription, self).create(values)\n\n partners_domain = [('id','=',result.partner_id.id)]\n all_partners = self.env['res.partner'].search(partners_domain, order='id desc')\n\n if len(all_partners) > 0:\n #for partner in all_partners:\n if not all_partners[0].barcode:\n a_barcode = '041'+\"\".join(choice(digits) for i in range(9))\n all_partners[0].write({\n 'barcode': a_barcode\n })\n\n the_partners_domain = [('id','=',result.partner_id.id)]\n 
the_all_partners = self.env['res.partner'].search(the_partners_domain, order='id desc')\n if len(the_all_partners) > 0:\n now = datetime.now()\n \n rfid_card = dict(None or {\n 'number': the_all_partners[0].barcode,\n 'contact_id': the_all_partners[0].id,\n 'cloud_card': False,\n 'activation_temp_date': result.recurring_next_date,\n 'activate_on': result.date_start,\n 'deactivate_on': result.date_start,\n })\n self.env['hr.rfid.card'].sudo().create(rfid_card)\n # result.write({\n # 'description_contrat': result.description,\n # 'description': False\n # })\n\n return result", "title": "" }, { "docid": "752b05fccf34a1b7123bb42aa1630f7f", "score": "0.55445766", "text": "def copycard(c1, c2):\n c1.value = c2.value\n c1.comment = c2.comment", "title": "" }, { "docid": "d03f4618f07a3837e804a38a615f34ba", "score": "0.5536615", "text": "def json_to_card(json_data):\n card_obj = Card(\n front=json_to_side(json_data['sides'][0]),\n back=json_to_side(json_data['sides'][1]),\n card_id=json_data['id']\n )\n\n return card_obj", "title": "" }, { "docid": "8ee6db6965a4733a6ae0449687af51cb", "score": "0.5521792", "text": "def getCard(name):\n query = session.query(DBCard).filter(DBCard.name == name)\n try:\n dbcard = query.one()\n return Card(name, dbcard.cost, dbcard.text, dbcard.power,\n dbcard.toughness, dbcard.loyalty)\n except:\n return Card(name)", "title": "" }, { "docid": "b2adb7a6b48710264b449d2f6e767951", "score": "0.5519448", "text": "def createNewDeck():\r\n deck = []\r\n for i in range(1,14):\r\n deck.append('C' + str(i))\r\n deck.append('D' + str(i))\r\n deck.append('H' + str(i))\r\n deck.append('S' + str(i))\r\n random.shuffle(deck) \r\n return deck", "title": "" }, { "docid": "9b6dd464db1bb0e9f275cf9e5be8c1da", "score": "0.5503389", "text": "def draw(self):\n card = self.deck.draw()\n self.cards.append(card)\n return card", "title": "" }, { "docid": "f2598a03836a52b1c622a21a8711a43c", "score": "0.54986686", "text": "def Create_C(size, group):\n crystal = Sp_class.Crystal()\n crystal.rect.x = random.randrange(size[0]-40)\n crystal.rect.y = random.randrange(-730,0)\n group.add(crystal)", "title": "" }, { "docid": "5933464991ade77935a46b052f22140e", "score": "0.54747045", "text": "def add_card(cls, card, comment=''):\n key = string(card, 1, 'key')\n\n if key == 'ARP_TOL':\n values = [\n double_or_blank(card, 2, 'value', 1e-6)\n ]\n elif key == 'AUTOSPC':\n # ['PARAM', 'AUTOSPC', 'Y', '1.0E-9', None, 'Y', 'Y']\n # enabled, tol, nset, info, print_\n enabled = string_or_blank(card, 2, 'ENABLED', 'Y')\n\n tol = double_or_blank(card, 3, 'TOL', 1e-6)\n nset = integer_or_blank(card, 4, 'NSET', 1)\n info = string_or_blank(card, 5, 'INFO', 'N')\n print_ = string_or_blank(card, 6, 'PRINT', 'N')\n\n assert enabled in {'Y', 'N'}, enabled\n assert info in {'Y', 'N'}, info\n assert print_ in {'Y', 'N'}, print_\n values = [\n enabled, tol, nset, info, print_\n ]\n elif key == 'ART_MASS':\n # ['PARAM', 'ART_MASS', 'Y', '0.', '1.E-6']\n values = [\n string_or_blank(card, 2, 'is_mass', 'N'),\n double_or_blank(card, 3, 'trans_mass', 1e-6),\n double_or_blank(card, 4, 'rot_mass', 1e-6),\n ]\n elif key == 'EIGNORM2':\n #['PARAM', 'EIGNORM2', 'Y']\n values = [string_or_blank(card, 2, 'enabled', 'N')]\n elif key == 'EQCHECK':\n #['PARAM', 'EQCHECK', '0', '3', '3', None, None, None, '-1.E10']\n values = [\n integer_or_blank(card, 2, 'refgrid', 0),\n integer_or_blank(card, 3, 'g-set', 0),\n integer_or_blank(card, 4, 'n-set', 0),\n integer_or_blank(card, 5, 'f-set', 0),\n integer_or_blank(card, 6, 'a-set', 0),\n 
integer_or_blank(card, 7, 'l-set', 0),\n double_or_blank(card, 8, 'grid_forces_tol', 1e-5),\n string_or_blank(card, 9, 'norm_forces', 'N'),\n ]\n elif key == 'GRDPNT':\n #['PARAM', 'GRDPNT', '0']\n values = [\n integer_or_blank(card, 2, 'GRDPNT', -1),\n ]\n elif key == 'MEFMLOC':\n # ['PARAM', 'MEFMLOC', 'CG']\n values = [string_or_blank(card, 2, 'enabled', 'GRDPNT')]\n assert values[0] in ['GRDPNT', 'CG', 'GRID'], values\n elif key == 'POST':\n #['PARAM', 'POST', '-1']\n values = [\n integer_or_blank(card, 2, 'POST', -1),\n ]\n elif key == 'RCONDK':\n #['PARAM', 'RCONDK', 'Y']\n values = [\n string_or_blank(card, 2, 'RCONDK', 'N'),\n ]\n elif key == 'SOLLIB':\n # ['PARAM', 'SOLLIB', 'LAPACK']\n values = [\n string_or_blank(card, 2, 'library', 'INTELMKL'),\n ]\n #SOLLIB; char; default=IntMKL\n #Denotes which library to use for matrix decomposition and equation solution. Options are:\n #IntMKL: Intel Math Kernel Library (matrices stored in sparse form)\n #LAPACK (matrices stored in band form)\n #YaleSMP: (matrices stored in sparse form) – not fully implemented in MYSTRAN\n assert values[0] in ['INTMKL', 'LAPACK', 'YALESMP'], values\n elif key == 'SPARSTOR':\n values = [\n string_or_blank(card, 2, 'storage', 'SYM'),\n ]\n assert values[0] in {'SYM', 'NONSYM'}, values\n elif key == 'WTMASS':\n #['PARAM', 'WTMASS', '.002591']\n values = [\n double_or_blank(card, 2, 'WTMASS', 1.0),\n ]\n\n # basic print flags\n elif key == 'PRTBASIC':\n #['PARAM', 'PRTBASIC', '1']\n values = [\n integer_or_blank(card, 2, 'PRTBASIC', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTCGLTM':\n #['PARAM', 'PRTCGLTM', '1']\n values = [\n integer_or_blank(card, 2, 'PRTCGLTM', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTDLR':\n #['PARAM', 'PRTDLR', '1']\n values = [\n integer_or_blank(card, 2, 'PRTDLR', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTGMN':\n #['PARAM', 'PRTGMN', '1']\n values = [\n integer_or_blank(card, 2, 'PRTGMN', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTGOA':\n #['PARAM', 'PRTGOA', '1']\n values = [\n integer_or_blank(card, 2, 'PRTGMN', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTSCP':\n #['PARAM', 'PRTSCP', '1']\n values = [\n integer_or_blank(card, 2, 'PRTSCP', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTTSET':\n #['PARAM', 'PRTTSET', '1']\n values = [\n integer_or_blank(card, 2, 'PRTTSET', 0),\n ]\n assert values[0] in [0, 1], values\n\n elif key == 'PRTHMN':\n #['PARAM', 'PRTHMN', '1']\n values = [\n integer_or_blank(card, 2, 'PRTHMN', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTIFLTM':\n #['PARAM', 'PRTIFLTM', '1']\n values = [\n integer_or_blank(card, 2, 'PRTIFLTM', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTKXX':\n #['PARAM', 'PRTKXX', '1']\n values = [\n integer_or_blank(card, 2, 'PRTKXX', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTMXX':\n #['PARAM', 'PRTMXX', '1']\n values = [\n integer_or_blank(card, 2, 'PRTMXX', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTPHIXA':\n #['PARAM', 'PRTPHIXA', '1']\n values = [\n integer_or_blank(card, 2, 'PRTPHIXA', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTPHIZL':\n #['PARAM', 'PRTPHIZL', '1']\n values = [\n integer_or_blank(card, 2, 'PRTPHIZL', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTQSYS':\n #['PARAM', 'PRTQSYS', '1']\n values = [\n integer_or_blank(card, 2, 'PRTQSYS', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 
'PRTUO0':\n #['PARAM', 'PRTUO0', '1']\n values = [\n integer_or_blank(card, 2, 'PRTUO0', 0),\n ]\n assert values[0] in [0, 1], values\n elif key == 'PRTYS':\n #['PARAM', 'PRTYS', '1']\n values = [\n integer_or_blank(card, 2, 'PRTUO0', 0),\n ]\n assert values[0] in [0, 1], values\n # ----------------------------------\n # not basic print flags\n elif key == 'PRTCORD':\n #['PARAM', 'PRTCORD', '2']\n values = [\n integer_or_blank(card, 2, 'PRTCORD', 0),\n ]\n assert values[0] in [0, 1, 2], values # TODO: why is 2 allowed?\n elif key == 'PRTDOF':\n #['PARAM', 'PRTDOF', '1']\n values = [\n integer_or_blank(card, 2, 'PRTDOF', 0),\n ]\n assert values[0] in [0, 1, 2, 3], values\n elif key == 'PRTRMG':\n #['PARAM', 'PRTRMG', '3']\n values = [\n integer_or_blank(card, 2, 'PRTRMG', 0),\n ]\n assert values[0] in [0, 1, 2, 3], values\n\n\n elif key == 'PRTSTIFF':\n # ['PARAM', 'PRTSTIFF', '1', '3', '3', '3', '3']\n values = [\n integer(card, 2, 'PRTSTIFF-2'),\n integer(card, 3, 'PRTSTIFF-3'),\n integer(card, 4, 'PRTSTIFF-4'),\n integer(card, 5, 'PRTSTIFF-5'),\n integer(card, 6, 'PRTSTIFF-6'),\n ]\n for i, value in enumerate(values):\n assert value in [1, 3], f'i={i} values={values}'\n elif key == 'PRTSTIFD':\n # ['PARAM', 'PRTSTIFD', '1', '3', '3', '3', '3']\n values = [\n integer(card, 2, 'PRTSTIFD-2'),\n integer(card, 3, 'PRTSTIFD-3'),\n integer(card, 4, 'PRTSTIFD-4'),\n integer(card, 5, 'PRTSTIFD-5'),\n integer(card, 6, 'PRTSTIFD-6'),\n ]\n for i, value in enumerate(values):\n assert value in [1, 3], f'i={i} values={values}'\n elif key == 'PRTFOR':\n # ['PARAM', 'PRTFOR', '1', '3', '3', '3', '3']\n values = [\n integer(card, 2, 'PRTFOR-2'),\n integer(card, 3, 'PRTFOR-3'),\n integer(card, 4, 'PRTFOR-4'),\n integer(card, 5, 'PRTFOR-5'),\n integer(card, 6, 'PRTFOR-6'),\n ]\n for i, value in enumerate(values):\n assert value in [1, 3], f'i={i} values={values}'\n elif key == 'PRTMASS':\n # ['PARAM', 'PRTMASS', '1', '3', '3', '3', '3']\n values = [\n integer(card, 2, 'PRTMASS-2'),\n integer(card, 3, 'PRTMASS-3'),\n integer(card, 4, 'PRTMASS-4'),\n integer(card, 5, 'PRTMASS-5'),\n integer(card, 6, 'PRTMASS-6'),\n ]\n for i, value in enumerate(values):\n assert value in [1, 3], f'i={i} values={values}'\n else:\n raise NotImplementedError(card)\n\n return PARAM_MYSTRAN(key, values, comment=comment)", "title": "" }, { "docid": "58517c9f2ec2351820f3401450bf23b1", "score": "0.54651314", "text": "def generate_deck():\n suits = [BusGame.HEARTS, BusGame.DIAMONDS, BusGame.CLUBS, BusGame.SPADES]\n cards = list()\n for suit in suits:\n for rank in BusGame.RANKS:\n cards.append(BusCard(suit, rank))\n return cards", "title": "" }, { "docid": "b985a5027b02ac0e474992ef54fa00ce", "score": "0.546389", "text": "def arhive(self):\n return self.__update__( 'Card', { 'condition' : 2 } )", "title": "" }, { "docid": "f74be1369230d4553bfabede133bac94", "score": "0.5463683", "text": "def deck_creation():\n # Create a starting deck of 20 cards\n cards = [\n 'A',\n 'A',\n 'B',\n 'B',\n 'C',\n 'C',\n 'D',\n 'D',\n 'E',\n 'E',\n 'F',\n 'F',\n 'G',\n 'G',\n 'H',\n 'H',\n 'I',\n 'I',\n 'J',\n 'J']\n\n # Set a variable for a card list to modify\n selection_deck = cards\n # Set a variable for the deck used in the game\n game_deck = []\n game_deck_counter = 0\n\n # Build the game deck\n while (game_deck_counter < 20):\n ran_card = random.choice(cards)\n selection_deck.remove(ran_card)\n game_deck.extend(ran_card)\n game_deck_counter += 1\n return game_deck", "title": "" }, { "docid": "6eab9604edb93844028ab08b3986bc6a", "score": 
"0.54621804", "text": "def addAccountCard(self,card, accountId, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/customer/accounts/{accountId}/cards?responseFields={responseFields}\", \"POST\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"accountId\", accountId);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).withBody(card).execute();\r\n\t\treturn self.client.result();", "title": "" }, { "docid": "0dd0fbfadadf7e3d2d134287beb990f3", "score": "0.5461157", "text": "def create_charge(self, data: dict) -> Any:\n\n return self._post('/create/charge', data)", "title": "" }, { "docid": "874c04696cb582fafb96414fd488f585", "score": "0.545323", "text": "def create_or_update(self, business_id, credit_card_id=None):\n cc_number = self.cleaned_data['cc_number'].encode()\n exp_month = self.cleaned_data['exp_month']\n exp_year = self.cleaned_data['exp_year']\n cvv_number = self.cleaned_data['cvv_number']\n card_holder = self.cleaned_data['card_holder']\n try:\n credit_card = CreditCard.objects.get(id=credit_card_id,\n business__id=business_id)\n credit_card.exp_month = exp_month\n credit_card.exp_year = exp_year\n credit_card.card_holder = card_holder\n except CreditCard.DoesNotExist:\n credit_card = CreditCard(business_id=business_id, \n exp_month=exp_month, exp_year=exp_year, \n card_holder=card_holder)\n credit_card.cvv2 = cvv_number\n credit_card.cc_type = get_cc_type_from_number(cc_number)\n credit_card.encrypt_cc(cc_number)\n credit_card.is_storage_opt_in = True\n credit_card.save()\n return credit_card", "title": "" }, { "docid": "aacaa9fe59dc0bf44604c844787fbe8f", "score": "0.54424864", "text": "def __init__(self, cards):\n self.cards = cards", "title": "" }, { "docid": "aacaa9fe59dc0bf44604c844787fbe8f", "score": "0.54424864", "text": "def __init__(self, cards):\n self.cards = cards", "title": "" }, { "docid": "2d309a25490110c3a48228ac884affa4", "score": "0.54415655", "text": "def add_card(db_session: Session) -> Type[fs.State]:\n choice = input('1. Add a new flashcard\\n'\n '2. 
Exit\\n')\n if choice == '1':\n question = ''\n while not question:\n question = input('Question:\\n')\n answer = ''\n while not answer:\n answer = input('Answer:\\n')\n new_card = FlashCard(question=question, answer=answer)\n store_card_db(new_card, db_session)\n return fs.AddCard\n elif choice == '2':\n return fs.MainMenu\n else:\n print(f'{choice} is not an option\\n')\n return fs.AddCard", "title": "" }, { "docid": "a9ae474f158a5e13d4af9acbc7b9af80", "score": "0.5433984", "text": "def __init__(self,\n size: int,\n color: str\n ) -> None:\n\n self.color = color\n self.size = size\n self.deck = [Card(i,self.color) for i in range(2,self.size)]", "title": "" }, { "docid": "e164f0688084195a1c934730ff4a13e3", "score": "0.5422164", "text": "def __init__(self):\n card_combos = list(itul.product(*cfg.card_config.values())) # generate all combinations of attributes\n self.cards = [Card(*card_params) for card_params in card_combos] # create the cards and put them into the deck\n self.shuffle()", "title": "" }, { "docid": "0eeedc7a73472af7d169797a4385a695", "score": "0.5413044", "text": "def main():\n deck = Deck()\n print(deck)\n print(\"Drew:\", deck.draw_one_card())\n print(\"Drew:\", deck.draw_one_card())\n print(deck)", "title": "" }, { "docid": "8262997ffa67b06d20f18022f430a515", "score": "0.540642", "text": "def save_card():\n\n if 'user_token' not in session:\n return redirect('/')\n\n current_username = session['user_token']['username']\n user = User.query.filter_by(username=current_username).one()\n user_id = user.user_id\n\n bldg_id = request.args.get('bldg')\n card_img = request.args.get('url')\n\n comments = request.form.get('comments')\n\n not_unique = Card.query.filter_by(user_id=user_id).filter_by(bldg_id=bldg_id).first()\n\n if not not_unique:\n add_card(user_id, bldg_id, card_img, comments)\n return redirect('/dashboard')\n\n flash(\"You already have that card %d!\" % not_unique.card_id)\n return redirect('/')", "title": "" }, { "docid": "f433c449cc0cdcbcb0cd6e2dad685aa5", "score": "0.5406258", "text": "def make_div(self):\n res = dbc.Card([\n dbc.CardImg(src=get_encoded_img('img/' + self.id + '.jpg')),\n dbc.CardBody([\n html.H4([self.name, html.Br(), 'price: $' + str(self.price)]),\n dcc.Input(id=self.id, type='number', min = 0, step = 1, value=0)\n ])\n ], style={'width': '33%'})\n return res", "title": "" }, { "docid": "4e1ea2e821c4d70d6374e7843366919d", "score": "0.5401821", "text": "def build_deck(self):\n suits = ['Hearts', 'Diamonds', 'Spades', 'Clubs']\n ranks = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,\n '8': 8, '9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10, 'A': 11, }\n\n for suit in suits:\n for rank, value in ranks.items():\n card = Card(rank, value, suit)\n self.cards.append(card)", "title": "" }, { "docid": "4a4873e8a4056d23149511788fe31e33", "score": "0.54014957", "text": "def create_deck(self):\n self.cards = []\n path = os.path.join(tools.Image.path, \"cards\")\n for root, dirs, files in os.walk(path):\n for f in files:\n if f.endswith(\".png\"):\n path = os.path.abspath(os.path.join(root, f))\n image = pg.image.load(path)\n card = Card(path)\n if tools.get_category(card.path) != \"other\":\n self.cards.append(card)", "title": "" }, { "docid": "6c3ad597eaa588e953b28a2c0f521470", "score": "0.53953344", "text": "def _create_character(name, player):\n # Look for default values\n permissions = settings.PERMISSION_PLAYER_DEFAULT\n typeclass = settings.BASE_CHARACTER_TYPECLASS\n home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)\n\n # Create the character\n 
character = create.create_object(typeclass, key=name, home=home,\n permissions=permissions)\n\n # Set playable character list\n player.db._playable_characters.append(character)\n\n # Allow only the character itself and the player to puppet it.\n character.locks.add(\"puppet:id(%i) or pid(%i) or perm(Immortals) \" \\\n \"or pperm(Immortals)\" % (character.id, player.id))\n\n # If no description is set, set a default description\n if not character.db.desc:\n character.db.desc = \"\"\n\n # We need to set this to have @ic auto-connect to this character.\n player.db._last_puppet = character\n\n return character", "title": "" }, { "docid": "54697d48e231915d244ef66014e80e2c", "score": "0.5391776", "text": "def generate_board(cards):\n\n # numbers = ['One', 'Two', 'Three']\n # colors = ['Red', 'Green', 'Purple']\n # shadings = ['Solid', 'Shaded', 'Hollow']\n # shapes = ['Squiggle', 'Oval', 'Diamond']\n\n assert len(cards) % 3 == 0, 'Must pass a list of cards with length \\\n that\\'s a multiple of 3'\n\n num_rows = len(cards) / 3\n\n canvas = Image.new('RGB', ((160+20)*3, (100+10)*num_rows-10), 'white')\n\n fnt = ImageFont.truetype('./Arial.ttf', 24)\n d = ImageDraw.Draw(canvas)\n\n i = 0\n for row in range(num_rows):\n for col in range(3):\n im = Image.open('cards/Card{}.png'.format(\n cards[i]\n ))\n canvas.paste(im, ((160+20)*col+20, (100+10)*row))\n d.text(((160+20)*col+10,(100+10)*row+5), chr(i+65), font=fnt, fill=(0,0,0,255))\n i += 1\n\n output = StringIO.StringIO()\n canvas.save(output, format='png')\n output_string = output.getvalue()\n output.close()\n\n return output_string", "title": "" }, { "docid": "b2b34ca36c7bd14dc26208df6be0a4c8", "score": "0.5386237", "text": "def add_card(handler_input, response):\n # type: (HandlerInput, Response) -> None\n response.card = SimpleCard(\n title=skill_name,\n content=convert_speech_to_text(response.output_speech.ssml))", "title": "" }, { "docid": "e4bfdfde4a838f3a0ce3c5a1a5d45b59", "score": "0.5349683", "text": "def createMonsterCard(self):\n card_dict = {}\n card_dict['name'] = self.card_info['card']['name']\n card_dict['id'] = self.card_info['card']['number']\n card_dict['type'] = self.card_info['card']['type']\n card_dict['mon_type'] = self.card_info['card']['monster_types']\n card_dict['species'] = self.card_info['card']['species']\n card_dict['attr'] = self.card_info['card']['attribute']\n card_dict['level'] = self.card_info['card']['stars']\n card_dict['need_materials'] = self.card_info['card']['has_materials']\n card_dict['need_name'] = self.card_info['card']['has_name_condition']\n card_dict['desc'] = self.card_info['card']['text']\n card_dict['atk'] = self.card_info['card']['attack']\n card_dict['def'] = self.card_info['card']['defense']\n card_dict['desc'] = self.card_info['card']['text']\n card_dict['legality'] = self.card_info['card']['legality']\n card_dict['releases'] = self.card_info['card']['releases']\n card_dict['img'] = self.card_info['card']['image_path']\n card_dict['thumb'] = self.card['card']['thumbnail_path']\n # card_dict['is_extra_deck'] = self.card_info['card']['is_extra_deck']\n # card_dict['is_fusion'] = self.card_info['card']['is_fusion']\n # card_dict['pendulum'] = self.card_info['card']['is_illegal']\n # card_dict['is_link'] = self.card_info['card']['is_link']\n\n return card_dict", "title": "" }, { "docid": "72beb4859e117fcafa417edc9b91a57c", "score": "0.53331697", "text": "def choose_dialog():\n title = random.choice(WASSEPUR_DIALOGUES)\n color = 0xF5C518\n card = make_card(title=title, color=color, 
thumbnail=THUMBNAIL)\n return card", "title": "" }, { "docid": "c57c9e7a297570f659aa0dda20c10b44", "score": "0.53191173", "text": "def addCard(self,card):\n self.deck.insert(0,card)", "title": "" }, { "docid": "b21f013e26b4b516a4cda695d4f2633b", "score": "0.53180295", "text": "def create_crib(self, request):\n user = request.user\n course_id = request.data[\"course\"]\n try:\n Course.objects.get(id=course_id)\n except Course.DoesNotExist as e:\n logger.exception(e)\n return Response(str(e), status.HTTP_404_NOT_FOUND)\n\n check = self._is_registered(course_id, user)\n if check is not True:\n return check\n\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n errors = serializer.errors\n logger.error(errors)\n return Response(errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "b9052cf84ac509cbf55b82f3cafc356b", "score": "0.53170234", "text": "def get_new_card(hand: list):\n card = poker_list.pop(random.randint(0, len(poker_list) - 1))\n hand.append(card)", "title": "" }, { "docid": "6984ebef77ba8368430c78aa29f2024e", "score": "0.53132933", "text": "def a_draw_card(self):\n # Check if you are allowed to draw a card\n if self.card_to_draw == 0:\n return\n card = self.a_get_top_card()\n self.add_log(\"You draw a card {}.\".format(card.name))\n self.hand.add_card(card)\n self.send_message(3)\n self.card_to_draw -= 1\n self.refresh_screen()", "title": "" }, { "docid": "0c23a71098ef096dc76b76488c569e64", "score": "0.5308627", "text": "def create():", "title": "" }, { "docid": "0c23a71098ef096dc76b76488c569e64", "score": "0.5308627", "text": "def create():", "title": "" }, { "docid": "0c23a71098ef096dc76b76488c569e64", "score": "0.5308627", "text": "def create():", "title": "" }, { "docid": "811aaaa1b2194f0956840d11877865fa", "score": "0.53026354", "text": "def assignCardRender(number, shape, color, fill_type):\r\n blank_card = Surface((150, 175))\r\n blank_card.fill((255, 255, 255))\r\n if fill_type < 3:\r\n for n in numberCoord[number]:\r\n shapes[shape][0](blank_card, colors[color], shapes[shape][1](n, (110, 40)), fill_type - 1)\r\n else:\r\n for n in numberCoord[number]:\r\n shapes[shape][0](blank_card, colors[color], shapes[shape][1](n, (110, 40)))\r\n drawStripes(blank_card)\r\n return blank_card", "title": "" }, { "docid": "ee4c25b9ffe1b909ce6a953c20b5e611", "score": "0.53024095", "text": "def create_coSpace(self, payload={}):\n return self.__open__(\"coSpaces\", payload=payload, HTTPmethod='POST')", "title": "" }, { "docid": "a875e1ef7df11a9b3f883681ea69ce6f", "score": "0.5301902", "text": "def add_card(self, card):\n if type(card) is tuple and len(card) == 2:\n new_card = Card(\n front=card[0],\n back=card[1],\n user_id=self.user_id\n )\n self.cards.append(new_card)", "title": "" }, { "docid": "f5b20d27ec2ff94002849647bfdfc4e5", "score": "0.5298751", "text": "def handle_add_card(self, kernel, requestor, new_card, area, gamestate):\n pass", "title": "" }, { "docid": "8e645c15c0d01e903fe0251bcf996092", "score": "0.5296386", "text": "def test_add_credit_card(self):\n # Create a new empty password database.\n self._init_database(self.dbname)\n\n # Add a new credit-card entry.\n with cli_context([\n 'storepass-cli', '-f', self.dbname, 'add', '--type',\n 'credit-card', '--description', 'E1 description', '--notes',\n 'E1 notes', '--card-type', 'E1 card type', '--card-number',\n 'E1 card number', '--expiry-date', 'E1 expiry date', '--ccv',\n 'E1 CCV', '--pin', 
'E1 PIN', 'E1 name'\n ]) as cli_mock:\n cli_mock.getpass.return_value = DEFAULT_PASSWORD\n res = storepass.cli.__main__.main()\n self.assertEqual(res, 0)\n cli_mock.getpass.assert_called_once()\n self.assertEqual(cli_mock.stdout.getvalue(), \"\")\n self.assertEqual(cli_mock.stderr.getvalue(), \"\")\n\n # Read the database and dump its XML content.\n with cli_context(['storepass-cli', '-f', self.dbname,\n 'dump']) as cli_mock:\n cli_mock.getpass.return_value = DEFAULT_PASSWORD\n res = storepass.cli.__main__.main()\n self.assertEqual(res, 0)\n cli_mock.getpass.assert_called_once()\n self.assertRegex(\n cli_mock.stdout.getvalue(),\n utils.dedent(\"\"\"\\\n ^<\\\\?xml version='1\\\\.0' encoding='UTF-8'\\\\?>\n <revelationdata dataversion=\"1\">\n \\t<entry type=\"creditcard\">\n \\t\\t<name>E1 name</name>\n \\t\\t<description>E1 description</description>\n \\t\\t<updated>[0-9]+</updated>\n \\t\\t<notes>E1 notes</notes>\n \\t\\t<field id=\"creditcard-cardtype\">E1 card type</field>\n \\t\\t<field id=\"creditcard-cardnumber\">E1 card number</field>\n \\t\\t<field id=\"creditcard-expirydate\">E1 expiry date</field>\n \\t\\t<field id=\"creditcard-ccv\">E1 CCV</field>\n \\t\\t<field id=\"generic-pin\">E1 PIN</field>\n \\t</entry>\n </revelationdata>\n $\"\"\"))\n self.assertEqual(cli_mock.stderr.getvalue(), \"\")", "title": "" }, { "docid": "044b61bbabdfb019987bca7421c1d386", "score": "0.5290399", "text": "def flashcard():\r\n return Flashcard(\"question\", \"answer\", 0, 0)", "title": "" }, { "docid": "f3139c8edbae10bcf066a41c9627769a", "score": "0.5289123", "text": "def card_library():\n return render_template(\"card_library.html\", user=current_user)", "title": "" }, { "docid": "782533ed3d5013afc97d9a26db3aa1c8", "score": "0.52824295", "text": "def add_card(self, Lists, name, desc, due):\n url = self.url_prefix+\"cards\"\n query = self.query\n query[\"name\"] = name\n query[\"idList\"] = Lists[0].id\n query[\"due\"] = due\n query[\"desc\"] = desc\n response = requests.post(url, params=query)\n response.raise_for_status()", "title": "" }, { "docid": "0bf8af6efd1390a292858fb7b0d9bc68", "score": "0.52737033", "text": "def addCard(self, name, player=0, faceDown=False): \n # svg file of the card graphics\n if faceDown:\n svgFile = self.cardSvgFile(self.deckBackSVG) \n else:\n svgFile = self.cardSvgFile(name)\n \n # create CardGraphicsItem instance\n ind = len(self.getCardsList()) + 1\n tmp = CardGraphicsItem(name, ind, svgFile, player, faceDown) \n tmp.setScale(self.defScale)\n tmp.setZValue(ind) # set ZValue as index (last in is up) \n# self.cardsGraphItems.append(tmp)\n self.scene.addItem(tmp)\n # sanity check\n \n #print(\"num of cards=\" + str(len(self.cardsList)))", "title": "" }, { "docid": "f1b711ae43543fcf2099110b85d9c050", "score": "0.5271539", "text": "def test_add(self):\n self.plr.piles[Piles.DISCARD].set()\n card = self.game[\"Copper\"].remove()\n self.plr.add_card(card, Piles.DISCARD)\n self.assertEqual(card.location, Piles.DISCARD)\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Copper\"])", "title": "" }, { "docid": "92fa440a7897510ec31822242f5ceec8", "score": "0.5263447", "text": "def draw_card(self):\n return self.deck.draw()", "title": "" }, { "docid": "f719c703c2c64020ad218ce2d173c77f", "score": "0.5260941", "text": "def generate_hand(deck):\n return Hand([draw_card_from_deck(deck), draw_card_from_deck(deck)])", "title": "" }, { "docid": "ef973829d685aa907bcecd950a8b09f2", "score": "0.5252817", "text": "def newStandardDeck():\n values = [1,2,3,4,5,6,7,8,9,10,11,12,13]\n 
deck = []\n for i in values:\n deck.append(card(i,'hearts'))\n for i in values:\n deck.append(card(i,'clubs'))\n values.reverse()\n for i in values:\n deck.append(card(i,'diamonds'))\n for i in values:\n deck.append(card(i,'spades'))\n return deck", "title": "" } ]
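Several of the negative passages above (build_ch_deck, generate_deck, deck_creation, newStandardDeck) share the same core move: a full deck is produced by pairing every rank with every suit. For orientation, a minimal self-contained sketch of that pattern in plain Python — the Card tuple, the suit/rank constants, and the function name are illustrative assumptions, not taken from any of the quoted repositories:

import random
from collections import namedtuple

# Illustrative card type; the quoted passages each define their own Card class.
Card = namedtuple("Card", ["rank", "suit"])

SUITS = ["hearts", "clubs", "diamonds", "spades"]
RANKS = list(range(1, 14))  # 1 = ace .. 13 = king

def build_standard_deck(shuffle=True):
    """Build a 52-card deck by pairing every rank with every suit."""
    deck = [Card(rank, suit) for suit in SUITS for rank in RANKS]
    if shuffle:
        random.shuffle(deck)  # in-place shuffle, as several passages above do
    return deck

deck = build_standard_deck()
assert len(deck) == 52

The comprehension is behaviorally equivalent to the nested for loops used in the passages; only the structure differs.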
33d0e9c7beacc6d5cd3bf11770cd25e4
Loads all data from a file as a list of lines
[ { "docid": "6e588041fdd09e5400429f132a83d461", "score": "0.7420914", "text": "def loadLines(callingFile):\n\n filePath = File.getRealPath(callingFile) + \"/data.txt\"\n\n with open(filePath, 'r') as file:\n data = file.readlines()\n\n return data", "title": "" } ]
[ { "docid": "a1ad9c21c9802b1e0e18c648191b0541", "score": "0.780131", "text": "def load_lines(filename):\n try:\n in_handler = open(filename)\n lines = [l.strip() for l in in_handler.readlines()]\n in_handler.close()\n return lines \n except Exception, e:\n sys.exit(str(e))", "title": "" }, { "docid": "cc301f7faf85eb8555e8ab651714d20f", "score": "0.75988317", "text": "def load_file(self, filename):\n input_data = []\n with open(filename, 'r') as inp:\n for line in inp:\n input_data.append(self.process_line(line.strip()))\n\n return input_data", "title": "" }, { "docid": "77b354545f5f3eeca3a27f0931262827", "score": "0.7558631", "text": "def get_data_from_file(self) -> list:\n with open(self.filename, \"r+\") as file:\n return file.readlines()", "title": "" }, { "docid": "3ec318cadc54f5d43debcdf0c0bd0313", "score": "0.75509477", "text": "def load(self) -> List[str]:\n with open(self.path) as file:\n return [line.strip() for line in file]", "title": "" }, { "docid": "99c868f0faf44358f21dd62faa04c4c2", "score": "0.7437211", "text": "def load_list(file_path):\n \n textlist = []\n with open(file_path) as file_handler:\n line = file_handler.readline()\n while line:\n textlist.append(line)\n line = file_handler.readline()\n return textlist", "title": "" }, { "docid": "7493c1db7d220ff7d503730270574a05", "score": "0.7360157", "text": "def read_lines(file):\r\n with open(file) as infile:\r\n lines_list = infile.readlines()\r\n return lines_list", "title": "" }, { "docid": "dc9c84539eac6829007c098e6d873c02", "score": "0.73538244", "text": "def load(file):\n txt = open(file, 'r')\n lines = txt.readlines()\n #print(lines, len(lines))\n return(lines)", "title": "" }, { "docid": "57c893a744e094643fc8e6a7d15abdcf", "score": "0.7337975", "text": "def read_file_all_lines(file_name):\n with open(file_name, \"r\") as f:\n data = f.read().splitlines()\n return data", "title": "" }, { "docid": "6698aa15987488d5381484edca0c555c", "score": "0.7322593", "text": "def load_data(path):\n data_file = open(path,'r')\n lines = data_file.readlines()\n data_file.close() \n return lines", "title": "" }, { "docid": "029aba051de9e5cbddcedb4e62578e2e", "score": "0.728166", "text": "def lines(self):\n with open(self.fn, \"rt\", encoding='latin-1') as f:\n lines = f.read().splitlines()\n return list(lines)", "title": "" }, { "docid": "5f2c80ac93a634dca509103d6566e2b4", "score": "0.7272746", "text": "def load() -> List[str]:\n data = []\n fullpath = get_full_path()\n if not os.path.exists(fullpath):\n return data\n\n with open(fullpath, \"r\") as fin:\n entries = fin.readlines()\n for entry in entries:\n data.append(entry)\n return data", "title": "" }, { "docid": "076d5c72b2d966c62d531c579ad7e330", "score": "0.72635835", "text": "def __read_file(self):\r\n\r\n with open(self.file) as f:\r\n return [line.rstrip() for line in f]", "title": "" }, { "docid": "558671318c2be151962e2924d96779be", "score": "0.71903586", "text": "def _read_file_by_lines(filename):\n with open(filename, \"r\") as f:\n return f.read().splitlines()", "title": "" }, { "docid": "3449e05e01dfdfed3d945d3776636b9b", "score": "0.7166112", "text": "def read_file_lines(file_: Text) -> List[Text]:\n with open(file_, \"r\") as s:\n return [line.strip() for line in s.readlines()]", "title": "" }, { "docid": "abb31e1f8772fcc9cc542511063597aa", "score": "0.7158105", "text": "def read_lines(fname):\n lines = [line.rstrip() for line in open(fname, 'r')]\n return lines", "title": "" }, { "docid": "c30c4c022a15cbb735e810620df0bbc5", "score": "0.71172035", "text": "def 
read_list(filename):\n with open(filename, \"r\") as file_:\n all_lines = file_.read().splitlines()\n return all_lines", "title": "" }, { "docid": "8ae473d7c38b361ba7641bbcfd8eef45", "score": "0.707663", "text": "def __load_list(self, filename):\n return [\n item.strip()\n for item\n in open(filename, \"r\").readlines()\n if item.strip() != \"\"\n ]", "title": "" }, { "docid": "68bab3a859e4ac786b98599c841cf388", "score": "0.7066947", "text": "def get_line_lst(filename):\n lst = []\n with open(filename) as fp:\n for line in fp:\n lst.append(line)\n\n return lst", "title": "" }, { "docid": "aea4ab1d4ea540e4e6c75d2aa94a3bd0", "score": "0.7053396", "text": "def listfromfilelines(file):\n with open(file, 'r') as f:\n list = [line.strip() for line in f]\n return list", "title": "" }, { "docid": "471094d307c64b410980fc828bc4b499", "score": "0.7050902", "text": "def readlines(self):\r\n lines = []\r\n with self.open('r') as file:\r\n lines = file.readlines()\r\n return lines", "title": "" }, { "docid": "048fdc197e6473413fac1b1d5c03d1fa", "score": "0.703255", "text": "def _list_from_file(file: Union[str, Path]) -> list:\n with open(file, 'rt') as f:\n return f.readlines()", "title": "" }, { "docid": "08a994bc574f7c6ddbd1d049f17c7c1d", "score": "0.70158255", "text": "def readlines(self, filepath):\n return self.read(filepath).split('\\n')", "title": "" }, { "docid": "9a232cde0aa26f9cd28f9393b13c2858", "score": "0.70076734", "text": "def input_lines(file):\n return open(file).read().splitlines()", "title": "" }, { "docid": "721c00f4636e8c795f558c6f93ab7ff8", "score": "0.69935286", "text": "def read_lines(self):\n if self._file_handle is None:\n return\n\n lines = self._file_handle.read().strip().split('\\n')\n\n self.process_lines(lines)", "title": "" }, { "docid": "a1abdba24fc93a1e80d8618c85463507", "score": "0.6984638", "text": "def _readlines(self):\n try:\n with open(self.fname, 'rb') as fp:\n fdata = fp.read()\n except (IOError, OSError):\n return []\n\n udata = fdata.decode(errors='ignore')\n\n return six.moves.filter(None, self.LINES_RE.findall(udata))", "title": "" }, { "docid": "f70460a45f795d441fd00fa44a57f727", "score": "0.69745225", "text": "def load_list(filename):\n f = open(filename)\n content = [x.strip() for x in f.readlines()]\n f.close()\n return content", "title": "" }, { "docid": "cac7157e2564981490cbafc098bdada5", "score": "0.6937585", "text": "def lines(fpath):\n with open(fpath, 'r') as fhandle:\n return fhandle.readlines()", "title": "" }, { "docid": "05226c62730e76f9038f4ad0db6b915a", "score": "0.69305265", "text": "def load_data(file_path):\n import csv\n with file(file_path) as f:\n dialect = csv.Sniffer().sniff(f.read(2048))\n f.seek(0)\n reader = csv.reader(f, dialect)\n return [l for l in reader]", "title": "" }, { "docid": "05226c62730e76f9038f4ad0db6b915a", "score": "0.69305265", "text": "def load_data(file_path):\n import csv\n with file(file_path) as f:\n dialect = csv.Sniffer().sniff(f.read(2048))\n f.seek(0)\n reader = csv.reader(f, dialect)\n return [l for l in reader]", "title": "" }, { "docid": "d78dceb9b28be7baa9b143498c1f1dfa", "score": "0.692706", "text": "def load_file_lines(option_value):\n with open(option_value, 'U') as f:\n return [line.strip() for line in f]", "title": "" }, { "docid": "3d077f5f3de0b11c603081561a74efe6", "score": "0.6904666", "text": "def load_data(filename):\n # Open file to read\n with open(filename) as f: # f is a file object\n for line in f: # Read each line as text\n print(int(line)) # Convert to integer and append to the list", 
"title": "" }, { "docid": "d088b28f56673dbed345498fa1565456", "score": "0.68955743", "text": "def load_text_file(filename):\n filepath = os.path.join(\"data\", filename)\n with open(filepath, 'r') as fin:\n ret = fin.readlines()\n return ret", "title": "" }, { "docid": "9792f453adf752259f512c2fbf193d27", "score": "0.6856483", "text": "def load_file(f_path, encoding=encoding.UTF8.value):\n\n raw_lines = []\n with open(f_path, 'r', encoding=encoding) as f_handle:\n for raw_line in f_handle:\n # split line into tokens\n raw_lines.append(raw_line.split())\n\n print(\"[INFO] Extracted {}, len(raw_lines) = {}\".format(f_path, len(raw_lines)))\n return raw_lines", "title": "" }, { "docid": "b4c455f5201304421531e24312169583", "score": "0.68284345", "text": "def load_data(self, path=\"\"):\n with open(path, \"r\", encoding=\"utf-8\") as fp:\n self._data = list(csv.reader(fp, dialect=csv.unix_dialect))", "title": "" }, { "docid": "7ee3e1be79e64c69fb564c52f83c2c1f", "score": "0.68238807", "text": "def read_data(self, filename):\n with open(filename, 'r') as f:\n sentence_lines = f.read().split(\"\\n\\n\")\n return sentence_lines", "title": "" }, { "docid": "201822e444b99cd77d60cf9ef3e4e5df", "score": "0.6816733", "text": "def load_file(file_name):\n f = open(file_name, encoding='utf8')\n split_lst = f.readlines()\n return split_lst", "title": "" }, { "docid": "fe30cbcfbf30a690aba1ad5db69e1e66", "score": "0.67951024", "text": "def read_lines(fpath: str, **kwargs) -> list:\n res = []\n with open(fpath, **kwargs) as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n res.append(line)\n return res", "title": "" }, { "docid": "c094cf109a6ab00557bf756a1aef6120", "score": "0.67922026", "text": "def __readFile(self, filePath):\n lines = []\n if filePath == None:\n return lines\n file = open(filePath)\n for line in file:\n lines.append(line.strip())\n file.close()\n # Debug printing\n if _SHOW_DEBUG_FOR_READ_FILE:\n print('Reading file: ' + filePath)\n for each in lines:\n print(each)\n return lines", "title": "" }, { "docid": "8397ddee0773ae71b005496f32f73eb9", "score": "0.6785665", "text": "def parse_lines_from_file(lineFile):\n linelist = []\n with open(lineFile) as f:\n rawlines = f.readlines()\n for linename in rawlines:\n linename = linename.rstrip()\n linelist.append(linename)\n return linelist", "title": "" }, { "docid": "7099f9222c70b423e02e37611ce1d634", "score": "0.6772512", "text": "def load_file(filename: str) -> list:\n with open(filename) as f:\n entries = f.readlines()\n entries = [int(e) for e in entries]\n return entries", "title": "" }, { "docid": "8f09d073369bb9ffe0ddf733a640cf7b", "score": "0.67685", "text": "def load_file_as_list(file_path: str) -> list:\n _file = \"\"\n try:\n with open(file_path, \"r\") as fp:\n _file = fp.readlines()\n return _file\n except Exception:\n raise Exception(f\"Error reading file at: {file_path}\")", "title": "" }, { "docid": "083ad6efd3edc242293f30a82180c377", "score": "0.6767291", "text": "def load_list(filename):\n filepath = os.path.join(self._path, filename)\n with open(filepath) as fileobj:\n return [os.path.join(self._path, line.strip()) for line in fileobj]", "title": "" }, { "docid": "9bc37d90cee67061b63c5b53670325e3", "score": "0.6759414", "text": "def read_lines_from_file(file_name):\n with open(file_name, 'r') as file:\n lines = file.readlines()\n return lines", "title": "" }, { "docid": "61f31aa3d620f7f42440052c84dff81b", "score": "0.6742253", "text": "def get_data():\n r = []\n with open(source, \"r\") as d:\n for line 
in d:\n line.strip()\n r.append(line)\n return r", "title": "" }, { "docid": "1d2c315b48f8216e1c53545a9c0ddccb", "score": "0.6738643", "text": "def read_file(file_data):\n\n # create an empty list to store the data\n data = []\n\n # read the first line, values are separated by a semicolon\n line = file_data.readline().split(';')\n\n # add all lines to the list\n while line != ['']:\n line = map(lambda s: s.strip(), line) # erase the blanks in the string\n if line[-1] == '': # fixes a bug for one file (NOMAD) where there is an extra semicolon at the end of each line\n line = line[:-1]\n data.append(line) # add the line to the array\n line = file_data.readline().split(';') # get the next line\n\n return data", "title": "" }, { "docid": "ad4a9ee55e00fbe9f1401792095e97d4", "score": "0.67355794", "text": "def readlines(filename):\n with open(filename, 'r') as f:\n lines = f.read().splitlines()\n return lines", "title": "" }, { "docid": "ad4a9ee55e00fbe9f1401792095e97d4", "score": "0.67355794", "text": "def readlines(filename):\n with open(filename, 'r') as f:\n lines = f.read().splitlines()\n return lines", "title": "" }, { "docid": "51ce051e8e4ddbde794c3f04825556c9", "score": "0.67204726", "text": "def _load_tpn_lines(fname):\n lines = []\n append = False\n for line in open(fname):\n line = line.strip()\n if line.startswith(\"#\") or not line:\n continue\n if append:\n lines[-1] = lines[-1][:-1].strip() + line\n else:\n lines.append(line)\n append = line.endswith(\"\\\\\")\n return lines", "title": "" }, { "docid": "c6834bb70231e1b0d4195420a877fa29", "score": "0.6718322", "text": "def read_file(src_file):\n lines = []\n try:\n file = open(src_file, 'r')\n lines = file.read().splitlines()\n file.close()\n except Exception:\n pass\n finally:\n return lines", "title": "" }, { "docid": "d11514255a757275ac025c22699e0cea", "score": "0.6714825", "text": "def load_file_as_list(filepath: str) -> List[str]:\n with open_read_text(filepath) as f:\n contents = f.readlines()\n return contents", "title": "" }, { "docid": "41c8814131e90f8c80aabd348b2a1c4f", "score": "0.6711399", "text": "def read_by_lines(path, encoding=\"utf-8\"):\n result = list()\n with open(path, \"r\",encoding=encoding) as infile:\n for line in infile:\n result.append(line.strip())\n return result", "title": "" }, { "docid": "256ab308fd449eae393a86a30f39e88c", "score": "0.66793376", "text": "def read_data_from_file(file_name, list_of_rows):\n import pickle\n\n file_unpickle = open(file_name, \"rb\")\n list_of_rows = pickle.load(file_unpickle)\n file_unpickle.close()\n return list_of_rows", "title": "" }, { "docid": "b0c54bc78c229e3bd9b9bd8c431e97f2", "score": "0.6672024", "text": "def load(name):\r\n data = []\r\n file_name = get_path(name)\r\n if os.path.exists(file_name):\r\n with open(file_name) as fin:\r\n for entry in fin.readlines():\r\n data.append(entry.rstrip())\r\n\r\n return data", "title": "" }, { "docid": "4fded142a5dbed5971ddc960c59626e5", "score": "0.6670166", "text": "def get_lines(filename: str) -> list:\n try:\n with open(filename, \"r\") as f:\n return f.readlines()\n except ValueError:\n return [\"\"]", "title": "" }, { "docid": "8f6736576a2d3ad5244976eaf99b85b9", "score": "0.6668243", "text": "def __read(self):\n with open(self.file_sc, 'r') as r: orig = r.readlines()\n self.lines = list()\n for raw in orig:\n if raw.strip().startswith('#'): continue\n self.lines.append(raw.strip())", "title": "" }, { "docid": "291fdc24ec0eb2f63304538dacbe4ddf", "score": "0.66618323", "text": "def _read_file(cls, input_file):\n with 
open(input_file, \"r\", encoding=\"utf-8\") as f:\n return f.readlines()", "title": "" }, { "docid": "e14a016e89d4bc13288bb13ad1b3555f", "score": "0.6653559", "text": "def read_file_lines(file_):\n if type(file_) is str:\n try:\n file_ = open(file_, 'r')\n except IOError:\n logging.warning('file does not exist (can not read): ' + file_)\n return []\n cont_str = file_.read()\n file_.close()\n else:\n cont_str = file_.read()\n return [url_str.rstrip() for url_str in cont_str.splitlines()]", "title": "" }, { "docid": "f5b412f78784941e7a0c8cfacaff185f", "score": "0.6649842", "text": "def read_lines(file_name):\n buf = []\n with codecs.open(file_name, 'rb', 'UTF-8') as fh:\n for line in fh:\n buf.append(line)\n return buf", "title": "" }, { "docid": "8e93400aa4a590a093eda49ba72098f0", "score": "0.66436136", "text": "def read_file(p_file:str)->list:\n lines = []\n try:\n with open(p_file,encoding=\"utf-8\") as fp:\n for line in fp:\n line = line.strip()\n if len(line) == 0:\n continue\n lines.append(line)\n except Exception as e:\n logger.error(f\"Exception reading file {p_file}, Exception {e}\")\n return lines", "title": "" }, { "docid": "5effa1ccb847fc2d5e99e7f6dc2d4bf3", "score": "0.6615406", "text": "def read_file(self):\n with open(self.path, 'r') as rfile:\n lines = rfile.readlines()\n return lines", "title": "" }, { "docid": "bc53b3bdf66a7896c8e9ea60894980cc", "score": "0.66123", "text": "def data_parser(filepath):\n\n d = [list(line)[:-1] for line in open(filepath)]\n\n return d", "title": "" }, { "docid": "1fb9051bef8d71cf0bdf2ee52d09b058", "score": "0.6605385", "text": "def load_file(filename: str, directory: str) -> List[List[str]]:\n path = '/'.join((directory, filename))\n data_bytes = pkg_resources.resource_string(__name__, path)\n data = data_bytes.decode('UTF-8', 'replace')\n rows = []\n for line in data.split('\\n'):\n if line.startswith('#') or len(line) == 0:\n continue\n s = line.split(', ')\n rows.append(s)\n return rows", "title": "" }, { "docid": "391ec64faacdec50ec4fc95fcff2988b", "score": "0.6598473", "text": "def import_file(cls):\n\t\tcls.core_lab = []\n\n\t\twith open(Data.file) as f:\n\t\t\tfor line in f:\n\t\t\t\tData.core_lab.append(list(line))\n\n\t\t\t\tif '\\n' in Data.core_lab[-1]:\n\t\t\t\t\tData.core_lab[-1].remove('\\n')\n\t\treturn Data.core_lab", "title": "" }, { "docid": "7b1cdb46531eac99f883a64604a295ea", "score": "0.65909773", "text": "def _read_file(self):\n with open(self.in_file, 'r') as file:\n return file.readlines()", "title": "" }, { "docid": "40ef5dea26edf588fc94155128998135", "score": "0.6581492", "text": "def lines(self):\n if not self.path:\n raise ValueError('Path to input file is None')\n return _read_lines(self.path)", "title": "" }, { "docid": "5acd3d57898edff0644d2ab9b79b6838", "score": "0.65658134", "text": "def _read_lines(self, fname):\n path = os.path.join(self.config_dir, fname)\n lines = open(path).readlines()\n lines.reverse()\n return lines", "title": "" }, { "docid": "0c5a49942b787b810d77b639a65693cd", "score": "0.6565063", "text": "def read_list(fname):\n content = read(fname)\n retval = list(filter(None, content.split('\\n')))\n return retval", "title": "" }, { "docid": "f05796bf921869fead9d5925d162a7ba", "score": "0.65546584", "text": "def SafeReadLines(filename):\n lines = []\n with open(filename) as f:\n for line in f.readlines():\n lines.append(line.strip())\n return lines", "title": "" }, { "docid": "7a70a5c38e51c1b9208ebca603bd629e", "score": "0.65507513", "text": "def _read_file(cls, input_file, 
skip_first_line=False):\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\n lines = []\n for i, line in enumerate(f):\n if skip_first_line:\n if i == 0:\n continue\n\n lines.append(line.strip())\n return lines", "title": "" }, { "docid": "148e331b0a178e096b170c2f95eb15c0", "score": "0.6549505", "text": "def load_data(fname):\n f = open(fname, \"r\")\n data = f.readlines()\n f.close()\n\n line_length = find_line_length(data[0])\n array_data = [] #\n print(\"id = \", line_length)\n for il, line in enumerate(data):\n array_data.append(list(data[il])[:line_length])\n return np.array(array_data)", "title": "" }, { "docid": "78003cbf48e9ec545b091e25e5ee4cc9", "score": "0.6549301", "text": "def get_input_data_as_list(file_name):\n with open(file_name) as input_file:\n #data_list = map(str.strip,input_file.readlines())\n data_list = input_file.readlines()\n return data_list", "title": "" }, { "docid": "6b9ddbc30b931be0ed11dc09260f1f00", "score": "0.6547776", "text": "def load_list(data_path: str, filename: Optional[str] = None) -> List[str]:\n if filename is not None:\n data_path = os.path.join(data_path, filename)\n with open(data_path, 'r', encoding='utf-8', errors='replace') as f:\n items = [line.strip() for line in f.readlines()]\n return items", "title": "" }, { "docid": "3c4d656767300267113d57ab23d72a6c", "score": "0.65473247", "text": "def file_readlines(filename):\n infile = open(filename, 'r')\n inlines = infile.readlines()\n infile.close()\n return(inlines)", "title": "" }, { "docid": "d46431683d2831c2416dc6cdcd605f55", "score": "0.6541277", "text": "def read_raw_data(self, file):\n self.file = file\n lines = []\n with open(file, \"r\", encoding=\"utf8\") as f:\n for line in f:\n msg_dict = json.loads(line)\n lines.append(msg_dict)\n\n print(\"\\tData read!\")\n return lines", "title": "" }, { "docid": "c3c570c641c7137fc24935982fa1b8c2", "score": "0.6532308", "text": "def load_file(self, file):\n for row_num, line in enumerate(file):\n row = self.load_row(line, row_num)\n if row: self.grid.append(row)\n return self.grid", "title": "" }, { "docid": "12adad398866ff5cdfeec96a78adc086", "score": "0.65299064", "text": "def read_file(filename):\n f = open(filename)\n lines = f.read().splitlines()\n return lines", "title": "" }, { "docid": "6d289073c737ecc965670d922fb9fcad", "score": "0.6527899", "text": "def read_lines(path): \n with open(path,'r') as file:\n lines=file.readlines()\n return lines", "title": "" }, { "docid": "1d94b9bb600dd11828f6bb087a837d75", "score": "0.65268147", "text": "def load(self,f):\n if type(f) is str:\n f = open(f,\"r\")\n line = f.readline()\n self.load_line(line)", "title": "" }, { "docid": "373a69fa0d23015ef14d30714d7bf01d", "score": "0.6523287", "text": "def get_lines(path) -> List[str]:\n with (file := open(path, \"r\")): # close file after opening\n return [line for line in file]", "title": "" }, { "docid": "ad651b2c2994a753bd543883370f4f51", "score": "0.65144664", "text": "def read_list(file):\n l = []\n if os.path.exists(file):\n with open(file) as f:\n l.extend(f.read().splitlines())\n else:\n logging.debug('%s not found' % file)\n return l", "title": "" }, { "docid": "2536ef113ef38192f7ca90de58d81388", "score": "0.65108645", "text": "def read_in_load(self, filename, number_of_orders):\n count = 0\n with open(filename, \"r\") as f:\n self.entire_load.append(LinkedList())\n for line in f.readlines()[1:]:\n if(\"##\" in line):\n count += 1\n self.entire_load.append(LinkedList())\n continue\n if(\"base\" in line): 
self.entire_load[count].insert_node(self.read_base(line))\n            if(\"top\" in line): self.entire_load[count].insert_node(self.read_top(line))\n            if(\"panel\" in line): self.entire_load[count].insert_node(self.read_panel(line))", "title": "" }, { "docid": "69166b4656869d3ed2c7caa5f6e0e11a", "score": "0.65102345", "text": "def load_data():\r\n    try:\r\n        fh = open('index.html')\r\n    except:\r\n        lstOfLines = None\r\n    else: # Only gets executed if no exception was raised\r\n        lstOfLines = fh.readlines()\r\n        fh.close()\r\n    return lstOfLines", "title": "" }, { "docid": "a905fcea211ad120b19549c0c79a1836", "score": "0.6500842", "text": "def read_lines_from_file(filename: str):\n    if not os.path.isfile(filename):\n        return None\n    with open(filename, 'r') as file:\n        for line in file:\n            yield line.strip('\\n').strip(' ')", "title": "" }, { "docid": "01514fe52da25f22b10cddc25e71959d", "score": "0.6498004", "text": "def _load(file_path: Path) -> List:\n    def _loader(line):\n        return to_array_representation(json.loads(line)[\"labels\"])\n\n    return [_loader(_) for _ in open(file_path)]", "title": "" }, { "docid": "28b2896efdc176368e159a948fe909fc", "score": "0.6497245", "text": "def loadFile(arg_file, lineEndings=True):\n    result = []\n    with open(arg_file, \"r\") as inventory_file:\n        result = inventory_file.readlines()\n    if(lineEndings is False):\n        for i, row in enumerate(result):\n            result[i] = row[:-1]\n\n    return result", "title": "" }, { "docid": "f804f1dabc8873f01eb7c5e25d6c45e1", "score": "0.649663", "text": "def load_into_memory(self, filename):\n        r = re.compile(r'Step\\s([A-Z]).+step\\s([A-Z])')\n        with open(filename) as f:\n            data = [re.findall(r, line)[0] for line in f.readlines()]\n        return data", "title": "" }, { "docid": "a3b02cc232825c0d6da867056ef47573", "score": "0.6491406", "text": "def file_set_to_list(fname):\n    with open(fname, encoding='utf-8') as file:\n        lines = [line.strip().split('## ') for line in file]\n    return lines", "title": "" }, { "docid": "0a0440e79039cdf6966ab18ec57c0f52", "score": "0.649136", "text": "def __read_data(self):\n        data_list = []\n        file_stream = open(self.data_id, \"r\", encoding=\"utf-8\")\n        for line in file_stream:\n            data_list.append(line.strip().split(\",\"))\n        file_stream.close()\n        return data_list", "title": "" }, { "docid": "c4fe6935332b1e635bc8211861e82107", "score": "0.64891964", "text": "def parse_txt_file(self, fpath, line_parse_fn=None):\n        with open(os.path.join(self.data_path, fpath), 'r') as f:\n            lines = f.readlines()\n        data = [line_parse_fn(s) if line_parse_fn is not None else s for s in lines]\n        return data", "title": "" }, { "docid": "777be98125a83ca3041549550c0e1076", "score": "0.64776975", "text": "def filelines_to_list(file):\n    with open(file, 'rU') as file_handle:\n        file_list = [ind.rstrip() for ind in file_handle.readlines()]\n    return file_list", "title": "" }, { "docid": "eda383e6acaa0efa8caf7f95cf928c1e", "score": "0.64565986", "text": "def load_file(filepath, delimiter):\n    fp = open(filepath, \"r\")\n    for line in fp:\n        read_data.append(line.split(delimiter))", "title": "" }, { "docid": "d8ae1b336bcd18bf869508ab97918e80", "score": "0.64509374", "text": "def openfile(input_file):\r\n    with open(input_file) as f:\r\n        my_list = f.read().splitlines()\r\n    return my_list", "title": "" }, { "docid": "7e7dae50390188dacee6fc1965a57166", "score": "0.6450738", 
"text": "def read_file_to_list(filename):\n with open(filename, 'r') as inputfile:\n lines_list = inputfile.readlines()\n #lines_list = [line.rstrip('\\n') for line in lines_list] # via list comprehension\n lines_list = list(map(lambda it: it.rstrip(), lines_list)) # via map\n return lines_list", "title": "" }, { "docid": "7e7dae50390188dacee6fc1965a57166", "score": "0.6450738", "text": "def read_file_to_list(filename):\n with open(filename, 'r') as inputfile:\n lines_list = inputfile.readlines()\n #lines_list = [line.rstrip('\\n') for line in lines_list] # via list comprehension\n lines_list = list(map(lambda it: it.rstrip(), lines_list)) # via map\n return lines_list", "title": "" }, { "docid": "5a1b203648aebb4131611ba518f4c5f0", "score": "0.64388543", "text": "def get_lines(filename):\n result = []\n with open(filename) as f:\n for line in f.readlines():\n result.append(line.strip())\n return result", "title": "" }, { "docid": "6e40099e882f3164d1f2b2fadde91dce", "score": "0.6431946", "text": "def ListFromFile(self, filePath):\n try:\n # open file\n listFromFile = []\n f = open(filePath, \"rt\")\n for line in f:\n line = line.strip()\n listFromFile.append(line)\n f.close()\n return listFromFile\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n errMsg = '''\n Error msg: %s\n Error line no: %s\n Error type: %s\n ''' % (str(e), exc_tb.tb_lineno, exc_type)\n print(errMsg)\n return []", "title": "" }, { "docid": "91954ef650a64697f5dadc16030642bd", "score": "0.6429139", "text": "def __read_file_content(file: str) -> list:\n with open(file, \"r\") as handle:\n return load_json(handle)", "title": "" }, { "docid": "ae0d42d650b9e344b04150e2d5110710", "score": "0.6424695", "text": "def load(self):\n with open(self.input_data, \"r\") as f:\n reader = csv.reader(f, delimiter=\",\")\n for line in reader:\n yield line", "title": "" }, { "docid": "5d817e1f5cce30a10d3d83561bd875b7", "score": "0.6423895", "text": "def read_file(file_path: str) -> Iterable[str]:\n with open(file_path) as opened_file:\n contents = opened_file.readlines()\n return [line.strip() for line in contents]", "title": "" } ]
3463fa2a7195aa259691ae9fe130ef14
Decorates a function with args so it can be used within an arg_scope.
[ { "docid": "0c53418651a9027814b4fb43bc4eca4c", "score": "0.7400191", "text": "def add_arg_scope(func):\n @functools.wraps(func)\n def func_with_args(*args, **kwargs):\n current_scope = _current_arg_scope()\n current_args = kwargs\n key_func = _key_op(func)\n if key_func in current_scope:\n current_args = current_scope[key_func].copy()\n current_args.update(kwargs)\n return func(*args, **current_args)\n _add_op(func)\n setattr(func_with_args, '_key_op', _key_op(func))\n setattr(func_with_args, '__doc__', func.__doc__)\n return func_with_args", "title": "" } ]
[ { "docid": "2ff98bbb53e97fe0c10e1aae00c485aa", "score": "0.77008224", "text": "def _decorator(func):\n add_arg(func, *args, **kwargs)\n return func", "title": "" }, { "docid": "8ca7fcfaf5b9f97666178435f80a7e76", "score": "0.73563796", "text": "def expanded_args(func):\n\n @mywraps(func, doc_prefix=f\"expanded_args version of {func_name(func)}\")\n def _func(*args):\n return func(args)\n\n return _func", "title": "" }, { "docid": "33df27bfe51a626410fd72a76400938e", "score": "0.73142", "text": "def arg(*args, **kwargs):\n def annotate(func):\n # Get the list of argparse args already added to func (if any).\n argparse_args_list = getattr(func, 'ARGPARSE_ARGS_LIST', [])\n # Since we're only annotating (not wrapping) the function, appending\n # the argument to the list would result in the decorators being applied\n # in reverse order. To prevent that, we simply add to the beginning.\n argparse_args_list.insert(0, (args, kwargs))\n setattr(func, 'ARGPARSE_ARGS_LIST', argparse_args_list)\n return func\n return annotate", "title": "" }, { "docid": "5df84a4ef3067c0b2ac9c348886e741b", "score": "0.7130687", "text": "def arg(*args, **kwargs):\n\n def _decorator(func):\n if not hasattr(func, 'arguments'):\n func.arguments = []\n if (args, kwargs) not in func.arguments:\n func.arguments.insert(0, (args, kwargs))\n\n return func\n\n return _decorator", "title": "" }, { "docid": "79ab495a170620aae8b37bb09fef5e61", "score": "0.6907225", "text": "def arg(*args, **kwargs):\n def _decorator(func):\n \"\"\" Auxiliary function to use decorator for CLI args.\n\n :param func: A function that will be executed.\n :return: A function decorator\n \"\"\"\n add_arg(func, *args, **kwargs)\n return func\n return _decorator", "title": "" }, { "docid": "daad6b4e5483eb85d7bf119338f038f6", "score": "0.6850717", "text": "def apply_guard(*args, **kwargs):\n if os.getenv(environment_name) is not None:\n # Noting application of decorator\n decorator_trace_key = \"DECORATOR_TRACE\"\n if os.getenv(decorator_trace_key) is not None:\n # Building trace for call\n trace = {\n \"function_name\": str(decorator_func),\n \"closures\": [\n str(closure.cell_contents)\n for closure in decorator_func.__closure__\n if closure.cell_contents\n ]\n if decorator_func.__closure__\n else None,\n }\n # Recording trace\n current_trace = json.loads(os.getenv(decorator_trace_key))\n current_trace.append(trace)\n os.environ[decorator_trace_key] = json.dumps(current_trace)\n\n # Use original function\n return original_func(*args, **kwargs)\n # Use decorated function\n return decorated_func(*args, **kwargs)", "title": "" }, { "docid": "09c1f24beea2890df5d266577ae67667", "score": "0.68431383", "text": "def autoinject(*injected_args):\n\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n add_injectd_args(kwargs, injected_args)\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator", "title": "" }, { "docid": "9245782c7e36ed0fc7811d13ba86f6ad", "score": "0.6802228", "text": "def to_decorator(wrapped_func):\n @functools.wraps(wrapped_func)\n def arg_wrapper(*outer_args, **outer_kwargs):\n def decorator(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n return wrapped_func(func,\n args,\n kwargs,\n *outer_args,\n **outer_kwargs)\n return wrapped\n return decorator\n return arg_wrapper", "title": "" }, { "docid": "c89d83a45482fa9047de0e4eb91aa35d", "score": "0.66456354", "text": "def decorate(func):\n for k in kwargs:\n setattr(func, k, kwargs[k])\n return func", "title": "" }, { "docid": 
"8e84478f632446cff4a425568f49047b", "score": "0.652916", "text": "def __call__(self, f):\n print(\"Inside __call__()\")\n\n def wrapped_f(*args):\n print(\"Inside wrapped_f()\")\n print(\"Decorator arguments:\", self.arg1, self.arg2, self.arg3)\n f(*args)\n print(\"After f(*args)\")\n\n return wrapped_f", "title": "" }, { "docid": "a598f6d9bed117479f2b4aed8b2e19c2", "score": "0.65261", "text": "def wrapper(*args, **kwargs):\n print(f'Here are the args: {args}')\n print(f'Here are the kwargs: {kwargs}')\n return fn(*args, **kwargs)", "title": "" }, { "docid": "fe92e3d7ae0dadf19107845405ad0190", "score": "0.6506447", "text": "def evaluate_arguments(fcn):\r\n # no arguments passed to decorator -> expand all arguments\r\n @wraps(fcn)\r\n def wrap(self, *args, **kwargs):\r\n shell = get_core_plugin().shell\r\n new_args = []\r\n for i, param in enumerate(args):\r\n try:\r\n new_args.append(shell.ev(param))\r\n except Exception as ex:\r\n logging.debug('Unable to evaluate argument %d (%s): (%s) %s' %\r\n (i, param, ex.__class__.__name__, ex))\r\n new_args.append(param)\r\n\r\n return fcn(self, *new_args, **kwargs)\r\n\r\n return wrap", "title": "" }, { "docid": "b2249b057107ed959fbde9511e7152a4", "score": "0.6467903", "text": "def method(*args, expand_args=True):\n if len(args) > 1:\n raise TypeError(\n 'only one positional argument is expected (function to decorate)'\n )\n\n def decorator(function):\n decorator_chain = [(expand_args, utils.decorators.expand_args)]\n for enabled, decorator in decorator_chain:\n if enabled:\n function = decorator(function)\n setattr(function, EXPOSE_MARKER, True)\n return function\n\n if args:\n function, = args\n if not callable(function):\n raise TypeError('decorator should be applied to a callable')\n return decorator(function)\n return decorator", "title": "" }, { "docid": "b328eb3497dde152309fa269d8b9ee7b", "score": "0.6384378", "text": "def arg_scoped_arguments(func):\n assert has_arg_scope(func)\n return _DECORATED_OPS[_key_op(func)]", "title": "" }, { "docid": "b950acd79c29dfa30798dde8a61cb92d", "score": "0.6326641", "text": "def florinate(func):\n @functools.wraps(func)\n def wrapper(*wrapper_args, **wrapper_kwargs):\n \"\"\"Function wrapper that saves args and keyword arguments.\"\"\"\n @functools.wraps(func)\n def delayed(*args, **kwargs):\n \"\"\"Wrapper for deferred function calls with persistent arguments\"\"\"\n kwargs.update(wrapper_kwargs)\n if isinstance(args[0], Sequence) and isinstance(args[0][-1], FlorinContext):\n innerargs = tuple(args[0][:-1]) + wrapper_args\n return func(*innerargs, **kwargs), args[0][-1]\n else:\n innerargs = args + wrapper_args\n return func(*innerargs, **kwargs)\n return delayed\n return wrapper", "title": "" }, { "docid": "08a6136763e0a9ca1d5020a9273d0d38", "score": "0.6322143", "text": "def decorate( func ) :\n namespace = func2namespace( func )\n\n def cacheit( *args, **kwargs ) :\n \"\"\"This function replaces the target function 'func'. Accepts\n 'args' and 'kwargs' arguments when ever the target function is\n called. 
Caches the value returned by the target function `func`\n under previously computed `key`\"\"\"\n\n # Dirty heuristics,\n cachemgr = h.fromconfig( 'cachemgr' )\n\n cachenm[0]= cachenm[0] or cachemgr.get_cache( namespace, **cachekwargs )\n cache_key = key\n if useargs and args :\n cache_key += ( \" \" + \" \".join([ str(x) for x in args ]) )\n\n def dofun():\n if args and kwargs :\n return func( *args, **kwargs )\n elif args :\n return func( *args )\n elif kwargs :\n return func( **kwargs )\n\n # Do calling and caching.\n return cachenm[0].get( key=sha1(cache_key).hexdigest(),\n createfunc=dofun )\n\n cacheit._namespace = namespace\n cacheit._kwargs = cachekwargs\n cacheit._cache_key = key # Without including positional arguments\n cacheit._useargs = useargs\n\n return cacheit", "title": "" }, { "docid": "329fba378de39800ece085f57ded4540", "score": "0.63096994", "text": "def decorator(self, *args, **kwargs):\n \n def wrapper(*args):\n return adder_(self, *args, **kwargs)\n if args:\n return wrapper(*args)\n return wrapper", "title": "" }, { "docid": "8cb50e05e29d9d89b4c8f9e2671b5f7e", "score": "0.62366194", "text": "def with_args(self, *args: object, **kwargs: object) -> \"MarkDecorator\":\n mark = Mark(self.name, args, kwargs, _ispytest=True)\n return MarkDecorator(self.mark.combined_with(mark), _ispytest=True)", "title": "" }, { "docid": "f92f4c921764b7a5d0277b270f9a6b89", "score": "0.62211", "text": "def add_arg(func, *args, **kwargs):\n\n if not hasattr(func, 'arguments'):\n func.arguments = []\n\n # NOTE(sip): avoid dips that can occur when the module is shared across\n # tests.\n if (args, kwargs) not in func.arguments:\n # Because of the semantics of decorator composition if we just append\n # to the options list positional options will appear to be backwards.\n func.arguments.insert(0, (args, kwargs))", "title": "" }, { "docid": "b4f066047edcdb7da302341e7fb1dc4f", "score": "0.6191151", "text": "def placebo(*args, **kwargs):\n def decorate(function):\n return function\n return decorate", "title": "" }, { "docid": "51db472c22da88c5728eb4140355f551", "score": "0.6178955", "text": "def make_args_iterable(argnames):\n def decorator(f):\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n callargs = inspect.getcallargs(f, *args, **kwargs)\n for name in argnames:\n callargs[name] = make_iterable(callargs[name])\n return f(**callargs)\n\n wrapper.__signature__ = inspect.signature(f)\n return wrapper\n\n return decorator", "title": "" }, { "docid": "1212d9ddce90929b87f1db0558658036", "score": "0.6137547", "text": "def wrapper(func, *args, **kwargs):\n\n def wrapped():\n return func(*args, **kwargs)\n\n return wrapped", "title": "" }, { "docid": "47c12111717b4820b0cc950434e6188f", "score": "0.6128276", "text": "def apply_guard(*args, **kwargs):\n if os.getenv(\"TESTING\") is not None:\n return original_func(*args, **kwargs)\n return decorator_func(*decorator_args)(original_func)(*args, **kwargs)", "title": "" }, { "docid": "f48aecabef4f5018b861c0aafb9c9d3a", "score": "0.6117267", "text": "def apply(self, func, *args, **kwargs):\r\n func = _intercept_function(func)\r\n\r\n @wraps(func)\r\n def f(g):\r\n return func(g, *args, **kwargs)\r\n\r\n return self._python_apply_general(f)", "title": "" }, { "docid": "37d30277136b2375a99563a97523a001", "score": "0.6018616", "text": "def format_arguments(func):\n\n\tfrom functools import wraps\n\t@wraps(func)\n\tdef format_args_and_call(self,*args,**kwargs):\n\t\t# from pudb import set_trace;set_trace()\n\n\t\targs = list(args)\n\t\tif 
args:\n\t\t\tfirst = args[0]\n\t\t\targs = args[1:]\n\t\t\tformatter = \"\"\n\t\t\tfor arg in args:\n\t\t\t\tformatter += ' ' + str(arg)\n\t\t\tfirst = str(first) + formatter\n\t\telse:\n\t\t\tfirst = \"\"\n\t\treturn func(self,first,**kwargs)\n\treturn format_args_and_call", "title": "" }, { "docid": "27e5e6b68e60b71703036b859cf39343", "score": "0.5995178", "text": "def decorate(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n \"\"\"Perform outer functionality wrapping core functionality\"\"\"\n return '>>> {} <<<'.format(func(*args, **kwargs))\n return func_wrapper", "title": "" }, { "docid": "30ebbd1608b14efda1d973c89d559e94", "score": "0.5992306", "text": "def lru_cache_freezeargs(func):\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n args = tuple([frozendict(arg) if isinstance(arg, dict) else arg for arg in args])\n kwargs = {k: frozendict(v) if isinstance(v, dict) else v for k, v in kwargs.items()}\n args = tuple([tuple(arg) if isinstance(arg, list) else arg for arg in args])\n kwargs = {k: tuple(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n return func(*args, **kwargs)\n\n # copy over the lru_cache extra methods to this wrapper to be able to access them\n # after this decorator has been applied\n wrapped.cache_info = func.cache_info\n wrapped.cache_clear = func.cache_clear\n return wrapped", "title": "" }, { "docid": "2acb1a104ec3ecd179033bebed0d3c86", "score": "0.5978282", "text": "def custom_decorator(func):\n def wrapper(*args, **kwargs):\n \"\"\"\n Custom decorator wrapper.\n \"\"\"\n return func(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "577b940a2f6c3187d926a576c7f006f5", "score": "0.5943994", "text": "def doublewrap(function):\n\n @functools.wraps(function)\n def decorator(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return function(args[0])\n else:\n return lambda wrapee: function(wrapee, *args, **kwargs)\n\n return decorator", "title": "" }, { "docid": "577b940a2f6c3187d926a576c7f006f5", "score": "0.5943994", "text": "def doublewrap(function):\n\n @functools.wraps(function)\n def decorator(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return function(args[0])\n else:\n return lambda wrapee: function(wrapee, *args, **kwargs)\n\n return decorator", "title": "" }, { "docid": "d5d07b41099baf5317d8c726e4ea2a79", "score": "0.5920096", "text": "def replacement(original_func):\n\n def apply_guard(*args, **kwargs):\n \"\"\"Decides whether to use decorator on function call.\"\"\"\n if os.getenv(\"TESTING\") is not None:\n return original_func(*args, **kwargs)\n return decorator_func(*decorator_args)(original_func)(*args, **kwargs)\n\n return apply_guard", "title": "" }, { "docid": "14c1a3e67fb0d3aad954b2a6f95d5e94", "score": "0.59054565", "text": "def decorator2(fn=None, dec_arg=''):\n def fn_wrapper(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n return dec_arg + fn(*args, **kwargs)\n return wrapper\n if fn is not None:\n return fn_wrapper(fn)\n return fn_wrapper", "title": "" }, { "docid": "d2330efcf425aa7d50e83a4ee48e995f", "score": "0.590461", "text": "def modifies_argn(n):\n def modifies_arg(fn, self, *args, **kwargs):\n arg = args[n]\n _force_copies(arg)\n\n retval = fn(self, *args, **kwargs)\n\n arg._version_bump()\n\n return retval\n return decorator(modifies_arg)", "title": "" }, { "docid": "ae73d91acee621b8a9abd5eb75847c38", "score": "0.59008396", "text": "def decorator3(fn=None, dec_arg=''):\n if fn is None:\n return 
functools.partial(decorator3, dec_arg=dec_arg)\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n return dec_arg + fn(*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "1a97ac3dfff1905cc278739e95dfc75c", "score": "0.59004277", "text": "def wrapper(*wrapper_args, **wrapper_kwargs):\n @functools.wraps(func)\n def delayed(*args, **kwargs):\n \"\"\"Wrapper for deferred function calls with persistent arguments\"\"\"\n kwargs.update(wrapper_kwargs)\n if isinstance(args[0], Sequence) and isinstance(args[0][-1], FlorinContext):\n innerargs = tuple(args[0][:-1]) + wrapper_args\n return func(*innerargs, **kwargs), args[0][-1]\n else:\n innerargs = args + wrapper_args\n return func(*innerargs, **kwargs)\n return delayed", "title": "" }, { "docid": "db21ca74a29c73341f1e1f381b5887e7", "score": "0.58916664", "text": "def inject_sig(func, wrapper, *args, **kwargs):\n\n sig = inspect.signature(func)\n parms = list(sig.parameters.values())\n parms.append(inspect.Parameter(*args, **kwargs))\n wrapper.__signature__ = sig.replace(parameters=parms)", "title": "" }, { "docid": "0b76bb91f5ace7a500d37dcff61d6d27", "score": "0.5890802", "text": "def kwargify(f):\n @wraps(f)\n def wrapped(**kwargs):\n args = []\n for arg in inspect.getargspec(f).args:\n if arg not in kwargs:\n raise TypeError(\n \"Required parameter {0} not found in the \"\n \"context!\".format(arg)\n )\n args.append(kwargs[arg])\n return f(*args)\n return wrapped", "title": "" }, { "docid": "4b7b07eea01553f25ece22851610d46a", "score": "0.5873625", "text": "def args(\n self, short_name, long_name=None, nargs=1, help=None, action=\"store\", type=None\n ):\n\n def decorator(func):\n if long_name:\n self.map.set(\n name=long_name.strip(\"-\"),\n func=func,\n func_name=func.__name__,\n short_name=short_name,\n long_name=long_name,\n nargs=nargs,\n help=help,\n action=action,\n type=type,\n )\n else:\n self.map.set(\n name=short_name.strip(\"-\"),\n func=func,\n func_name=func.__name__,\n short_name=short_name,\n long_name=long_name,\n nargs=nargs,\n help=help,\n action=action,\n type=type,\n )\n\n @functools.wraps(func)\n def wrapper(*args, **kw):\n return func(*args, **kw)\n\n return wrapper\n\n return decorator", "title": "" }, { "docid": "5c5464f7287f45d8bc9f45b153812f7f", "score": "0.58726376", "text": "def decorator1(fn_or_dec_arg):\n if callable(fn_or_dec_arg):\n # in this case fn_or_dec_arg is original function\n @functools.wraps(fn_or_dec_arg)\n def wrapper(*args, **kwargs):\n return fn_or_dec_arg(*args, **kwargs)\n return wrapper\n else:\n # in this case fn_or_dec_arg in decorator argument\n def fn_wrapper(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n return fn_or_dec_arg + fn(*args, **kwargs)\n return wrapper\n return fn_wrapper", "title": "" }, { "docid": "5161ad4f724b6885008294639901c0d2", "score": "0.58696914", "text": "def decorated_function(*args, **kwargs):\r\n print(\"I'm above foo\")\r\n f()\r\n print(\"I'm below foo\")\r\n print(\"d name\", decorated_function.__name__)", "title": "" }, { "docid": "fddf1045d11f04928c7f15d5fb1fbdf0", "score": "0.5868926", "text": "def wrapped(argparse_namespace=None, **kwargs):\n if not argparse_namespace:\n return func(**kwargs)\n\n reserved_namespace_keywords = ['func']\n new_kwargs = {}\n\n args = argspec.args or []\n for arg_name in args:\n passed_value = getattr(argparse_namespace, arg_name, None)\n if passed_value is not None:\n new_kwargs[arg_name] = passed_value\n\n for namespace_key in vars(argparse_namespace).keys():\n # ignore namespace keywords that have 
been set not passed in via cli\n if namespace_key in reserved_namespace_keywords:\n continue\n\n # make sure that we haven't passed something we should be processing\n if namespace_key not in args:\n raise ValueError('CLI argument \"{}\" does not match any argument in '\n 'function {}'.format(namespace_key, func.__name__))\n\n return func(**new_kwargs)", "title": "" }, { "docid": "25d2b05d2cea0b93c42080b9ec515b3a", "score": "0.5864549", "text": "def wrapper(*args, **kwargs):\n print('Hello')\n return func(*args, **kwargs)", "title": "" }, { "docid": "25d2b05d2cea0b93c42080b9ec515b3a", "score": "0.5864549", "text": "def wrapper(*args, **kwargs):\n print('Hello')\n return func(*args, **kwargs)", "title": "" }, { "docid": "25d2b05d2cea0b93c42080b9ec515b3a", "score": "0.5864549", "text": "def wrapper(*args, **kwargs):\n print('Hello')\n return func(*args, **kwargs)", "title": "" }, { "docid": "79005ba75e9fddaa91a14f289cc361fb", "score": "0.58532643", "text": "def decorate(self, fn) -> Callable:\n if callable(fn):\n return self._sub_decorate(fn, size_limit=self.size_limit)\n else:\n return self._sub_decorate(fn[0], fn[1])", "title": "" }, { "docid": "56e79b56ef1c6763570d7a481a215c5d", "score": "0.5846338", "text": "def signature_extender(fcn, extra_args):\r\n\r\n def closure(x, grad, *args):\r\n return fcn(x, grad, *extra_args)\r\n\r\n return closure", "title": "" }, { "docid": "da64800cbf92a2ca0db4897382d60048", "score": "0.5844084", "text": "def debug_decorator(func):\r\n def decorated_func(*args, **kwargs):\r\n with debug_context(func.__name__):\r\n return_value = func(*args, **kwargs)\r\n return return_value\r\n return decorated_func", "title": "" }, { "docid": "eb95e9a0c1a27fe6af6987fc7c32a26c", "score": "0.58183587", "text": "def apply(func, args, kwargs=None):\n if kwargs:\n return func(*args, **kwargs)\n else:\n return func(*args)", "title": "" }, { "docid": "bfb84fb79454d0c579615cee001a05cc", "score": "0.5805215", "text": "def func_wrapper(func, apply_nonce, context_key, args, kwargs):\n from importlib import import_module\n import types\n\n main = import_module('__main__')\n prefix = main.distarray.utils.DISTARRAY_BASE_NAME\n main.proxyize.set_state(apply_nonce)\n\n # Modify func to change the namespace it executes in.\n # but builtins don't have __code__, __globals__, etc.\n if not isinstance(func, types.BuiltinFunctionType):\n # get func's building blocks first\n func_code = func.__code__\n func_globals = func.__globals__ # noqa we don't need these.\n func_name = func.__name__\n func_defaults = func.__defaults__\n func_closure = func.__closure__\n\n # build the func's new execution environment\n main.__dict__.update({'context_key': context_key})\n new_func_globals = main.__dict__\n # create the new func\n func = types.FunctionType(func_code, new_func_globals,\n func_name, func_defaults,\n func_closure)\n # convert args\n args = list(args)\n for i, a in enumerate(args):\n if (isinstance(a, str) and a.startswith(prefix)):\n args[i] = main.reduce(getattr, [main] + a.split('.'))\n args = tuple(args)\n\n # convert kwargs\n for k in kwargs.keys():\n val = kwargs[k]\n if (isinstance(val, str) and val.startswith(prefix)):\n kwargs[k] = main.reduce(getattr, [main] + val.split('.'))\n\n return func(*args, **kwargs)", "title": "" }, { "docid": "f642c35425e1ac6140aca164c0b13688", "score": "0.5801676", "text": "def decorator(target):\r\n\r\n def decorate(fn):\r\n if not inspect.isfunction(fn):\r\n raise Exception(\"not a decoratable function\")\r\n spec = inspect_getfullargspec(fn)\r\n names = 
tuple(spec[0]) + spec[1:3] + (fn.func_name,)\r\n targ_name, fn_name = _unique_symbols(names, 'target', 'fn')\r\n\r\n metadata = dict(target=targ_name, fn=fn_name)\r\n metadata.update(format_argspec_plus(spec, grouped=False))\r\n\r\n code = 'lambda %(args)s: %(target)s(%(fn)s, %(apply_kw)s)' % (\r\n metadata)\r\n decorated = eval(code, {targ_name: target, fn_name: fn})\r\n decorated.func_defaults = getattr(fn, 'im_func', fn).func_defaults\r\n return update_wrapper(decorated, fn)\r\n return update_wrapper(decorate, target)", "title": "" }, { "docid": "09519d0b50ea33dd21a96230a40ce196", "score": "0.5785688", "text": "def adder(attribute, wrapper, name, docstring):\n \n def adder_(self, function, name=None):\n \"\"\"Add the function to the environment with wrappers, etc.\"\"\"\n \n key = name or function.__name__\n value = wrapper and wrapper(function) or function\n getattr(self, attribute)[key] = value\n return function\n \n def decorator(self, *args, **kwargs):\n \"\"\"Boilerplate which allows both normal calling and decoration.\"\"\"\n \n def wrapper(*args):\n return adder_(self, *args, **kwargs)\n if args:\n return wrapper(*args)\n return wrapper\n \n decorator.__name__ = name\n decorator.__doc__ = docstring\n return decorator", "title": "" }, { "docid": "1769e84558e36f489ca9e6d2aaecc98d", "score": "0.5785635", "text": "def _rebind(decorator, func, *args, **kwargs):\n parameters = signature(func).parameters.values()\n decorated_parameters = set(signature(decorator).parameters.keys())\n\n positional_kwargs = dict(\n zip(\n [\n parameter.name\n for parameter in parameters\n if parameter.kind\n in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)\n and parameter.name not in kwargs\n ],\n args,\n )\n )\n return {k: kwargs.get(k) or positional_kwargs[k] for k in decorated_parameters}", "title": "" }, { "docid": "d2c982e2898ea56d90011570fb35628f", "score": "0.5771336", "text": "def debug_wrapper(f, *args, **kwargs):\n with debug_context():\n f(*args, **kwargs)", "title": "" }, { "docid": "77440533ce27f701f432b948d46bcd4b", "score": "0.57667714", "text": "def decorate(cls, function):\n if cls.decoration_on:\n @wraps(function)\n def new_function(*args, **kwargs):\n print(\"Decoration!\")\n function(*args, **kwargs)\n print(\"End decoration!\")\n return new_function\n else:\n return function", "title": "" }, { "docid": "9757aff0591141d2e5b784702006289d", "score": "0.5762588", "text": "def tracer(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n params = ', '.join(tuple(f'{a}' for a in args)\n + tuple(f'{k}={v}' for k, v in kwargs.items()))\n LOG.debug(f'{func.__name__}({params})')\n return func(*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "4ec41003a0713f3b556d48b8065cc810", "score": "0.57597244", "text": "def use_args(\n self,\n argmap: ArgMap,\n req: Optional[Request] = None,\n *,\n location: Optional[str] = None,\n unknown: Optional[str] = _UNKNOWN_DEFAULT_PARAM, # pylint: disable=unused-argument\n as_kwargs: bool = False,\n validate: Optional[ValidateArg] = None,\n error_status_code: Optional[int] = None,\n error_headers: Optional[Mapping[str, str]] = None,\n ) -> Callable:\n location = location or self.location\n request_obj = req\n # Optimization: If argmap is passed as a dictionary, we only need\n # to generate a Schema once\n if isinstance(argmap, Mapping):\n argmap = Schema.from_dict(argmap)()\n\n def decorator(func: Callable) -> Callable:\n req_ = request_obj\n\n @functools.wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Callable:\n req_obj = 
req_\n\n if not req_obj:\n req_obj = self.get_request_from_view_args(func, args, kwargs) # pylint: disable=assignment-from-none # noqa: E501\n\n # NOTE: At this point, argmap may be a Schema, or a callable\n parsed_args = self.parse(\n args[0], # This should be the self of the resource object\n argmap,\n req=req_obj,\n location=location,\n validate=validate,\n error_status_code=error_status_code,\n error_headers=error_headers,\n )\n args, kwargs = self._update_args_kwargs( # type: ignore\n args, kwargs, parsed_args, as_kwargs, # type: ignore\n )\n return func(*args, **kwargs)\n\n wrapper.__wrapped__ = func # type: ignore\n return wrapper\n\n return decorator", "title": "" }, { "docid": "182c622b73468752c83076a7edc8fb1b", "score": "0.5758777", "text": "def decorated_function(*args, **kwargs):\n result = func(*args, **kwargs)\n if result is None:\n raise ContextError(msg)\n return result", "title": "" }, { "docid": "b391f31d6de64c1fb6ce09025cbf8900", "score": "0.5751768", "text": "def sheild_args(ctx):\n log_args = encrypt_args(list(ctx.args), ctx.func)\n\n for offset in ctx.service.get('__apis_to_shield', {})\\\n .get(ctx.func.func_name, set()):\n log_args[offset] = PLACE_HOLDER\n\n if not log_args:\n return '()'\n return '({0})'.format(','.join(repr(i) for i in log_args))", "title": "" }, { "docid": "760b831c815030c35f3ed598fa878eeb", "score": "0.57507384", "text": "def check_argument(func):\n\n def wrapper(*args, **kwargs):\n if len(args) == 2 and issubclass(args[0].__class__, BaseDecorator) and hasattr(args[1], '__call__'):\n ## decorate a class function\n args[0].func = args[1]\n return args[0]\n elif len(args) >= 2 and issubclass(args[0].__class__, BaseDecorator) and hasattr(args[1], '__call__'):\n ## decorate a class function\n return func(args[0], args[1], args[2], *args[3:], **kwargs)\n else:\n ## decorate a function\n return func(args[0], None, None, *args[1:], **kwargs)\n # return func(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "7dbc84ac5e993efb3f2e49625bccde52", "score": "0.5734796", "text": "def _allow_keyword_args(f):\r\n def new_fn(*args):\r\n assert args\r\n keyword_args = args[-1]\r\n args = args[:-1]\r\n return f(*args, **keyword_args)\r\n new_fn.func_name = f.func_name\r\n return new_fn", "title": "" }, { "docid": "5fe17eea57b0d1832157d65c65ec22aa", "score": "0.57260406", "text": "def wrapper(function):\n\n @functools.wraps(function)\n def wrapped(*args, **kwargs):\n \"\"\"\n Wrapped function.\n \"\"\"\n\n with context:\n return function(*args, **kwargs)\n\n return wrapped", "title": "" }, { "docid": "62a5ffe2459e3b2887d837cc05cb18e0", "score": "0.57127273", "text": "def decorate_erl_string_args(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n args = [erlport.erlterms.List(arg).to_string().encode('raw_unicode_escape').decode('utf-8') for arg in args]\n return fn(*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "d3bf7db9b0a0a5d3c7abe6951d29be67", "score": "0.5710847", "text": "def adds(arg):\r\n def decorator(fn):\r\n setattr(fn, '_sa_instrument_before', ('fire_append_event', arg))\r\n return fn\r\n return decorator", "title": "" }, { "docid": "190aaf3d784f9946aed58fd8a10762e3", "score": "0.5707951", "text": "def apply(function, args=(), kwds={}):\n return function(*args, **kwds)", "title": "" }, { "docid": "87a883756aa2e9d7b5a78f315ca4afa2", "score": "0.5703971", "text": "def RewriteAddArgs(builder, args):\n return AddArgs(builder, args)", "title": "" }, { "docid": "6db37903cc2ec5e8ff047e0c96618d03", "score": "0.570366", 
"text": "def _pass_func(*args, **kwargs):\n pass", "title": "" }, { "docid": "58f97ebf16669ea9e368654d6f7c0291", "score": "0.56919676", "text": "def floatArguments(func):\n\n def inner_func(*args, **kwargs):\n args = map(float, args)\n return func(*args, **kwargs)\n\n # Set the docstring of the original function\n inner_func.__name__ = func.__name__\n inner_func.__doc__ = func.__doc__\n\n return inner_func", "title": "" }, { "docid": "9b45c74d826478968142ed9a51f148a5", "score": "0.5685375", "text": "def _arg_replace(self, value_on_error=None, **arg_to_check):\n def _on_decorator_call(func):\n all_args = list(func.__code__.co_varnames)\n\n @wraps(func)\n def _on_call(*args, **kwargs):\n positional_args = all_args[:len(args)]\n\n new_args = list(args)\n for (arg_name, replace) in arg_to_check.items():\n if arg_name in kwargs:\n msg, val = self._validation_function(value_on_error, kwargs[arg_name], replace)\n kwargs[arg_name] = val\n elif arg_name in positional_args:\n msg, val = self._validation_function(value_on_error, args[positional_args.index(arg_name)], replace)\n new_args[positional_args.index(arg_name)] = val\n if msg != '':\n self._error_function(msg, func.__name__, self._logger)\n\n args = tuple(new_args)\n\n return func(*args, **kwargs)\n return _on_call\n return _on_decorator_call", "title": "" }, { "docid": "f67be2488f67e9993bef0624670c3bf1", "score": "0.5682839", "text": "def replaces(arg):\r\n def decorator(fn):\r\n setattr(fn, '_sa_instrument_before', ('fire_append_event', arg))\r\n setattr(fn, '_sa_instrument_after', 'fire_remove_event')\r\n return fn\r\n return decorator", "title": "" }, { "docid": "246c324919b336ef35f3aaddec5c1c62", "score": "0.5679856", "text": "def test_args_kwargs_are_honored(self):\n def patch_fn(original_fn, self, *args, **kwargs):\n return original_fn(self, *args, **kwargs)\n\n utils.patch_method(self.cls, 'sumargs')(patch_fn)\n obj = self.cls()\n self.assertEqual(obj.sumargs(1, 2), 10)\n self.assertEqual(obj.sumargs(1, 1, d=1), 6)\n self.assertEqual(obj.sumargs(1, 1, 1, 1), 4)", "title": "" }, { "docid": "78f5d4ed3e5baf68bc628e71fedf9d8f", "score": "0.56718504", "text": "def wrapper(func,*args):\n def f_hard_char(x):\n \"\"\"\n Argument\n --------\n x\n \"\"\"\n return func(x,*args)\n return f_hard_char", "title": "" }, { "docid": "df5c7e283ec33d0d99bdb1525ddf7ad6", "score": "0.56612426", "text": "def trace(*args, **kwargs):\n\n def decorator(func):\n \"\"\"The decorator itself.\"\"\"\n @wraps(func)\n def wrapper(*args, **kwargs):\n tracestack.on(*handler_args, **handler_kwargs)\n result = func(*args, **kwargs)\n tracestack.off()\n return result\n return wrapper\n\n if len(args) == 1 and callable(args[0]):\n # @tracestack was used as a decorator without arguments.\n # Return the decorated function.\n handler_args = []\n handler_kwargs = {}\n return decorator(args[0])\n else:\n # @tracestack(...) 
was called with arguments.\n # Return a decorator based on those arguments.\n handler_args = args\n handler_kwargs = kwargs\n return decorator", "title": "" }, { "docid": "6bc72ca0ae413e0f3c619cd1b50195ae", "score": "0.56585443", "text": "def decorating_function(user_function):\n _wrapper = wrapper(user_function)\n CACHED_FUNCTIONS.append(_wrapper)\n return _wrapper", "title": "" }, { "docid": "2ed5a66fca4d7b4f3fb6a2ca97820682", "score": "0.56563985", "text": "def anonymous(self, args, function):\n return lambda: function(*args)", "title": "" }, { "docid": "d1fba427ec4bd8b278a34968abc3f096", "score": "0.56514364", "text": "def MyDecorator2(function):\n # Lack of @wraps() here is intentional\n def new_function(*args, **kwargs):\n print(f\"Calling decorated function {function.__name__}\")\n function(*args, **kwargs)\n print(f\"Exiting decorated function {function.__name__}\")\n return new_function", "title": "" }, { "docid": "e7326f9043d4f0f5aadd2362ecc7e320", "score": "0.5646612", "text": "def decorator_from_middleware_with_args(middleware_class):\r\n return make_middleware_decorator(middleware_class)", "title": "" }, { "docid": "31338e9746a6c4e0da2e75e81f2b7a73", "score": "0.56445557", "text": "def wrapper(*args, **kwargs):\n print \"[DEBUG] {}: enter {}()\".format(datetime.now(), func.__name__)\n return func(*args, **kwargs)", "title": "" }, { "docid": "72842145013a14e378825b367ca9622a", "score": "0.56426156", "text": "def trace(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n print(func.__name__, args, kwargs)\n res = func(*args, **kwargs)\n return res\n return inner", "title": "" }, { "docid": "52831942e9a8ebdb2900ee61764ec084", "score": "0.56422114", "text": "def wrap(function, *args, **kwargs):\n func = partial(function, *args, **kwargs)\n func.__name__ = function.__name__\n return func", "title": "" }, { "docid": "3b2b7a56612c4ed5ef0691c760bba68b", "score": "0.5641761", "text": "def func_wrapper(*args, **kwargs):\n return '>>> {} <<<'.format(func(*args, **kwargs))", "title": "" }, { "docid": "73a653310eb6c5237f4146deee2e3502", "score": "0.5641642", "text": "def ignoring_extra_args(fn):\r\n n = number_of_args(fn)\r\n kwa = all_keyword_args(fn)\r\n @wraps(fn)\r\n def wrapper(*args, **kwargs):\r\n return fn(*args[0:n], **keyfilter(lambda k: kwa is None or k in kwa, kwargs))\r\n return wrapper", "title": "" }, { "docid": "17c3f4715f4d2f8e56274ab9e6d5a1bc", "score": "0.5632852", "text": "def call_with_args(func, arg_data):\n args, kwargs = populate_args(func, arg_data)\n return func(*args, **kwargs)", "title": "" }, { "docid": "603f0cf47b8d23ae61106640ca2834d5", "score": "0.56286746", "text": "def decorator_1(fun):\r\n\r\n count = 0\r\n\r\n def wrapper(*args):\r\n nonlocal count\r\n count += 1\r\n start = time.time()\r\n with contextlib.redirect_stdout(io.StringIO()) as f: fun(*args)\r\n duration = time.time() - start\r\n print(f'{fun.__name__}' + f' call {count}' + ' executed in ' + f'{duration}' + ' sec')\r\n return wrapper", "title": "" }, { "docid": "c527b43a34e9dcb85a00cbb9825cccc9", "score": "0.5624835", "text": "def extend_args(self, args):\n self.register(args, \"arg\")\n\n # We need the order to check the types of function call arguments\n self.args_ordered += [symbol.name for symbol in args]", "title": "" }, { "docid": "f08cc02ee3316269f7e575f7e4ba8be7", "score": "0.56149286", "text": "def parametrize(arg_names: Iterable[str], arg_values: Iterable[Iterable[Any]]):\n\n def decorator(func):\n def wrapper(*args, **kwargs):\n for arg_value in arg_values:\n 
kwargs_extra = {name: value for name, value in zip(arg_names, arg_value)}\n func(*args, **kwargs, **kwargs_extra)\n\n return wrapper\n\n return decorator", "title": "" }, { "docid": "7adca6a948911f8e7dcc913eb3d398d0", "score": "0.5614659", "text": "def wrapper(*args,**kwargs):\n\t\tprint(f\"You are about to call the wrapped function {fn.__name__}\")\n\t\tprint(f\"Here's the documentation: {fn.__doc__}\")\n\t\treturn fn(*args,**kwargs)", "title": "" }, { "docid": "a5fb68fe59b431cdabe1be3eb563684d", "score": "0.5603432", "text": "def cache(self, *args, **kwargs):\r\n cache = [None]\r\n key = \" \".join(str(x) for x in args)\r\n \r\n def decorate(func):\r\n namespace = util.func_namespace(func)\r\n def cached(*args):\r\n if not cache[0]:\r\n cache[0] = self.get_cache(namespace, **kwargs)\r\n cache_key = key + \" \" + \" \".join(str(x) for x in args)\r\n def go():\r\n return func(*args)\r\n return cache[0].get_value(cache_key, createfunc=go)\r\n cached._arg_namespace = namespace\r\n return cached\r\n return decorate", "title": "" }, { "docid": "0093912eeb2dfb551d4edd54ae7e75fb", "score": "0.5596452", "text": "def test_decorate_function_with_args(self):\n\n @self.time_this\n def slow_function(n):\n time.sleep(n)\n return \"foo\"\n\n self.assertEquals(\"foo\", slow_function(2))\n self.assertAlmostEqual(2.0, slow_function.end, delta=1.0)", "title": "" }, { "docid": "30bf95a2d2962a145e86622a9feda061", "score": "0.5583225", "text": "def delayed(*args, **kwargs):\n kwargs.update(wrapper_kwargs)\n if isinstance(args[0], Sequence) and isinstance(args[0][-1], FlorinContext):\n innerargs = tuple(args[0][:-1]) + wrapper_args\n return func(*innerargs, **kwargs), args[0][-1]\n else:\n innerargs = args + wrapper_args\n return func(*innerargs, **kwargs)", "title": "" }, { "docid": "1ca276867b1fba7a75885813505d2f16", "score": "0.5581592", "text": "def optional_args(decorator):\r\n\r\n @wraps(decorator)\r\n def wrapper(*args, **kwargs):\r\n def dec(f):\r\n return decorator(f, *args, **kwargs)\r\n\r\n is_decorating = not kwargs and len(args) == 1 and callable(args[0])\r\n if is_decorating:\r\n f = args[0]\r\n args = []\r\n return dec(f)\r\n else:\r\n return dec\r\n\r\n return wrapper", "title": "" }, { "docid": "4108f51cf626318626fa89973e8118ab", "score": "0.5575432", "text": "def fapply(f, args):\n return f(*args)", "title": "" }, { "docid": "ab8f2b89603776f7b3e0c4418d05d135", "score": "0.55685997", "text": "def wrapper(*args, **kwargs):\n print(func.__doc__)\n return func(*args, **kwargs)", "title": "" }, { "docid": "1a5e74cc5f98dead78d673d5cbf64995", "score": "0.5564676", "text": "def rest(func, *args, **kwargs):\n @wraps(func)\n def bound(first):\n return func(first, *args, **kwargs)\n return bound", "title": "" }, { "docid": "0bae576cb2a9bac2ed6f03c277d846c9", "score": "0.55514747", "text": "def with_fn(decorator):\n def real_decorator(fn=None, *args, **kwargs):\n if fn is None:\n return partial(real_decorator, *args, **kwargs)\n return decorator(fn, *args, **kwargs)\n update_wrapper(real_decorator, decorator)\n return real_decorator", "title": "" }, { "docid": "7d750d97f83b75719c7d464394800d3f", "score": "0.553376", "text": "def show_args(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n \"\"\"Prints the params \"\"\"\n statement_1 = args\n statement_2 = kwargs\n # return statement_1, statement_2\n print('Here are the args: {}'.format(args))\n print(f'Here are the kwargs: {kwargs}')\n wrapper.__name__ = fn.__name__\n wrapper.__doc__ = fn.__doc__\n\n return wrapper", "title": "" }, { "docid": 
"7c210aa61f1ebc6ea3fd60ad1b6c8aa1", "score": "0.5527744", "text": "def wrap(self, func):\n cache = self.__cache # Avoid dot lookup in hot path\n\n # Skip wrapping for functions with no injection points\n if not self.inspect(func):\n return func\n\n @functools.wraps(func)\n def wrapper(**ka):\n # PERF: Inlined call_inject call.\n # Keep in sync with the implementation above.\n for key, producer in cache[func]:\n if key not in ka:\n ka[key] = producer()\n return func(**ka)\n\n wrapper.__injector__ = self\n return wrapper", "title": "" }, { "docid": "214a93d966c0e6edf685717607e9bec5", "score": "0.5513462", "text": "def wrap(self, decorator, func):\n def wrapped_func(*args, **kwargs):\n \"\"\"\n \"\"\"\n decorator.run_pre_callbacks()\n func(*args, **kwargs)\n decorator.run_post_callbacks()\n return wrapped_func", "title": "" }, { "docid": "f0a7f1794cefe598ac04595cd1352a8e", "score": "0.5498273", "text": "def wrap_decorator(fn, index=None):\n # Determine the name of the first argument, in case it is specified in kwargs instead.\n signature = inspect.signature(fn)\n arg = None\n param = next(iter(signature.parameters.values()), None)\n assert param\n if param.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):\n arg = param.name\n assert arg\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n if (args and callable(args[0])) or (arg in kwargs and callable(kwargs[arg])):\n return fn(*args, **kwargs)\n\n def decorator(_fn):\n return fn(_fn, *args, **kwargs)\n return decorator\n return wrapper", "title": "" }, { "docid": "b43c42418fa654fb0aaf0159b13d79fb", "score": "0.5496057", "text": "def log_this_with_context(self, **kwargs: Any) -> Callable[..., Any]:\n\n def func(f: Callable[..., Any]) -> Callable[..., Any]:\n return self._wrapper(f, **kwargs)\n\n return func", "title": "" }, { "docid": "6f30eace29cfa60c5febd5bd6c356576", "score": "0.5481424", "text": "def function_intercept(intercepted_func, intercepting_func):\n\n def wrap():\n # call the function we are intercepting and get it's result\n real_results = intercepted_func()\n\n # call our own function a\n intercepted_results = intercepting_func(real_results)\n return intercepted_results\n\n return wrap", "title": "" } ]
984de8f76634970f9ee3ff2bd069c0de
returns dates to buy on
[ { "docid": "5d944fced81ddaaed27b318bf33a2d15", "score": "0.0", "text": "def buy_orders(self):\n \traise NotImplementedError", "title": "" } ]
[ { "docid": "3de819151e518daf959dce419ae9ac67", "score": "0.65969247", "text": "def daily():", "title": "" }, { "docid": "962b7089a27f4c95efe8c39661c98eb8", "score": "0.64363086", "text": "def get_buy_day(self):\n # Write your code here\n profit = 0\n buy_day = 0\n day = 0\n\n while day < self.number_of_days and day != (self.number_of_days - 1):\n if self.prices[day] < self.prices[day + 1] and abs(self.prices[day] - self.prices[day + 1]) > profit:\n buy_day = day\n day += 1\n\n return buy_day", "title": "" }, { "docid": "1bd817c4480583650a65068b311e02b3", "score": "0.6181659", "text": "def get_dates():\n # ---\n d1 = date(2015,8,31)\n #d1 = date(2018,8,25)\n d2 = date(2018,8,31)\n dates = [str(d1 + timedelta(days=x)) for x in range((d2-d1).days + 1)]\n\n # ---\n return dates", "title": "" }, { "docid": "57f2e3232823f86a25b6ffd73579438f", "score": "0.5986391", "text": "def get_date_discount(driver_page):\n time_list = driver_page.find_elements_by_css_selector('time')\n time_start = time_list[0].text\n time_end = time_list[1].text\n finish_date = (time_start + '{}' + time_end).format(' - ').split()\n discount_date = get_start_end_date(finish_date)\n\n return discount_date", "title": "" }, { "docid": "b79ca23b52a65bc5ca1f73ee14afcaec", "score": "0.59722996", "text": "def get_days_to_duedate(self):\n days = []\n for i,x in enumerate(self.active):\n days.append(utils.days_until_due(x[4]))\n self.days = days", "title": "" }, { "docid": "97c02c7896e1f8cd83f13cca7c4a31ca", "score": "0.5948199", "text": "def get_historical_prices(symbol, chosen_date):\n import datetime\n from datetime import date\n from datetime import timedelta\n \n #Test\n # >>> chosen_date = '2016-05-10'\n # >>> year = int(chosen_date[:4])\n # >>> month = int(chosen_date[5:7])\n # >>> day = int(chosen_date[8:])\n # >>> end_date = datetime.date(year, month, day)\n # >>> start_date = str(end_date - datetime.timedelta(days=2))\n\n past_n_days = 10 #fixed because we only care about the stock price of the chosen day and the stock prices of the two previous days\n\n year = int(chosen_date[:4])\n month = int(chosen_date[5:7])\n day = int(chosen_date[8:])\n\n end_date = datetime.date(year, month, day)\n \n if end_date > datetime.date.today():\n statement = \"Choose any date before today: \" + str(datetime.date.today())\n d0 = end_date\n d1 = datetime.date.today()\n delta = d0 - d1\n past_n_days += delta.days\n # from datetime import date\n # d0 = date(2008, 8, 18)\n # d1 = date(2008, 9, 26)\n # delta = d0 - d1\n # print delta.days\n if end_date == datetime.date.today():\n past_n_days += 1\n \n\n #assert end_date < datetime.date.today(), \"chosen date must be any previous day from today: %r\" % end_date\n #assert num == 4, \"len of set is not 4: %r\" % num #example\n\n #List of dates:\n date_list = [end_date - datetime.timedelta(days=x) for x in range(0, 3)]\n\n # >>> date_list = [end_date - datetime.timedelta(days=x) for x in range(0, 3)]\n # >>> print date_list\n # [datetime.date(2016, 5, 10), datetime.date(2016, 5, 9), datetime.date(2016, 5, 8)]\n\n #start_date = str(end_date - datetime.timedelta(days=past_n_days)) #doesn't work when we previously put from datetime import datetime \n start_date = str(end_date - timedelta(days=past_n_days)) #code is always functional\n end_date = chosen_date\n\n # #month, day and year\n url = 'http://ichart.yahoo.com/table.csv?s=%s&' % symbol + \\\n 'd=%s&' % str(int(end_date[5:7]) - 1) + \\\n 'e=%s&' % str(int(end_date[8:10])) + \\\n 'f=%s&' % str(int(end_date[0:4])) + \\\n 'g=d&' + \\\n 'a=%s&' % 
str(int(start_date[5:7]) - 1) + \\\n 'b=%s&' % str(int(start_date[8:10])) + \\\n 'c=%s&' % str(int(start_date[0:4])) + \\\n 'ignore=.csv'\n print \"url\"\n print url\n days = urllib.urlopen(url).readlines()\n data = [day[:-2].split(',') for day in days]\n return data", "title": "" }, { "docid": "81fd0021025722892014956f2b83a321", "score": "0.58773535", "text": "def get_workfree_dates (date):\n all_dates = get_same_month_dates (date)\n workfree_dates = []\n \n for date in all_dates:\n if holiday.is_workfree (date):\n workfree_dates.append (date)\n \n return workfree_dates", "title": "" }, { "docid": "63584f5753a9b17c4cee5f78995ff04f", "score": "0.5817576", "text": "def get_sell_day(self):\n # Write your code here\n profit = 0\n sell_day = 0\n day = 0\n\n while day < self.number_of_days and day != (self.number_of_days - 1):\n if self.prices[day] > self.prices[day + 1] and abs(self.prices[day] - self.prices[day + 1]) > profit:\n sell_day = day\n day += 1\n\n return sell_day", "title": "" }, { "docid": "8105d32b67fc2e098a3b735237ae34bf", "score": "0.58046496", "text": "def create_event_orders(self, events, sell_days, hold_days=5, amount=100):\n df_buy_events = pd.DataFrame(events, columns=[\"symbol\"], index=events.index)\n df_buy_events[\"action\"] =\"Buy\"\n df_buy_events[\"year\"] = events.index.year\n df_buy_events[\"month\"] = events.index.month\n df_buy_events[\"day\"] = events.index.day\n df_buy_events[\"amount\"] = 100\n df_sell_events = pd.DataFrame(events,columns=[\"symbol\"])\n df_sell_events.reset_index()\n df_sell_events.index = sell_days\n #sell_days = df_sell_events.index + timedelta(days=hold_days)\n df_sell_events[\"action\"] =\"Sell\"\n df_sell_events[\"year\"] = df_sell_events.index.year\n df_sell_events[\"month\"] = df_sell_events.index.month\n df_sell_events[\"day\"] = df_sell_events.index.day\n #df_sell_events.index = sell_days\n df_sell_events[\"amount\"] = 100\n df_orders = df_sell_events.append(df_buy_events).sort_index()\n df_orders.index.name = \"date\"\n return df_orders", "title": "" }, { "docid": "e9f4fdde43c68a0450a15aa990e82a9a", "score": "0.58044255", "text": "def get_dates(self, repo_page, repo_name):\r\n dates = repo_page.find_all('relative-time', class_='no-wrap')\r\n return [date(d.attrs['datetime'], repo_name) for d in dates]", "title": "" }, { "docid": "067d7f96f4fce9184f9a5c98079f8852", "score": "0.5781324", "text": "def get_semi_monthly_due_dates(table: element.ResultSet):\r\n\r\n if len(table) != 1:\r\n return []\r\n else:\r\n # Find tag that resembles <strong>\r\n strongs = table[0].find_all(\"strong\")\r\n due_dates = []\r\n for strong in strongs:\r\n strong_contents = strong.contents[0] # Should expect date to be in format: mm/dd/yyyy\r\n try:\r\n datetime_object = datetime.strptime(strong_contents, \"%m/%d/%Y\")\r\n due_dates.append(datetime_object)\r\n except:\r\n # strong tag did not contain a date, so skip\r\n continue\r\n\r\n return due_dates", "title": "" }, { "docid": "590dc13b41ed44afc9b79a4c45da354f", "score": "0.5779855", "text": "def dates(self):\n return self._dates", "title": "" }, { "docid": "056ee4b144da53cd260db72ce1ceb14e", "score": "0.5776556", "text": "def getDayPrice(self):\n ticker = self.ticker\n API = self.API\n dataset = API[\"dataset\"]\n name = dataset[\"name\"]\n # description = dataset[\"description\"]\n # newestAvailableDate = dataset[\"newest_available_date\"]\n # \"\"\"\n # 
[\"Date\",\"Open\",\"High\",\"Low\",\"Close\",\"Volume\",\"Dividend\",\"Split\",\"Adj_Open\",\"Adj_High\",\"Adj_Low\",\"Adj_Close\",\"Adj_Volume\"]\n # \"\"\"\n data = dataset[\"data\"]\n mostRecent = data[0]\n # date = mostRecent[0]\n open = mostRecent[1]\n # volume = mostRecent[5]\n # dividend = mostRecent[6]\n\n\n # print (\"newest_available_date:\" + newestAvailableDate)\n return open", "title": "" }, { "docid": "b5adfd42c259f1cd9da3e6167c13a86d", "score": "0.57355297", "text": "def generateDates(start_date, end_date):\n\n #List to save all dates in\n dates = []\n\n #Difference in days between start and end date\n delta = end_date - start_date\n\n #Loop over total number of days between start and end date\n for i in range(delta.days):\n \n #Save the date, which is i dates from the start date\n dates.append(start_date + pd.Timedelta(i, unit=\"D\"))\n\n return dates", "title": "" }, { "docid": "6b8f29907e0c13ec1085b0ff0f69068d", "score": "0.5715112", "text": "def dates_crawler(self):\r\n dates = []\r\n for url in self.repos:\r\n repo_page = crawl(master(url))\r\n repo_page = self.invalid_branch(repo_page, url)\r\n\r\n status(url)\r\n dates += self.get_dates(repo_page, url)\r\n button = self.get_button(repo_page)\r\n while scroll_down(button):\r\n sleep(self.slp)\r\n repo_page = crawl(button.attrs[LINK])\r\n dates += self.get_dates(repo_page, url)\r\n button = self.get_button(repo_page)\r\n return dates", "title": "" }, { "docid": "d5639130a5c8bb86233a6898f22803d9", "score": "0.5703914", "text": "def generate_dates() -> List[date]:\n\tstart = date(2019, 12, 31)\n\tend = (datetime.today() - timedelta(1)).date()\n\tdates = [d.date() for d in pd.date_range(start, end)]\n\treturn dates", "title": "" }, { "docid": "5dc16dd9d2160f8603839000149e2f6f", "score": "0.5676596", "text": "def get_dates_since_start(self):\n days = []\n start_data_date = date(2020, 3, 1)\n latest_data_date = date(2020, 5, 4)\n delta = timedelta(days=1)\n while start_data_date <= latest_data_date:\n days.append(start_data_date.strftime(\"%#m/%#d/%y\")) # Add dates to days list\n start_data_date += delta\n self.days = days\n self.start_data_date = date(2020, 3, 1).strftime(\"%#m/%#d/%y\")\n self.latest_data_date = date(2020, 5, 4).strftime(\"%#m/%#d/%y\")\n return days", "title": "" }, { "docid": "5dc16dd9d2160f8603839000149e2f6f", "score": "0.5676596", "text": "def get_dates_since_start(self):\n days = []\n start_data_date = date(2020, 3, 1)\n latest_data_date = date(2020, 5, 4)\n delta = timedelta(days=1)\n while start_data_date <= latest_data_date:\n days.append(start_data_date.strftime(\"%#m/%#d/%y\")) # Add dates to days list\n start_data_date += delta\n self.days = days\n self.start_data_date = date(2020, 3, 1).strftime(\"%#m/%#d/%y\")\n self.latest_data_date = date(2020, 5, 4).strftime(\"%#m/%#d/%y\")\n return days", "title": "" }, { "docid": "ef457189bd89e81bab355b91b456e6fc", "score": "0.56671613", "text": "def daily_sales (date):\n\n global log\n\n full_report = \"==%s SALES REPORT==\\n\" % date.strftime(\"%a, %b %-d %Y\")\n payments = get_payments(date)\n\n try:\n drawers = get_cash_drawer(date)\n except Exception as e:\n drawers = []\n ts = time.time()\n log += \"[%s]: %s\" % (datetime.datetime.fromtimestamp(ts).strftime(\"%Y-%m-%d %H:%M:%S\"), e)\n\n try:\n reportdate = datetime.datetime.strptime(date,\"%Y-%m-%d\")\n except:\n reportdate = date\n\n # @LOGAN? 
add some comments please.\n sales = sales_totals(payments, drawers, reportdate)\n full_report += report_string(sales)\n\n #####\n ## LAST YEAR\n #####\n\n last_year_report_date = reportdate + dateutil.relativedelta.relativedelta(years=-1, weekday=reportdate.weekday())\n last_year_payments = get_payments(last_year_report_date.strftime(\"%Y-%m-%d\"))\n\n try:\n last_year_drawers = get_cash_drawer(last_year_report_date.strftime(\"%Y-%m-%d\"))\n\n except Exception as e:\n last_year_drawers = []\n ts = time.time()\n log += \"[%s]: %s\"%(datetime.datetime.fromtimestamp(ts).strftime(\"%Y-%m-%d %H:%M:%S\"), e)\n\n full_report += \"\\n\"\n full_report += \"===LAST YEAR===\\n\"\n full_report += '==%s SALES REPORT==\\n'%last_year_report_date.strftime(\"%a, %b %-d %Y\")\n last_year_sales = sales_totals(last_year_payments,last_year_drawers,last_year_report_date)\n full_report += report_string(last_year_sales)\n full_report += \"\\n\"\n\n reportd = reportdate.strftime(\"%Y-%m-%d\")\n fill_db(reportd,sales, \"day\")\n \n full_report += \"===RECENT COMPARISONS===\\n\"\n full_report += \" =San Jac=\\n\"\n full_report += \"%s %-20s %-15s %-15s\\n\"%(\" \",date.strftime(\"%a, %b %-d %Y\"),\"Value\",\"Difference\")\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 3 Week Average:\",format_money(sales['sjs_total']),format_money(get_recent_average(reportdate,3)[0]),format_money(sales['sjs_total'] - get_recent_average(reportdate,3)[0]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\"3 Month Average:\",format_money(sales['sjs_total']),format_money(get_recent_average(reportdate,12)[0]),format_money(sales['sjs_total'] - get_recent_average(reportdate,12)[0]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 6 Month Best:\",format_money(sales['sjs_total']),format_money(get_recent_sales_best(reportdate,26)[0]),format_money(sales['sjs_total'] - get_recent_sales_best(reportdate,26)[0]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 6 Month Worst:\",format_money(sales['sjs_total']),format_money(get_recent_sales_worst(reportdate,26)[0]),format_money(sales['sjs_total'] - get_recent_sales_worst(reportdate,26)[0]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 12 Month Best:\",format_money(sales['sjs_total']),format_money(get_recent_sales_best(reportdate,52)[0]),format_money(sales['sjs_total'] - get_recent_sales_best(reportdate,52)[0]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 12 Month Worst:\",format_money(sales['sjs_total']),format_money(get_recent_sales_worst(reportdate,52)[0]),format_money(sales['sjs_total'] - get_recent_sales_worst(reportdate,52)[0]))\n full_report += \"\\n\"\n full_report += \" =Jack's=\\n\"\n full_report += \"%s %-20s %-15s %-15s\\n\"%(\" \",date.strftime(\"%a, %b %-d %Y\"),\"Value\",\"Difference\")\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 3 Week Average:\",format_money(sales['jacks_total']),format_money(get_recent_average(reportdate,3)[1]),format_money(sales['jacks_total'] - get_recent_average(reportdate,3)[1]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\"3 Month Average:\",format_money(sales['jacks_total']),format_money(get_recent_average(reportdate,12)[1]),format_money(sales['jacks_total'] - get_recent_average(reportdate,12)[1]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 6 Month Best:\",format_money(sales['jacks_total']),format_money(get_recent_sales_best(reportdate,26)[1]),format_money(sales['jacks_total'] - get_recent_sales_best(reportdate,26)[1]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 6 Month 
Worst:\",format_money(sales['jacks_total']),format_money(get_recent_sales_worst(reportdate,26)[1]),format_money(sales['jacks_total'] - get_recent_sales_worst(reportdate,26)[1]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 12 Month Best:\",format_money(sales['jacks_total']),format_money(get_recent_sales_best(reportdate,52)[1]),format_money(sales['jacks_total'] - get_recent_sales_best(reportdate,52)[1]))\n full_report += \"%s %-25s %-15s %-15s\\n\"%(\" 12 Month Worst:\",format_money(sales['jacks_total']),format_money(get_recent_sales_worst(reportdate,52)[1]),format_money(sales['jacks_total'] - get_recent_sales_worst(reportdate,52)[1]))\n\n return sales, full_report", "title": "" }, { "docid": "88b1d9267f78a6736efeb27228f8f9d7", "score": "0.5648886", "text": "def get_workdays (date):\n return sorted (set (get_same_month_dates (date)) - set (get_workfree_dates (date)))", "title": "" }, { "docid": "f7e04c06d076768952d4f6947877246e", "score": "0.5640341", "text": "def get_all_dates(self):\n dates = []\n\n dict_data = self.stock_dates\n\n for date_ in dict_data.keys():\n dates.append(date_)\n\n return dates", "title": "" }, { "docid": "da41cf50f4db88f95d35c86536f59d39", "score": "0.56070566", "text": "def all_stock_values(start_date = \"01-01-1940\"):\n \n today = date.today()\n new_end_date = today - datetime.timedelta(days=1)\n new_end_date = new_end_date.strftime(\"%d-%m-%Y\")\n\n\n return extract_stock_values(start_date, new_end_date, get_stock_names())", "title": "" }, { "docid": "46881249c278807c89894dde0bb2c137", "score": "0.56048465", "text": "def gen_burndown_data(self):\n # calculate sum of all backlog item estimates, and estimates of finished items on each date\n total_est = 0.0\n item_estimates = {} #{'2012-01-01': 12.5,...} #[['2012-01-01',4.5],...]\n for itm in self.backlog_set.get_query_set():\n itp = itm.date_points() #finished tasks ['2012-01-01', 4.5]\n total_est += itp[1]\n if itp[0] in item_estimates:\n item_estimates[itp[0]] += itp[1]\n else:\n item_estimates[itp[0]] = itp[1]\n # gen list of dates from self.start to self.end or today\n dt = self.start\n xdates = []\n while True:\n xdates.append([datetime.strftime(dt, '%Y-%m-%d')])\n dt += timedelta(days=1)\n if dt > datetime.now() or self.end and dt > self.end:\n break\n # populate xdates with remaining estimates\n remaining = total_est\n for xd in xdates:\n ie = item_estimates.get(xd, 0)", "title": "" }, { "docid": "0e9e00af4110732cc07aa8849d6cd3a6", "score": "0.5593276", "text": "def get_dated_items(self):\n year = self.get_year()\n month = self.get_month()\n day = self.get_day()\n\n date = _date_from_string(\n year,\n self.get_year_format(),\n month,\n self.get_month_format(),\n day,\n self.get_day_format(),\n )\n\n return self._get_dated_items(date)", "title": "" }, { "docid": "80c3795e2edc94b621400436cd2a0e93", "score": "0.5572386", "text": "def _GenerateBackfillDates(self, duration, start_date):\n # First find the nearest Wednesday before the start.\n crawl_date = start_date - timedelta(days=((start_date.weekday() + 5) % 7))\n days = []\n while crawl_date > start_date - duration:\n days.append(crawl_date)\n crawl_date = crawl_date - timedelta(weeks=1)\n return days", "title": "" }, { "docid": "a517a2cf0fd4c059fc2d18ec7da132bb", "score": "0.55685365", "text": "def behind_the_wheel_date(self, data):\n payload = data.copy()\n payload.update({\n \"officeId\": self.id,\n \"numberItems\": \"1\",\n \"requestedTask\": \"DT\",\n \"resetCheckFields\": \"true\",\n })\n return get_dmv_date(payload)", "title": "" }, { "docid": 
"47dae736cbb104e172758c77bc9a195e", "score": "0.5563007", "text": "def rec_stocks(s,df_close_prices):\n watch = []\n nearly = []\n buy = []\n for stock in s.symbols:\n tag = (stock + 'BUY')\n u_tag = (stock + 'TrigU')\n d_tag = (stock + 'TrigD')\n m_tag = (stock + 'MACD')\n \n #Calculates Reco dict for today\n up = df_close_prices.ix[-1,u_tag]\n down = df_close_prices.ix[-2,d_tag]\n down2 = df_close_prices.ix[-1,d_tag]\n macd = df_close_prices.ix[-1,m_tag]\n macd2 = df_close_prices.ix[-2,m_tag]\n y = up + down \n #Watch are if EMAs are in correct order going down\n if down == 1.0 and macd<4:\n watch.append(stock)\n #Nearly if Watch is true, MACD < 0% and < yesterday\n if down2 == 0 and macd<0 and macd2>macd:\n nearly.append(stock)\n #Buy if down and up triggers are both true\n if y ==11.0:\n buy.append(stock)\n else:\n continue \n print(\"\\nToday's stocks to watch and buy are as follows....\")\n print('Watch = ',watch)\n print('Nearly = ',nearly)\n print('Buy = ',buy)", "title": "" }, { "docid": "0a6f1b1eb8f7039944ee3d15a1d6a19e", "score": "0.55620915", "text": "def getBusinessDaysBetween(date1: java.util.Date, date2: java.util.Date) -> int:\n ...", "title": "" }, { "docid": "c7a3f45e482a0df1e4be0d39bbd03c2f", "score": "0.55614525", "text": "def _get_dated_items(self, date):\n allow_future = self.get_allow_future()\n allow_empty = self.get_allow_empty()\n first_date = self.get_first_date()\n\n if not allow_future and date > timezone_today():\n raise Http404(_(\"Future dates not available\"))\n\n if first_date and date < first_date:\n raise Http404(_(\"Dates this old not available\"))\n\n object_lists = {}\n\n # The order we add things to object_lists is the order in which\n # they'll appear in the page...\n\n for blog, posts in self._get_weblog_posts(date).items():\n object_lists.update({blog: posts})\n\n object_lists.update(self._get_flickr_photos(date))\n\n object_lists.update(self._get_pinboard_bookmarks(date))\n\n object_lists.update(self._get_twitter_tweets(date))\n\n object_lists.update(self._get_twitter_favorites(date))\n\n if not allow_empty:\n if len(object_lists) == 0:\n raise Http404(_(\"Nothing available\"))\n\n # Count the total number of ALL items are in the QuerySets:\n object_count = 0\n for key, object_list in object_lists.items():\n object_count += len(object_list)\n\n return (\n None,\n object_lists,\n {\n \"day\": date,\n \"previous_day\": self.get_previous_day(date),\n \"next_day\": self.get_next_day(date),\n \"object_count\": object_count,\n },\n )", "title": "" }, { "docid": "58f62dc8c7f32aef0016f27d72136294", "score": "0.55299747", "text": "def getAllGameDates():\n start = date(2015, 11, 2)\n end = date(2015, 11, 11)\n\n delta = timedelta(days=1)\n\n date_array = []\n\n while start <= end:\n date_array.append(start.strftime(\"%Y-%m-%d\"))\n start += delta\n\n return date_array", "title": "" }, { "docid": "99c27386e57436bdb7dcadeae2d405ed", "score": "0.5506863", "text": "def get_values_of_stock(stock_names: str, start_date = \"01-01-1940\"):\n \n today = date.today()\n new_end_date = today - datetime.timedelta(days=1)\n new_end_date = new_end_date.strftime(\"%d-%m-%Y\")\n\n\n return extract_stock_values(start_date, new_end_date, [stock_names])", "title": "" }, { "docid": "44d5e6a4336857ce47fdb87b36c65844", "score": "0.5498179", "text": "def dates(date_info, air_visit_data, subTable):\n\n holidays = date_info.loc[date_info['holiday_flg'] == 1]\n holidays = holidays[['calendar_date']]\n holidays.columns = ['ds']\n holidays['holiday'] = 'Holiday'\n\n trainDays = 
pd.DataFrame(air_visit_data.visit_date.unique(),\n columns=['ds'])\n trainDays = trainDays.sort_values('ds')\n forecastDays = pd.DataFrame(subTable.ds.unique(), columns=['ds'])\n\n return holidays, trainDays, forecastDays", "title": "" }, { "docid": "0721623c641fc04ea677008f0da968d5", "score": "0.549523", "text": "def generate_sales_invoice_daily():\n\n # Dates Query\n dates_query = \"\"\"SELECT DATE(date) AS daily_date FROM `tabReceipts` GROUP BY DATE(date)\"\"\"\n\n # Get all dates from tab receipts\n dates = frappe.db.sql(dates_query, as_dict=1)\n\n # Hey jude\n for date in dates:\n #print 'Creating for {0}'.format(date.daily_date)\n generate_sales_invoice_by_date(date.daily_date)", "title": "" }, { "docid": "2cc137162dc123c47b1a907ef2d23af6", "score": "0.5494386", "text": "def check_in_out_dates(self):\n if self.checkout and self.checkin:\n #print \"=====\",self.checkin<self.date_order\n if self.checkin < self.date_order:\n raise except_orm(_('Warning'), _('Checkin date should be \\\n greater than the current date.'))\n if self.checkout < self.checkin:\n raise except_orm(_('Warning'), _('Checkout date \\\n should be greater than Checkin date.'))", "title": "" }, { "docid": "dec0743e663d0db1eaec750cb67bbfa4", "score": "0.5491623", "text": "def get_working_days():\n\ttoday = date.today()\n\tlast_days = [\"{:%Y-%m-%d}\".format(today - timedelta(days=days_delta)) for days_delta in range(15)]\n\treturn last_days", "title": "" }, { "docid": "87f1ef660943ed00dce550a1337af22d", "score": "0.54909474", "text": "def business_days_between(self, from_dates, to_dates):\n pass", "title": "" }, { "docid": "fa35f40ff989db2081b0fc00a9018dd9", "score": "0.54837537", "text": "def get_buy_list(self, date):\n signaldate = QA_util_get_last_day(date)\n try:\n buy = self.preload.loc[parser.parse(signaldate).date(), :, :]\n buy.index = buy.index.remove_unused_levels()\n return buy.index.levels[1].tolist()\n except:\n return []", "title": "" }, { "docid": "7d557d69aaed7f814327868ca7f5b21f", "score": "0.5450726", "text": "def get_prices(days, my_data, hotel):\n for day in days:\n if day.attrs['data-status'] == 'past-date':\n continue\n data_date = day.attrs[\"data-date\"]\n data_month = str(data_date.split('/')[0])\n data_year = str(data_date.split('/')[2])\n if not data_date:\n continue\n rate = day.find(\"span\", {\"class\": \"dateWrapper__button--rate\"})\n if not rate:\n continue\n try:\n rate = int(rate.text.split(\"$\")[1])\n except:\n rate = 9999\n date = data_date\n my_data.append({\"date\": date, \"price\": rate, \"hotel\": hotel})\n add_price_to_db(date, rate, hotel)\n return my_data", "title": "" }, { "docid": "a43981bcbb2254cb40f196b8c41ddd23", "score": "0.543319", "text": "def backtest_strategy(symbol_price, trade_orders, capital):\n symbol = symbol_price.columns[1]\n df = t.sort(pd.merge(symbol_price, trade_orders, on=[\"date\"], how=\"outer\", indicator=True), \"date\", ascending=True)\n df = t.drop(df, \"_merge\")\n \n df[\"date\"] = pd.to_datetime(df[\"date\"])\n \n df[\"pct_change\"] = df[symbol].pct_change()\n df[\"invested_start_day\"] = 0\n df[\"invested_end_day\"] = 0\n df[\"account_cash_start_day\"] = 0\n df[\"account_cash_end_day\"] = 0 \n df[\"net_worth\"] = 0\n df[\"nb\"] = \"\"\n\n calendar = pd.Series( t.column(df, \"date\") )\n \n for date in calendar:\n \n invested_start_day, invested_end_day, account_cash_start_day, account_cash_end_day, net_worth = 0,0,0,0,0\n \n if t.column(df, \"date\")[0] == date: # first day\n invested_start_day = 0\n account_cash_start_day = capital\n\n 
else:\n invested_start_day = t.select(t.where(df, \"date\", date, op.lt), \"invested_end_day\").tail(1).values[0][0] + ( t.select(t.where(df, \"date\", date, op.lt), \"invested_end_day\").tail(1).values[0][0] * t.select(t.where(df, \"date\", date, op.eq), \"pct_change\").values[0][0])\n account_cash_start_day = t.select(t.where(df, \"date\", date, op.lt), \"account_cash_end_day\").tail(1).values[0][0]\n \n invested_end_day = invested_start_day + t.select(t.where(df, \"date\", date, op.eq), \"order_size\").values[0][0]\n account_cash_end_day = account_cash_start_day - t.select(t.where(df, \"date\", date, op.eq), \"order_size\").values[0][0]\n net_worth = invested_end_day + account_cash_end_day\n \n # update table\n df.loc[t.where(df, \"date\", date, op.eq).index[0], \"invested_start_day\"] = invested_start_day\n df.loc[t.where(df, \"date\", date, op.eq).index[0], \"invested_end_day\"] = invested_end_day\n df.loc[t.where(df, \"date\", date, op.eq).index[0], \"account_cash_start_day\"] = account_cash_start_day\n df.loc[t.where(df, \"date\", date, op.eq).index[0], \"account_cash_end_day\"] = account_cash_end_day\n df.loc[t.where(df, \"date\", date, op.eq).index[0], \"net_worth\"] = net_worth\n # include warning in case we spend more than we have (borrow)\n # or we sell more than we have (shorting)\n if account_cash_end_day < 0:\n df.loc[t.where(df, \"date\", date, op.eq).index[0], \"nb\"] = \"account_cash_end_day is negative\"\n if invested_end_day < 0:\n df.loc[t.where(df, \"date\", date, op.eq).index[0], \"nb\"] = df.loc[t.where(df, \"date\", date, op.eq).index[0], \"nb\"] + \" invested_end_day is negative\"\n \n \n # lift\n net_worth_start = t.column(df, \"net_worth\")[0]\n net_worth_end = t.column(df, \"net_worth\")[-1]\n lift = (net_worth_end - net_worth_start) / net_worth_start\n\n df[\"flux\"] = - df[\"order_size\"]\n df.loc[df.index[-1], \"flux\"] = t.column(df, \"flux\")[-1] + net_worth_end\n _irr = xirr(list(zip(df.date, df.flux)))\n \n return ({'transactions': df, 'ROI': lift, 'IRR': _irr, 'return': net_worth_end})", "title": "" }, { "docid": "5573bc29411b3dc8c8b1f35ba925deed", "score": "0.5431218", "text": "def _compute_expected_date(self):\n for order in self:\n dates_list = []\n back_order_pickings = order.picking_ids.filtered(lambda r: r.picking_type_id == order.warehouse_id.pick_type_id and r.is_back_order and order.expected_date != r.scheduled_date)\n schedule_date_list = back_order_pickings and back_order_pickings.mapped('scheduled_date') or []\n if schedule_date_list:\n order.expected_date = schedule_date_list[0]\n continue\n for line in order.order_line.filtered(\n lambda x: x.state != 'cancel' and not x._is_delivery() and not x.display_type):\n dt = line._expected_date()\n dates_list.append(dt)\n if dates_list:\n order.expected_date = fields.Datetime.to_string(min(dates_list))\n else:\n order.expected_date = False", "title": "" }, { "docid": "1fb147870542284312c4b28f8f621ea8", "score": "0.54286754", "text": "def get_pre_workfree_dates (date):\n this_month_workdays = set (get_workdays (date))\n pre_work_free_dates = []\n for workdate in this_month_workdays:\n if holiday.is_workfree (workdate + datetime.timedelta (days = 1)):\n pre_work_free_dates.append (workdate)\n \n return sorted (pre_work_free_dates)", "title": "" }, { "docid": "facb772a86f071bbc603a7a7bce9d66a", "score": "0.5426865", "text": "def get_dates(cls, date_from=None, date_to=None):\n\n # Translate string format to the date object.\n if date_from and isinstance(date_from, str):\n date_from = 
datetime.datetime.strptime(date_from, \"%Y-%m-%d\").date()\n if date_to and isinstance(date_to, str):\n date_to = datetime.datetime.strptime(date_to, \"%Y-%m-%d\").date()\n\n if not date_to:\n date_to = datetime.date.today()\n\n if not date_from:\n date_from = date_to - datetime.timedelta(days=cls.DEFAULT_DATE_DELTA_DAYS)\n\n dates = []\n while date_from <= date_to:\n dates.append(date_from)\n date_from += datetime.timedelta(days=1)\n\n return dates", "title": "" }, { "docid": "cb1d88c21bed319218eee7e56a811997", "score": "0.5415063", "text": "def get_dates(self):\n defaults = [self.date_created_at, self.date_updated_at]\n\n return self.__dates__ + defaults", "title": "" }, { "docid": "f5985f6d2b641461c6091af63deeae33", "score": "0.54135007", "text": "def dates(self):\n # Return\n result = self._df_dates\n return result", "title": "" }, { "docid": "4aafefbcbb353a5058e0db484a9fc5be", "score": "0.54022646", "text": "def bahai_date(major, cycle, year, month, day):\n return [major, cycle, year, month, day]", "title": "" }, { "docid": "94758d47097b2b0cddd7ce560b11bf11", "score": "0.5396541", "text": "def by_date(items, yr, mo=None):\n\tres = []\n\tfor i in items:\n\t\tif fdate(i,yr,mo):\n\t\t\tres.append(i)\n\treturn res", "title": "" }, { "docid": "ff9cf9bf67ef7bf9e96695817c6f78f1", "score": "0.53943497", "text": "def get_trans_datelist(self):\n return list(self.datelist)", "title": "" }, { "docid": "5a836cca5a6b762301a8250c6909fc01", "score": "0.5394073", "text": "def processGetReportCard(start_date, end_date):\n stochasticknapsack.bootstrap()\n\n # check whether end date lesser than start date\n if end_date < start_date:\n return {\n 'status': 500,\n 'error_message': \"End date cannot be lesser than start date\"\n }\n\n reports = []\n today_date = datetime.datetime.now().date()\n\n try:\n for dt in rrule.rrule(rrule.MONTHLY,dtstart=start_date,until=end_date):\n date = dt.date()\n query_start_date = datetime.date(date.year, date.month, 1)\n query_end_date = datetime.date(date.year, date.month, calendar.monthrange(date.year, date.month)[1])\n\n # previous month's dates\n previous_month_start_date = query_start_date - relativedelta.relativedelta(months=1)\n previous_month_end_date = query_end_date - relativedelta.relativedelta(months=1)\n previous_month_end_date = datetime.date(previous_month_end_date.year, previous_month_end_date.month,\n calendar.monthrange(previous_month_end_date.year,\n previous_month_end_date.month)[1])\n\n try:\n report_card = ReportCard.objects.filter(\n date__month=date.month, date__year=date.year).latest('date')\n except ObjectDoesNotExist:\n if date.month < today_date.month:\n try:\n recommendation = Recommendation.objects.get(date__month=date.month, date__year=date.year)\n expected_demand = recommendation.expected_demand\n except:\n expected_demand = 0\n\n target_month = datetime.date(date.year, date.month, 1)\n t_1 = (target_month - datetime.timedelta(days=1)).replace(day=1)\n denominator = Order.objects.filter(\n shipping_date__month=t_1.month, shipping_date__year=t_1.year).count()\n\n if denominator == 0:\n percentageChangeDemand = 0\n else:\n percentageChangeDemand = (float(Order.objects.filter(\n shipping_date__month=date.month, shipping_date__year=date.year).count()) / Order.objects.filter(\n shipping_date__month=t_1.month, shipping_date__year=t_1.year).count() - 1) * 100\n report_card = ReportCard(\n date=datetime.date(date.year, date.month, 1),\n active_customers=getNumberOfActiveCustomersByDate(query_start_date, query_end_date),\n 
orders=countOrdersByDate(query_start_date, query_end_date),\n churn=getNumberOfChurnCustomersByDate(query_start_date, query_end_date),\n new_signups=getNumberOfNewSignupsByDate(query_start_date, query_end_date),\n actions={\n 'facebook_advertising_cost' : 0.00,\n 'blog_posts' : 0,\n 'email_campaigns' : 0,\n 'adwords_cost' : 0.00,\n 'roadshows': 0,\n 'new_coffees' : 0\n },\n percentage_changes={},\n expected_demand=expected_demand,\n demand_actualising=percentageChangeDemand,\n deviation=0.0\n )\n report_card.save()\n\n report_card.percentage_changes = getPercentageChange(previous_month_start_date, previous_month_end_date, report_card)\n\n report_card.save()\n\n else:\n reports.append(\n {\n 'date': '%s-%s' % (date.month, date.year),\n 'report_card': {\n 'report_id' : 0,\n 'active_customers': 0,\n 'orders': 0,\n 'churn': 0,\n 'new_signups': 0,\n 'percentage_active_customers' : 0,\n 'percentage_orders' : 0,\n 'percentage_churn' : 0,\n 'percentage_new_signups' : 0,\n 'actions' : {},\n 'percentage_changes' : {},\n 'expected_demand' : 0.0,\n 'demand_actualising' : 0.0,\n 'deviation' : 0.0\n }\n }\n )\n continue\n reports.append(\n {\n 'date': '%s-%s' % (date.month, date.year),\n 'report_card' : {\n 'report_id' : report_card.id,\n 'active_customers' : report_card.active_customers,\n 'orders' : report_card.orders,\n 'churn' : report_card.churn,\n 'new_signups' : report_card.new_signups,\n 'percentage_changes' : report_card.percentage_changes,\n 'actions' : report_card.actions,\n 'expected_demand': float(report_card.expected_demand),\n 'demand_actualising': float(report_card.demand_actualising),\n 'deviation': float(report_card.deviation)\n }\n }\n )\n except Exception as e:\n return {\n 'status': 500,\n 'error_message': str(e)\n }\n\n return {\n 'status' : 200,\n 'reports' : reports\n }", "title": "" }, { "docid": "2b7086b0412bdeb3c3152a97eeb3962a", "score": "0.5384157", "text": "def run(self,):\n\n for date in QA_util_get_trade_range(self.start, self.end):\n buylist = self.get_buy_list(date)\n selllist = self.get_sell_list(date)\n self.tradetable[date] = {}\n\n if len(selllist) == 0:\n pass\n\n else:\n data = self.closepanel.loc[parser.parse(date).date(), selllist].map(lambda x: round(x,2)).to_dict()\n cashpre = self.cashpre/len(selllist)\n for code in selllist:\n\n volume = self.tradetable[QA_util_get_last_day(\n date, self.rolling-1)][code]\n if volume < 100:\n pass\n else:\n order = self.account.send_order(\n code[0:6], volume, price=data[code], datetime=date+' 15:00:00', towards=-1)\n self.account.make_deal(order)\n\n if len(buylist) != 0:\n\n d = self.datacenter.selects(\n buylist, date, date).open.map(lambda x: round(x, 2))\n\n d.index = d.index.droplevel(0)\n\n data = d.to_dict()\n cashpre = self.cashpre/len(buylist)\n for code in buylist:\n try:\n volume = int(\n 0.01*cashpre/data[code])*100 if data[code] != 0 else 0\n if volume < 100:\n pass\n else:\n order = self.account.send_order(\n code[0:6], volume, price=data[code], datetime=date+' 09:30:00', towards=1)\n self.account.make_deal(order)\n self.tradetable[date][code] = volume\n except:\n \"\"\"\n 主要是停牌买不入 直接放弃\n \n 此处买入未加入连续一字板的检测 rust 会增加此处的逻辑\n \n \"\"\"\n pass\n else:\n pass\n\n holdinglist = [QA_util_code_change_format(code)\n for code in list(self.account.positions.keys())]\n pricepanel = self.closepanel.loc[parser.parse(date).date(), holdinglist].map(lambda x: round(x,2))\n #pricepanel.index = pricepanel.index.droplevel(0)\n pricepanel =pricepanel.to_dict()\n for code in holdinglist:\n\n self.account.on_price_change(code[0:6], 
pricepanel[code])\n self.account.settle()", "title": "" }, { "docid": "41a109cb50645c6644cc65f64fefab40", "score": "0.5383977", "text": "def dates(self):\n try:\n return self.data['datetime']\n except:\n return []", "title": "" }, { "docid": "ec973e85fcbbb999facd33d792c524a1", "score": "0.53811836", "text": "def getListDates( date_start, date_end):\n duration_days = (date_end - date_start).days\n list_dates = list([(date_start+timedelta(days=i)) for i in range(duration_days+1)])\n return list_dates", "title": "" }, { "docid": "3f66c08fd3c32936cb4b162efab86d8c", "score": "0.5363678", "text": "def history_commodity(commodity: str, start_date: datetime, stop_date: datetime) -> list:\n businessinsder_values = {v: k for k, v in businessinsder_key.items()}\n commodity_db = businessinsder_values[commodity]\n if commodity not in available_commodities():\n logger.error('commodity= {} not available in list'.format(commodity))\n raise NameError(commodity)\n commodity = available_commodities()[commodity]\n start = str(int(start_date.strftime('%d'))) + '.' + \\\n str(int(start_date.strftime('%m'))) + '.' + \\\n str(int(start_date.strftime('%Y')))\n stop = str(int(stop_date.strftime('%d'))) + '.' + \\\n str(int(stop_date.strftime('%m'))) + '.' + \\\n str(int(stop_date.strftime('%Y')))\n url = 'https://markets.businessinsider.com/commodities/historical-prices/' \\\n + commodity + '/usd/' + start + '_' + stop\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'uk,en-US;q=0.8,en;q=0.5,ru;q=0.3',\n 'Cache-Control': 'no-cache',\n 'Connection': 'keep-alive',\n 'Content-Length': '0',\n 'DNT': '1',\n 'Host': 'markets.businessinsider.com',\n 'Pragma': 'no-cache',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0',\n }\n\n responce_post1 = requests.post(url, headers=headers)\n logger.info('respoce_1= {}, from url= {}'.format(responce_post1.status_code, url))\n if responce_post1.status_code != 200:\n logger.error('wrong AUTH responce from businessinsider= {}'.format(responce_post1.status_code))\n logger.error('requested url= {}'.format(url))\n raise ValueError('bad responce')\n\n soup1 = BeautifulSoup(responce_post1.text, 'html.parser')\n # modify headers for data collection\n for key in ['__atts', '__ath', '__atcrv']:\n headers[key] = soup1.find('input', attrs={'name': key})['value']\n if key == '__atcrv':\n headers[key] = str(eval(headers[key]))\n\n########## 2-nd step ###############\n\n url2 = 'http://markets.businessinsider.com/Ajax/CommodityController_HistoricPriceList/' \\\n + commodity + '/USD/' + start + '_' + stop\n params = {'type': 'Brent'}\n\n responce_post2 = requests.post(url2, params=params, headers=headers)\n logger.info('respoce_2= {}, from url2= {}'.format(responce_post2.status_code, url2))\n if responce_post2.status_code != 200:\n logger.error('wrong DATA responce from businessinsider= {}'.format(responce_post2.status_code))\n logger.info('requested url= {}'.format(url2))\n raise ValueError('bad responce')\n\n soup2 = BeautifulSoup(responce_post2.text, 'html.parser')\n logger.debug('soup= {}'.format(soup2))\n logger.debug('soup text= {}'.format(soup2.div.get_text(strip=True)))\n # try another url\n if soup2.div.get_text(strip=True) == 'No data available':\n logger.warning('trying another url')\n url2 = 'http://markets.businessinsider.com/Ajax/CommodityController_HistoricPriceList/' \\\n + commodity + '/USc/' + start + '_' + stop\n responce_post2 = 
requests.post(url2, params=params, headers=headers)\n logger.info('respoce_2= {}, from url2= {}'.format(responce_post2.status_code, url2))\n if responce_post2.status_code != 200:\n logger.error('wrong DATA responce from businessinsider= {}'.format(responce_post2.status_code))\n logger.info('requested url= {}'.format(url2))\n raise ValueError('bad responce')\n soup2 = BeautifulSoup(responce_post2.text, 'html.parser')\n logger.debug('soup= {}'.format(soup2))\n\n result = []\n\n for row in soup2.find_all('tr', attrs={'class': False}):\n logger.debug('row= {}'.format(row))\n row_list = row.find_all('td')\n logger.debug('row_list= {}'.format(row_list))\n result.append({'time': pytz.utc.localize(datetime.strptime(row_list[0].get_text(strip=True), '%m/%d/%Y')),\n commodity_db: float(row_list[1].get_text(strip=True))})\n logger.debug('result= {}'.format(result))\n return result", "title": "" }, { "docid": "384b7141635cd4d92ddaec6883db5a71", "score": "0.5357981", "text": "def dates_between(start, end):\n\n dates = []\n date = datetime.datetime.strptime(start, \"%Y/%m/%d\")\n date_end = datetime.datetime.strptime(end, \"%Y/%m/%d\")\n while date < date_end:\n dates.append(date.strftime(\"%Y/%m/%d\"))\n date += datetime.timedelta(days=1)\n dates.append(date_end.strftime(\"%Y/%m/%d\"))\n return dates", "title": "" }, { "docid": "ed0408fafb8334b1b765a7ea20121156", "score": "0.5355744", "text": "def get_winners(soup):\n \n winning_bidders = soup.findAll(\"td\",class_='Ta-end')\n winners = []\n win_dates = []\n for winner in winning_bidders:\n winning_bidder = str(winner.find('a').text)\n winners.append(winning_bidder)\n win_date = str(winner.findAll('span',class_=\"Block F-timestamp Nowrap\")[0].text)\n win_dates.append(win_date)\n return winners,win_dates", "title": "" }, { "docid": "d07d67b0bf6e238bd402d03a5363316a", "score": "0.53534734", "text": "def day_gains(self):\n open_price = Data.quote(self.symbol)['open']\n current_price = Data.price(self.symbol)\n return round((current_price * self.shares) - (open_price * self.shares),\n 2)", "title": "" }, { "docid": "242bed9d1f298e15e91816c21b411525", "score": "0.53501123", "text": "def monthly():", "title": "" }, { "docid": "3aaf2a9e5e5db60465ec53cad71eb406", "score": "0.534938", "text": "def get_publishing_date(self, selenium_driver):", "title": "" }, { "docid": "b27be8a3b5f62b7287e007126ae8f3c2", "score": "0.5345881", "text": "def specificdates(country):\n if country == \"United-Kingdom\" or country == \"United-States\":\n print(\"This function is not available for \" + country + \" yet as I haven't configured all the different \"\n \"provinces, check back soon!\")\n else:\n date1 = input(\"Please enter a start date in the form dd/mm/yyyy:\\n\")\n date2 = input(\"Please enter an end date in the form dd/mm/yyyy:\\n\")\n date1 = date1.split(\"/\")\n date2 = date2.split(\"/\")\n\n date1 = datetime.datetime(int(date1[2]), int(date1[1]), int(date1[0]))\n date2 = datetime.datetime(int(date2[2]), int(date2[1]), int(date2[0]))\n\n temp = CountryAllStatus(country, date1, date2)\n r = CountryAllStatus.request(temp)\n\n firstitem = r[0]\n lastitem = r[-1]\n\n print(\"These are the results for \" + country + \" between\", date1.date(), \"and\", date2.date(), \":\\n\")\n print(\"Amount of New Cases:\\t\\t\" + f\"{int(lastitem['Confirmed']) - int(firstitem['Confirmed']):,d}\" +\n \"\\nAmount of New Deaths:\\t\\t\" + f\"{int(lastitem['Deaths']) - int(firstitem['Deaths']):,d}\" +\n \"\\nAmount of New Recoveries:\\t\" + f\"{int(lastitem['Recovered']) - 
int(firstitem['Recovered']):,d}\" +\n \"\\nAmount of New Active Cases:\\t\" + f\"{int(lastitem['Active']) - int(firstitem['Active']):,d}\" +\n \"\\nTotal amount of cases by\", date1.date(), \":\\t\\t\" + f\"{int(firstitem['Confirmed']):,d}\" +\n \"\\nTotal amount of cases by\", date2.date(), \":\\t\\t\" + f\"{int(lastitem['Confirmed']):,d}\")", "title": "" }, { "docid": "764c436049bfc5f417c8bcdd9c2bf59c", "score": "0.53347194", "text": "def get_historical_prices_plus_one_day(symbol, date):\n#the date goes month(jan=0) day year\n url = 'http://ichart.yahoo.com/table.csv?s=%s&' % symbol + \\\n 'd=%s&' % str(int(date[5:7]) - 1) + \\\n 'e=%s&' % str(int(date[8:10]) + 1) + \\\n 'f=%s&' % str(int(date[0:4])) + \\\n 'g=d&' + \\\n 'a=%s&' % str(int(date[5:7]) - 1) + \\\n 'b=%s&' % str(int(date[8:10]) + 1) + \\\n 'c=%s&' % str(int(date[0:4])) + \\\n 'ignore=.csv'\n days = urllib.urlopen(url).readlines()\n data = [day[:-2].split(',') for day in days]\n return data", "title": "" }, { "docid": "20905ebc926b3a1b70bdd9da48625c0c", "score": "0.53295696", "text": "def get_decays(self):\n return self.decays", "title": "" }, { "docid": "c9bf956f237b612e5c47f646357ccdc9", "score": "0.53212434", "text": "def create_dates_list(start_date, end_date):\n\n delta = end_date - start_date\n dates = []\n for d in range(delta.days + 1):\n dates.append(start_date + timedelta(days = d))\n return dates", "title": "" }, { "docid": "c158eace4f3d50e9531876d84d4001f2", "score": "0.5315381", "text": "def get_monthly_due_dates(table: element.ResultSet):\r\n\r\n if len(table) != 1:\r\n return []\r\n else:\r\n # Find tag that resembles <td style=\"font-weight: bold; color: red;\">\r\n tds = table[0].find_all(\"td\", {\"style\" : \"font-weight: bold; color: #ad0901;\"})\r\n due_dates = []\r\n for td in tds:\r\n td_contents = td.contents[0] # Should expect date to be in format: mm/dd/yyyy\r\n try:\r\n datetime_object = datetime.strptime(td_contents, \"%m/%d/%Y\")\r\n due_dates.append(datetime_object)\r\n except:\r\n # td tag did not contain a date, so skip\r\n continue\r\n\r\n return due_dates", "title": "" }, { "docid": "2d3795dfda7ac8089b2ec866ae2e2f41", "score": "0.53107476", "text": "def weekly():", "title": "" }, { "docid": "47586b6ab5781dc6dda3c32bfc1d9a27", "score": "0.52951574", "text": "def GetDates(self):\n begindate = datetime.datetime(1899, 12, 30)\n ntimes = range(self.nperiods)\n periods = [ntimes[0], ntimes[-1]]\n st_end = []\n for period in periods:\n date_offset = self.ResultsStartPos + period*self.bytesperperiod\n self.fp.seek(date_offset, 0)\n day = struct.unpack('d', self.fp.read(2*self.RECORDSIZE))[0]\n st_end.append(begindate + datetime.timedelta(days=int(day)))\n return st_end", "title": "" }, { "docid": "5dd31faba637ab38d20eb42d1e080d86", "score": "0.5291366", "text": "def days_to_return_book(borrow_date):\n with open('config.json') as file:\n data = json.load(file)\n return borrow_date + timedelta(days=Parser.get_instance().days_to_return)", "title": "" }, { "docid": "36f3d24713e63fe14c44c90cf874df8f", "score": "0.5290857", "text": "def daily_balances(transactions):\n dates = {}\n\n for trans in transactions:\n if trans['Date'] in dates:\n dates[trans['Date']] += float(trans['Amount'])\n else:\n dates[trans['Date']] = float(trans['Amount'])\n\n return dates", "title": "" }, { "docid": "c0ff493d8f2bff77e87aa424b74e40ef", "score": "0.52907073", "text": "def get_purchase_date(self):\r\n return self.__purchase_date", "title": "" }, { "docid": "087a7b2f068d06999f7ef14baf7a0c38", "score": "0.5288863", "text": 
"def dailysales():\n first = date2obj(total[0].date)\n last = date2obj(total[-1].date)\n daycount = (last - first).days + 1 #total days including the last and first\n increment = datetime.timedelta(days=1) # to count from first to the last day\n days = {}\n for i in range(daycount):\n now = first + increment * i\n nowstr = obj2date(now)\n days[nowstr] = 0\n for item in total: \n days[item.date] = days.get(item.date,0) +1\n return days", "title": "" }, { "docid": "789613e65001be73655853c865bceb6c", "score": "0.5283415", "text": "def wealth_index(stock_name: str, date: str, \n end_date = datetime.datetime.today().strftime(\"%d-%m-%Y\")): \n rets = returns(stock_name)\n # get all returns for specified stock with function returns\n \n date_start = datetime.datetime.strptime(date, \"%d-%m-%Y\")\n # convert \"date\" entered as string to datetime type\n \n end_date = datetime.datetime.strptime(end_date, \"%d-%m-%Y\")\n # convert \"date\" entered as string to datetime type\n \n rets = rets.loc[(rets[\"HGDG_TARIH\"]> date_start) & (rets[\"HGDG_TARIH\"]< end_date)]\n \n rets_columns = rets[\"HGDG_KAPANIS\"]\n dates = rets[\"HGDG_TARIH\"]\n \n wealth_index = 1 * (1 + (rets_columns)).cumprod()\n \n wealth_index = pd.concat([dates, wealth_index], axis = 1)\n \n wealth_index = pd.DataFrame({\n \"Date\": wealth_index[\"HGDG_TARIH\"],\n \"Wealth_Index\": wealth_index[\"HGDG_KAPANIS\"] \n })\n\n return wealth_index", "title": "" }, { "docid": "07fbf35ed97c41ec4aa30bf69724f7dd", "score": "0.5274807", "text": "def _detect_date(self):\n final_date_dict_list = []\n date_dict_list = self._detect_range()\n final_date_dict_list.extend(date_dict_list)\n self._update_processed_text(date_dict_list)\n date_dict_list = self._detect_return_date()\n final_date_dict_list.extend(date_dict_list)\n self._update_processed_text(date_dict_list)\n date_dict_list = self._detect_departure_date()\n final_date_dict_list.extend(date_dict_list)\n self._update_processed_text(date_dict_list)\n date_dict_list = self._detect_any_date()\n final_date_dict_list.extend(date_dict_list)\n self._update_processed_text(date_dict_list)\n\n return final_date_dict_list", "title": "" }, { "docid": "4431b41fc09fdb952bcb497e4c32a1ff", "score": "0.5270926", "text": "def date(date):", "title": "" }, { "docid": "b5b8795204b325210c59d7eda44959c4", "score": "0.52700645", "text": "def day(centuries):\n\td = centuries * 36525", "title": "" }, { "docid": "dc64b2f86eecc5bf6a2bf3281e8bfdb8", "score": "0.52671945", "text": "def get_historical_prices(symbol, date):\n#the date goes month(jan=0) day year\n url = 'http://ichart.yahoo.com/table.csv?s=%s&' % symbol + \\\n 'd=%s&' % str(int(date[5:7]) - 1) + \\\n 'e=%s&' % str(int(date[8:10])) + \\\n 'f=%s&' % str(int(date[0:4])) + \\\n 'g=d&' + \\\n 'a=%s&' % str(int(date[5:7]) - 1) + \\\n 'b=%s&' % str(int(date[8:10])) + \\\n 'c=%s&' % str(int(date[0:4])) + \\\n 'ignore=.csv'\n days = urllib.urlopen(url).readlines()\n data = [day[:-2].split(',') for day in days]\n return data", "title": "" }, { "docid": "77518f7e1ed66dc9746e247456e7cb1f", "score": "0.52614725", "text": "def Get_DateColumn(self):\r\n print(\"Length of stock_dates: %d\" % len(self.stock_dates)) \r\n return self.stock_dates", "title": "" }, { "docid": "08e78b4c1777c147706a3714adbf1cd1", "score": "0.5258313", "text": "def date() -> list:\n return ['date']", "title": "" }, { "docid": "d6c56a2a0fcd5fe8c8f9ea27d6eecf83", "score": "0.52554756", "text": "def _get_dattes(self, product_id=None,date=None):\n mapped_fields = {\n 'life_date': 'life_time',\n 
'use_date': 'use_time',\n 'removal_date': 'removal_time',\n 'alert_date': 'alert_time'\n }\n \n str_date = str(date)\n print(str_date)\n date_r = datetime.datetime.strptime(str_date, '%Y-%m-%d %H:%M:%S')\n res = dict.fromkeys(mapped_fields, False)\n product = self.env['product.product'].browse(product_id) or self.product_id\n if product:\n i=0\n for field in mapped_fields:\n duration = getattr(product, mapped_fields[field])\n if duration and field != 'life_date':\n date = date_r + datetime.timedelta(days=duration)\n res[field] = fields.Datetime.to_string(date) \n if field == 'life_date':\n date = date_r + datetime.timedelta(days=0)\n res[field] = fields.Datetime.to_string(date)\n i = i+1\n return res", "title": "" }, { "docid": "86feeca616d2c78b72f5d4f3c1e51f11", "score": "0.52535295", "text": "def _find_dates(self):\n print(\"Checking for available dates...\")\n available_dates = []\n try:\n appointment_date_select = Select(self.driver.find_element_by_xpath('//*[@id=\"date\"]'))\n for option in appointment_date_select.options:\n # skip placeholder option\n if option.text != \"Please select a day\":\n available_dates.append(option.text)\n except Exception as e:\n print(\"Error checking available dates: \", e)\n self.driver.quit()\n return available_dates", "title": "" }, { "docid": "2e5678afc51d1c6a92e13059234ecfd2", "score": "0.5244338", "text": "def populate_database (date):\n\n # @LOGAN? why this default date?\n if not date:\n date = \"2017-01-01\"\n\n try:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n except:\n pass\n\n while date < datetime.datetime.today():\n daily_sales(date)\n date = date + datetime.timedelta(days=1)", "title": "" }, { "docid": "24b6e872babd0a03ce2e36f9c9f5b040", "score": "0.5242202", "text": "def _filter_offer_dates(self, product=False, begin_date=None, end_date=None):\n\n begin_date = self._parse_to_datetime(begin_date)\n end_date = self._parse_to_datetime(end_date)\n\n begin_str = begin_date.strftime('%Y%m%d')\n end_str = end_date.strftime('%Y%m%d')\n\n # Filter by product if needed\n current = list(self._yield_product(self.current_files, product=product))\n historic = list(self._yield_product(self.historic_files, product=product))\n\n\n\n # Try the current files first\n if self._match_true(begin_str, current) and self._match_true(end_str, current):\n dates = [d.strftime(\"%Y%m%d\") for d in pd.date_range(begin_date, \n end_date)]\n\n search_results = [self._match_iterables(item, current) \n for item in dates]\n\n # See if it partially matches historic, partially matches current\n elif self._match_true(end_str, current) and not self._match_true(begin_str, current):\n \n daily_dates = [d.strftime(\"%Y%m%d\") for d in pd.date_range(begin_date, \n end_date)]\n monthly_dates = [d.strftime(\"%Y%m\") for d in pd.date_range(\n begin_date, \n end_date + timedelta(days=31), \n freq=\"M\", normalize=True)]\n\n daily_results = [self._match_iterables(item, current) \n for item in daily_dates]\n monthly_results = [self._match_iterables(item, historic) for item in\n monthly_dates]\n\n search_results = daily_results + monthly_results\n\n # Scrape the historic files\n elif begin_str not in current and end_str not in current:\n month_dates = [d.strftime(\"%Y%m\") for d in pd.date_range(\n begin_date, \n end_date + timedelta(days=31), \n freq=\"M\", normalize=True)]\n\n search_results = [self._match_iterables(item, historic) for item in\n month_dates]\n\n # Return the search results, save the last search to an internal\n # attribute for safe keeping\n search_results = 
filter(lambda x: x != None, search_results)\n self.search_results = search_results\n return search_results", "title": "" }, { "docid": "f20fce6e92372ee38c8991a6ea5a704a", "score": "0.52404726", "text": "def days_accessible(user, coupon, week_start_date):\n\n start = max(week_start_date, user['REG_DATE'], coupon['DISPFROM'])\n end = min(week_start_date+datetime.timedelta(days=7), user['WITHDRAW_DATE'], coupon['DISPEND'])\n\n try:\n interval = (end-start).total_seconds()/86400.0\n\n except:\n print('end: ', end)\n print('start: ', start)\n print('end-start:', end-start)\n \n return interval if interval > 0 else 0", "title": "" }, { "docid": "ce017821ddd152d4ac4b227a34e06bff", "score": "0.5239177", "text": "def get_daily_contracts(self):\n query = (SESSION.query(Contract.doc_cloud_id, Vendor.name)\n .filter(Contract.dateadded == TODAY_DATE)\n .filter(Contract.vendorid == Vendor.id)\n .all())\n\n SESSION.close()\n\n return query", "title": "" }, { "docid": "b1aabb6bd46de9095b8953026e786ad9", "score": "0.52351433", "text": "def _all_date(self, *para):\n if para is not None and para != ():\n if para[0] != 0:\n all_date_list = db.session.query(PortfolioEsgScoreModel.date_key). \\\n filter_by(portfolio_id=para[0]). \\\n distinct(). \\\n order_by(PortfolioEsgScoreModel.date_key.desc()). \\\n all()\n else:\n all_date_list = db.session.query(PortfolioEsgScoreModel.date_key) \\\n .filter(PortfolioEsgScoreModel.esg_factor_id == para[1]).\\\n distinct(). \\\n order_by(PortfolioEsgScoreModel.date_key.desc()). \\\n all()\n\n else:\n all_date_list = db.session.query(PortfolioEsgScoreModel.date_key). \\\n distinct(). \\\n order_by(PortfolioEsgScoreModel.date_key.desc()). \\\n all()\n\n return all_date_list", "title": "" }, { "docid": "77834e0a6b5564c17c495dc116d10cf6", "score": "0.5234292", "text": "def getAllDates(self):\n query = \"\"\"SELECT Date FROM AMZN LIMIT 100;\"\"\" \n cnx = mysql.connector.connect(**self._config)\n cursor = cnx.cursor()\n cursor.execute(query)\n\n try:\n result = cursor.fetchall()\n except Exception:\n logger.error('Unable to get all the dates: ', exc_info=True)\n\n return result", "title": "" }, { "docid": "838cdf8570d513920fc8743d78323fa5", "score": "0.5234204", "text": "def make_day_requests(start_date: pendulum.DateTime, end_date: pendulum.DateTime, mailto: str) -> List[DayRequest]:\n\n events = []\n period = pendulum.period(start_date, end_date)\n for day in period.range(\"days\"):\n if day != end_date:\n events.append(DayRequest(day, mailto))\n return events", "title": "" }, { "docid": "6692939e544013e5d0e4ecdcfbf0964f", "score": "0.5226655", "text": "def coupon_days_accessible(coupon, week_start_date):\n\n start = max(week_start_date, coupon['DISPFROM'])\n end = min(week_start_date+datetime.timedelta(days=7), coupon['DISPEND'])\n\n try:\n interval = (end-start).total_seconds()/86400.0\n\n except:\n print('end: ', end)\n print('start: ', start)\n print('end-start:', end-start)\n interval = 0.0\n \n return interval if interval > 0 else 0", "title": "" }, { "docid": "6c16ccebbd260daea315adf7b31961bf", "score": "0.52257305", "text": "def ethiopic_date(year, month, day):\n return [year, month, day]", "title": "" }, { "docid": "60db2763c1dceb8fec9c019d2efa33a1", "score": "0.52246094", "text": "def get_between_dates(self, start_date, end_date):\n\n sql = \"\"\"\n SELECT dt,open,high,low,close,vol FROM `ada-fd`.{table}\n WHERE tick = '{tick}' and dt between '{start_date}'\n and '{end_date}' and vol > 0\n order by dt ASC\n \"\"\".format(table=self.__table,\n tick=self.ticks,\n 
start_date=start_date,\n end_date=end_date)\n\n ret = self.__get_data(sql)\n return ret", "title": "" }, { "docid": "54a080c909d39f0b88c097b263a8a80f", "score": "0.522131", "text": "def test_thanksgiving():\n # November 2005\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4 5\n # 6 7 8 9 10 11 12\n # 13 14 15 16 17 18 19\n # 20 21 22 23 24 25 26\n # 27 28 29 30\n\n nyse = xnys_cal\n good_dates = nyse.valid_days('2001-01-01', '2016-12-31')\n\n # If Nov has 4 Thursdays, {0} Thanksgiving is the last Thursday.\n thanksgiving_with_four_weeks = pd.Timestamp(\"2005-11-24\", tz='UTC')\n assert thanksgiving_with_four_weeks not in good_dates\n\n # November 2006\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4\n # 5 6 7 8 9 10 11\n # 12 13 14 15 16 17 18\n # 19 20 21 22 23 24 25\n # 26 27 28 29 30\n\n # If Nov has 5 Thursdays, {0} Thanksgiving is not the last week.\n thanksgiving_with_five_weeks = pd.Timestamp(\"2006-11-23\", tz='UTC')\n assert thanksgiving_with_five_weeks not in good_dates\n\n # If NYE falls on a weekend, {0} the Tuesday after is the first trading day.\n first_trading_day_after_new_years_sunday = pd.Timestamp(\"2012-01-03\", tz='UTC')\n assert first_trading_day_after_new_years_sunday in good_dates", "title": "" }, { "docid": "94c6ec74eca30f6fabd689a37417aede", "score": "0.5219331", "text": "def getCurrentUpperairDate():", "title": "" }, { "docid": "58e7c8b06439b01a2ec617c52624fc8e", "score": "0.52113044", "text": "def _all_date(self, *para):\n if para is not None and para != ():\n if para[0] != 0:\n all_date_list = db.session.query(PortfolioMetricDateModel.date_key). \\\n filter_by(portfolio_id=para[0]). \\\n distinct(). \\\n order_by(PortfolioMetricDateModel.date_key.desc()). \\\n all()\n else:\n all_date_list = db.session.query(PortfolioMetricDateModel.date_key) \\\n .filter(PortfolioMetricDateModel.portfolio_id == para[1]).\\\n distinct(). \\\n order_by(PortfolioMetricDateModel.date_key.desc()). \\\n all()\n\n else:\n all_date_list = db.session.query(PortfolioMetricDateModel.date_key). \\\n distinct(). \\\n order_by(PortfolioMetricDateModel.date_key.desc()). 
\\\n all()\n\n return all_date_list", "title": "" }, { "docid": "0f21b9c6d8b0c39a574e2cbb5ddd7d12", "score": "0.5210785", "text": "def buy(self):\n pass", "title": "" }, { "docid": "aedbe734aed0bfe3b87938c4b09e6c32", "score": "0.5202451", "text": "def test_Dates_After_Today_pass(self):\r\n self.assertTrue(test_dates_pass('2015-06-12','2019-05-11','1972-02-10','1975-03-15'))\r\n self.assertTrue(test_dates_pass('2015-04-12','2020-04-16','1999-02-10','2002-07-16'))\r\n self.assertTrue(test_dates_pass('2012-06-12','2018-05-11','1965-02-10','1970-04-15'))", "title": "" }, { "docid": "a302ee1247b8f1925c37b7f1229c7d63", "score": "0.52020496", "text": "def daily( self ):\n return CashLedgerCollection( self.df.groupby( pd.Grouper( key=\"date\", freq='d' ) ) )", "title": "" }, { "docid": "1b1682ac666b2e7a7c78cd19fe8df1b1", "score": "0.5201697", "text": "def list_all_expiration_dates(cls, params: Dict = {}) -> List[str]:\n contracts = cls.list_all(params)\n exp_dates = unique_values_from_key(contracts, \"date_expires\")\n return sorted(exp_dates)", "title": "" }, { "docid": "97f6635e1a02b87001504d23569eb3ae", "score": "0.5185713", "text": "def get_DB_t_days(start, end, conn):\r\n \r\n query = \"\"\"\r\n SELECT TradingDate\r\n FROM JYDB..QT_TradingDayNew\r\n WHERE TradingDate >= '%s' AND TradingDate <= '%s' AND IfTradingDay = 1 AND SecuMarket=83\r\n \"\"\" % (start, end)\r\n\r\n t_days = pd.read_sql(query, conn)\r\n t_days = t_days[\"TradingDate\"].apply(lambda x:dt.datetime.strftime(x,'%Y%m%d'))\r\n return np.array(t_days)", "title": "" }, { "docid": "edb0ab85888223f25de4b68867d1f425", "score": "0.51834667", "text": "def get_winning_days(fund_ts):\n return tsu.get_winning_days(tsu.daily(fund_ts))", "title": "" }, { "docid": "1baf4db5ca51cc41996ac1227af9518f", "score": "0.51804405", "text": "def buy_expenses(self):\n\t\texpenses = 0\n\t\tlast_month_start = Scheduler().cur_tick - Scheduler().get_ticks_of_month()\n\t\tkeys_to_delete = []\n\t\tfor key, values in self.buy_history.iteritems():\n\t\t\tif key < last_month_start:\n\t\t\t\tkeys_to_delete.append(key)\n\t\t\telse:\n\t\t\t\texpenses += values[2]\n\t\t# remove old keys\n\t\tfor key in keys_to_delete:\n\t\t\tdel self.buy_history[key]\n\t\treturn expenses", "title": "" }, { "docid": "91d0f63ed7bfeab87cbcfe6775ddc5f1", "score": "0.51794463", "text": "def get_date_specifics(service, now):\n dates = get_dates(service)\n for date in dates:\n start = decypher_date(date['start'])\n end = decypher_date(date['end'])\n if start <= now:\n if end > now:\n return date['templateID'], int(date['send_id'])\n else:\n continue\n else:\n log.info(\"We haven't started yet!\")\n exit(1)", "title": "" }, { "docid": "56238f6ac35c3cc7a1a23dee07bfa00d", "score": "0.51758796", "text": "def calculate_trade_info(announce_date, ticker_info, market_info, stop_loss_rate=None, holding_days=None,\n sell_date=None):\n\n if sell_date is None and holding_days is None:\n raise Exception('Neither sell_date or holding_days has value')\n\n temp_result = {const.REPORT_RETURN_RATE: np.nan, const.REPORT_SELL_DATE: np.nan,\n const.REPORT_BUY_DATE: np.nan, const.REPORT_MARKET_TYPE: np.nan,\n const.REPORT_MARKET_TICKER: np.nan, const.REPORT_BUY_PRICE: np.nan}\n\n # Get buy day\n trading_days = trading_day_list[trading_day_list > announce_date].tolist()\n if len(trading_days) == 0:\n return pd.Series(temp_result)\n trade_day = trading_days[0]\n\n used_stock_data = load_stock_info(trade_day, ticker_info, market_info)\n if used_stock_data.empty:\n return pd.Series(temp_result)\n\n buy_price = 
used_stock_data.loc[used_stock_data.first_valid_index(), const.STOCK_ADJPRCWD]\n buy_date = used_stock_data.loc[used_stock_data.first_valid_index(), const.STOCK_DATE]\n\n # this means there are not enough days to finish this operation\n if holding_days is not None:\n if len(trading_days) == 0:\n return pd.Series(temp_result)\n elif len(trading_days) < holding_days:\n sell_date = trading_days[-1]\n else:\n sell_date = trading_days[holding_days - 1]\n\n for date in trading_days[1:]:\n stock_info = load_stock_info(date, ticker_info, market_info)\n if stock_info.empty:\n continue\n\n # print stock_info\n\n current_price = stock_info.loc[stock_info.first_valid_index(), const.STOCK_ADJPRCWD]\n rate = current_price / buy_price - 1\n\n if date >= sell_date or (stop_loss_rate is not None and rate < stop_loss_rate):\n sell_price = current_price\n temp_result[const.REPORT_RETURN_RATE] = sell_price / buy_price - 1\n temp_result[const.REPORT_SELL_DATE] = date\n temp_result[const.REPORT_MARKET_TICKER] = stock_info.loc[stock_info.first_valid_index(),\n const.STOCK_TICKER]\n # temp_result[const.REPORT_MARKET_TYPE] = stock_info.loc[stock_info.first_valid_index(),\n # const.STOCK_MARKET_TYPE]\n temp_result[const.REPORT_BUY_DATE] = buy_date\n temp_result[const.REPORT_BUY_PRICE] = buy_price\n return pd.Series(temp_result)\n\n return pd.Series(temp_result)", "title": "" }, { "docid": "3b306778e9ffaaca696e43e47f21e542", "score": "0.5175284", "text": "def _all_date(self, *para):\n if para is not None and para != ():\n if para[0] != 0:\n all_date_list = db.session.query(PortfolioResultModel.date_key). \\\n filter_by(portfolio_id=para[0]). \\\n distinct(). \\\n order_by(PortfolioResultModel.date_key.desc()). \\\n all()\n else:\n all_date_list = db.session.query(PortfolioResultModel.date_key) \\\n .filter(PortfolioResultModel.portfolio_id == para[1]).\\\n distinct(). \\\n order_by(PortfolioResultModel.date_key.desc()). \\\n all()\n\n else:\n all_date_list = db.session.query(PortfolioResultModel.date_key). \\\n distinct(). \\\n order_by(PortfolioResultModel.date_key.desc()). \\\n all()\n\n return all_date_list", "title": "" }, { "docid": "7136a52e3ef0234c26e69f875fce6f5d", "score": "0.5171434", "text": "def get_historical_prices_plus_one_day(symbol, date):\n url = 'http://ichart.yahoo.com/table.csv?s=%s&' % symbol + \\\n 'd=%s&' % str(int(date[4:6]) - 1) + \\\n 'e=%s&' % str(int(date[6:]) + 1) + \\\n 'f=%s&' % str(int(date[0:4])) + \\\n 'g=d&' + \\\n 'a=%s&' % str(int(date[4:6]) - 1) + \\\n 'b=%s&' % str(int(date[6:]) + 1) + \\\n 'c=%s&' % str(int(date[0:4])) + \\\n 'ignore=.csv'\n\n data=[] #python3 method ,\n try:\n days = urllib.request.urlopen(url).readlines() #urllib.urlopen --> py3k needs .request. in there\n for day in days: #day[0] holds the fields names, day[1+] holds the data values\n dayStr = str(day, encoding='utf8')\n data.append( dayStr[:-2].split(','))\n #print('his',data) #Need to fix this so that we get the close data that we want.\n except urllib.error.HTTPError as err:\n if err.code == 404: #try incrementing date again \n counter+=1\n if (counter > _CounterSentinel) :\n print(\"uh oh\")\n done=True\n data=[[\"error\"]]\n\n return data[1][6] #return the Adj Close value, this takes splits into acct #this is kinda willy nilly since we don't check that we get valid results.", "title": "" } ]
3f1bd3557a40150f6f3867fe96f4f28d
display all the assignments per subject
[ { "docid": "18ffe5b655118cc61e341ba7eda2744a", "score": "0.5649913", "text": "def single(sub_id):\n db = con()\n \n assignments = db.execute(\"SELECT * FROM assignments WHERE subject_id =?\",(sub_id,)).fetchall()\n subject = db.execute(\"SELECT * FROM subjects WHERE subject_id =?\",(sub_id,)).fetchone()\n if len(assignments) < 1:\n \n return render_template(\"single.html\", message=\"No subject found\", subject=subject)\n return render_template(\"single.html\", assignments=assignments, subject=subject)", "title": "" } ]
[ { "docid": "a8c318c38ea786352f82bfc466e06831", "score": "0.7014018", "text": "def printSubjects(subjects):\n totalVal, totalWork = 0,0\n if len(subjects) == 0:\n return 'Empty SubjectList'\n res = 'Course\\tValue\\tWork\\n======\\t====\\t=====\\n'\n subNames = subjects.keys()\n subNames.sort()\n for s in subNames:\n val = subjects[s][VALUE]\n work = subjects[s][WORK]\n res = res + s + '\\t' + str(val) + '\\t' + str(work) + '\\n'\n totalVal += val\n totalWork += work\n res = res + '\\nTotal Value:\\t' + str(totalVal) +'\\n'\n res = res + 'Total Work:\\t' + str(totalWork) + '\\n'\n print res", "title": "" }, { "docid": "a8c318c38ea786352f82bfc466e06831", "score": "0.7014018", "text": "def printSubjects(subjects):\n totalVal, totalWork = 0,0\n if len(subjects) == 0:\n return 'Empty SubjectList'\n res = 'Course\\tValue\\tWork\\n======\\t====\\t=====\\n'\n subNames = subjects.keys()\n subNames.sort()\n for s in subNames:\n val = subjects[s][VALUE]\n work = subjects[s][WORK]\n res = res + s + '\\t' + str(val) + '\\t' + str(work) + '\\n'\n totalVal += val\n totalWork += work\n res = res + '\\nTotal Value:\\t' + str(totalVal) +'\\n'\n res = res + 'Total Work:\\t' + str(totalWork) + '\\n'\n print res", "title": "" }, { "docid": "5e7879d229e00096aa74fd821d2530b5", "score": "0.70125884", "text": "def printSubjects(subjects):\n totalVal, totalWork = 0,0\n if len(subjects) == 0:\n return 'Empty SubjectList'\n res = 'Course\\tValue\\tWork\\n======\\t=====\\t====\\n'\n subNames = subjects.keys()\n subNames.sort()\n for s in subNames:\n val = subjects[s][VALUE]\n work = subjects[s][WORK]\n res = res + s + '\\t' + str(val) + '\\t' + str(work) + '\\n'\n totalVal += val\n totalWork += work\n res = res + '\\nTotal Value:\\t' + str(totalVal) +'\\n'\n res = res + 'Total Work:\\t' + str(totalWork) + '\\n'\n print res", "title": "" }, { "docid": "f615bb4a8a23d6032a9c0b229ec625bf", "score": "0.67587334", "text": "def display_subject_details(data):\n for subject_details in data:\n print(\"{} is taught by {:1} and has {:2} students\".format(*subject_details))", "title": "" }, { "docid": "490ba142e9243dcd720c6bc550134699", "score": "0.6673414", "text": "def printSubjects():\r\n mapping=loadSubjects(SUBJECT_FILENAME)\r\n totalVal, totalWork = 0,0\r\n if len(mapping) == 0:\r\n return 'Empty SubjectList'\r\n #\\t means tab\r\n res = 'Course\\tValue\\tWork\\n======\\t====\\t=====\\n'\r\n #dict.keys is a list of keys of a dict\r\n subNames = mapping.keys()\r\n ## values should be sorted with floats but not the first value of a float\r\n ## 10.09 should after 9.2 while if key=float is neglected, 10.09 comes before 9.2\r\n subNames.sort(key=float)\r\n print subNames\r\n for s in subNames:\r\n val = mapping[s][0]\r\n work = mapping[s][1]\r\n res = res + s + '\\t' + str(val) + '\\t' + str(work) + '\\n'\r\n totalVal += int(val)\r\n totalWork += int(work)\r\n res = res + '\\nTotal Value:\\t' + str(totalVal) +'\\n'\r\n res = res + 'Total Work:\\t' + str(totalWork) + '\\n'\r\n print res", "title": "" }, { "docid": "0b957b2afbb67b2b00eae61c6e047a7c", "score": "0.660154", "text": "def renderSubjects(subjects):\n for subject_id in sorted(list(subjects.keys())):\n print(subject_id)\n for predicate in sorted(list(subjects[subject_id].keys())):\n print(\" \", predicate)\n for obj in subjects[subject_id][predicate]:\n print(\" \", obj)", "title": "" }, { "docid": "081994b294f9ab941fb258f7063191ab", "score": "0.63691497", "text": "def currentSubjects(self):", "title": "" }, { "docid": "9073d24fb9e977114a9a3205c20f967e", "score": 
"0.6206391", "text": "def assignments(ctx, *args, **kwargs):\n callbacks.list_subcommand(\n ctx, display_fields=ASSIGNMENT_FIELDS, grep_name='assignments'\n )", "title": "" }, { "docid": "65ce29248318b95614e7cc348212d841", "score": "0.61780787", "text": "def students_assignments(request):\n\n user_tests = TestUserAssignment.objects.filter(user__username=request.user,\n assignment_completed=False)\n all_tests = Test.objects.all()\n return render(request, 'students/students-assigned-tests.html',\n context={'user_tests': user_tests,\n 'all_tests': all_tests})", "title": "" }, { "docid": "5bd3bfd3abf9083f7f35783160266451", "score": "0.6053379", "text": "def get_all_in_subjects():\r\n q = db.get_col_subjects(\"category\", \"subject\", \"topic\")\r\n return q", "title": "" }, { "docid": "a8c781a1efc6110ca61702449c13870b", "score": "0.6010947", "text": "def __get_aggregated_subjects__(self,workflow_id):\n stmt = \"select subject_id from aggregations where workflow_id = \" + str(workflow_id)\n postgres_cursor = self.postgres_session.cursor()\n postgres_cursor.execute(stmt)\n\n subjects = []\n\n for r in postgres_cursor.fetchall():\n subjects.append(r[0])\n\n return subjects", "title": "" }, { "docid": "1cc014db630fdf3b3b161a8f5010e806", "score": "0.58827966", "text": "def session_assignments(session_id, course_id):\n session = sessions.get_session(session_id)\n course = courses.get_course(course_id)\n\n if session['course_id'] != course['course_num']:\n abort(403)\n\n cur = db.get_db().cursor()\n \n cur.execute(\"\"\"\n SELECT * FROM assignments\n WHERE sessions_id = %s\"\"\",\n (session_id,))\n\n assignments = cur.fetchall()\n\n cur.close()\n return render_template(\"student_views/your_assignments.html\", session=session, assignments=assignments, course=course)", "title": "" }, { "docid": "1e7b5aa4a8794b6ac332e623fc5931d1", "score": "0.58004814", "text": "def get_subjects(self):\n return list(self.subjects.values())", "title": "" }, { "docid": "39edab880ba21191b2b680860e467ee8", "score": "0.57944715", "text": "def get(self, request, *args, **kwargs):\n subject = Subject.objects.all()\n context = {\n \"subject_list\": subject\n }\n\n return render(request, self.template_name, context)", "title": "" }, { "docid": "5c358dcfa6e6fb9d12f094aef9203acc", "score": "0.5763922", "text": "def subjects(self, subjects):\n\n self._subjects = subjects", "title": "" }, { "docid": "4e2afb4d097b348b9f6d6c351539ed45", "score": "0.5741078", "text": "def main():\n data = get_data()\n display_subject_details(data)", "title": "" }, { "docid": "c89749472fe6ab2319d4ed6f3255fc33", "score": "0.573881", "text": "def query_associated_subjects(self) -> models.QuerySet:\n Subject = get_subject_model()\n return Subject.objects.filter(\n mri_session_set__scan__study_groups__study=self.id\n ).distinct()", "title": "" }, { "docid": "ac9d00ea3731651996d310b1b2943595", "score": "0.57098675", "text": "def get_category_subjects():\r\n q = db.get_col_subjects(\"category\", \"subject\")\r\n return q", "title": "" }, { "docid": "6a1881011b8941d81efbf747a123453a", "score": "0.5707057", "text": "def instructorsummary1(self):\r\n\r\n for course,studentnumber in self.coursestudents.items():\r\n return [self.instructorcwid,self.instructorname,self.instructordept, course, studentnumber]", "title": "" }, { "docid": "357e43eaf291abe339103bdd5a0f8bd8", "score": "0.5626068", "text": "def get_subjects(self, options=None):\n\n secure = self.options[\"force_ssl\"]\n try:\n secure = options[\"force_ssl\"]\n except:\n pass\n\n projectsToGet = [project 
for project in self.selectedProjectIds if not project in self.fetchedProjectIds]\n\n requests = (grequests.get(self.server + \"/data/projects/\" + project + \"/subjects\",\n headers={\"Cookie\": \"JSESSIONID=\" + self.JSESSIONID},\n params={\n \"format\": \"json\",\n \"columns\": \"\"\"age,birth_weight,dob,education,educationDesc,\n ethnicity,gender,gestational_age,group,handedness,\n height,insert_date,insert_user,last_modified,pi_firstname,\n pi_lastname,post_menstrual_age,race,ses,src,weight,yob\"\"\"\n },\n verify=secure) for project in projectsToGet)\n\n results = grequests.map(requests)\n\n # if self.data == None:\n # self.data = SubjectData({\"id_field\": \"ID\"})\n\n for count, result in enumerate(results):\n if result.status_code == 200:\n try:\n subjects = result.json()[\"ResultSet\"][\"Result\"]\n except:\n pass\n else:\n self.fetchedProjectIds.append(projectsToGet[count])\n for i in range(len(subjects)):\n subjects[i][\"project\"] = projectsToGet[count]\n subjects[i][\"experiments\"] = {}\n self.add_subject(subjects[i])\n\n self.add_group({}, \"All Subjects\")\n\n return self", "title": "" }, { "docid": "11397855adecc841c36142ed15c09ce3", "score": "0.5619291", "text": "def subject_views(request):\n try:\n session = request.session.get(\"user_id\")\n if session:\n user_info_obj = UserInfo.objects.get(id=session)\n user_operation_obj = UserOperation.objects.filter(fk_user_role_id=user_info_obj.fk_user_role.id)\n subject_obj = Subject.objects.all()\n course_obj = Course.objects.all()\n semester_obj = Semester.objects.all()\n return render(request, \"master_subject.html\",\n {\"user_operation_obj\": user_operation_obj, \"user_info_obj\": user_info_obj,\n \"subject_obj\": subject_obj, \"course_obj\": course_obj, \"semester_obj\": semester_obj})\n else:\n return redirect(\"/\")\n except Exception:\n error_save(str(traceback.format_exc()))\n return redirect(\"error_handler_500\")", "title": "" }, { "docid": "e44d58f1abed0a079e2089a10f2956f0", "score": "0.5581853", "text": "def show(self):\n centers = self.centers()\n subj = self.get_feature('subjects')\n prevalence = self.roi_prevalence()\n print \"index\", \"prevalence\", \"mean_position\", \"individuals\"\n for i in range(self.k):\n print i, prevalence[i], centers[i], np.unique(subj[i])", "title": "" }, { "docid": "00ec49cda0eb2c2117b721a83f222ffe", "score": "0.55746514", "text": "def list_assignments(p):\n students_assignments = Assignment.get_assignments_by_student(p.student_id)\n students_assignments_dump = AssignmentSchema().dump(students_assignments, many=True)\n return APIResponse.respond(data=students_assignments_dump)", "title": "" }, { "docid": "23d8accbe310597808d649321bc1e341", "score": "0.55666864", "text": "def get_assignments(course_id, header, host_site):\n \n path = '/api/v1/courses/%s/assignments' % (course_id)\n url = '%s%s%s' % (PROTOCOL, host_site, path)\n params = {'include[]':'submission',\n 'per_page': PAGE_PAGINATION_LIMIT\n }\n \n assignments = requests.get(url, headers=header, params=params).json() \n return sorted(assignments, key = lambda assignment: assignment['due_at'])", "title": "" }, { "docid": "2068335873f23712018d564d8ac8f641", "score": "0.5558842", "text": "def subjects(concept_graph, concept, type=None):\n assert concept_graph.subjects('Sarah') == {'Peter'}\n assert concept_graph.subjects('Mary', 'likes') == {'John'}\n assert concept_graph.subjects('Mary', 'dislikes') == {'Peter'}", "title": "" }, { "docid": "a0dfe96a2995b7cd021c51a11522e019", "score": "0.55412096", "text": "def 
instructor_table(self):\n print()\n print(\"Instructor Summary:\")\n # decided to display 'Dept' as well, though not required by assignment\n header = [\"CWID\", \"Name\", \"Dept\", \"Course\", \"Students\"]\n x = PT(header)\n for item in self.instructors:\n class_list = list(item.courses.keys())\n for subject in class_list:\n x.add_row([str(item.CWID), str(item.name), str(item.dept), subject, item.courses[subject]])\n print(x)\n return None", "title": "" }, { "docid": "50f937f63f0c7150c92ea1d3da644039", "score": "0.5535205", "text": "def get_subject(self):\n for subject in self.subjects.keys():\n if self.subjects[subject].assigned:\n continue\n else:\n self.subjects[subject].assigned = True\n return self.subjects[subject]", "title": "" }, { "docid": "40ef04f71cfc8ef9bf923cc6e4623e93", "score": "0.553517", "text": "def get_data(self, subjects):\n data = []\n for subject in subjects:\n data.append(self._get_single_subject_data(subject))\n return data", "title": "" }, { "docid": "10273bb4694aa0b7ba8b46b62f690452", "score": "0.5534084", "text": "def get_subjects(subject_nodes):\n arXiv_subjects = []\n INSPIRE_subjects = []\n\n for subject in subject_nodes:\n category = get_subfield(subject, \"2\")\n subject = get_subfield(subject, \"a\")\n\n if category == \"arXiv\":\n arXiv_subjects.append(subject)\n elif category == \"INSPIRE\":\n INSPIRE_subjects.append(subject)\n\n arXiv_subjects = (arXiv_subjects + [np.nan] * SUBJECT_NCOLS)[:SUBJECT_NCOLS]\n INSPIRE_subjects = (INSPIRE_subjects + [np.nan] * SUBJECT_NCOLS)[:SUBJECT_NCOLS]\n\n subjects_df = pd.DataFrame(data=[arXiv_subjects + INSPIRE_subjects], columns=ARXIV_INSPIRE_SUBJECT_COLS)\n\n return subjects_df", "title": "" }, { "docid": "64e17ea938cadb661919b810f09cb6ab", "score": "0.55337095", "text": "def read_subjects(contents):\n course_list = []\n for subj in contents.get(\"selectedSubjects\", []):\n if SUBJECT_ID_KEY in subj:\n course_list.append(subj[SUBJECT_ID_KEY])\n elif SUBJECT_ID_ALT_KEY in subj:\n course_list.append(subj[SUBJECT_ID_ALT_KEY])\n return course_list", "title": "" }, { "docid": "a732e353e7ce21fc46bcf789bea7635b", "score": "0.5526596", "text": "def show_subjects(dir_path):\r\n\tdata_dir = os.listdir(dir_path)\r\n\treturn data_dir", "title": "" }, { "docid": "b210428a8f7c047c5a2c0ac6e992331d", "score": "0.550513", "text": "def compare_subjects_within_student(subj1_all_students,\n subj2_all_students):\n for student, grades in subj1_all_students.items():\n if student in subj2_all_students.keys() and student != ('subject'):\n if max(subj1_all_students[student]) > max(subj2_all_students[student]):\n print (student, subj1_all_students['subject'])\n elif max(subj1_all_students[student]) < max(subj2_all_students[student]):\n print (student, subj2_all_students['subject'])\n elif max(subj1_all_students[student]) == max(subj2_all_students[student]):\n print (student, subj2_all_students['subject'], 'and', subj1_all_students['subject'] )", "title": "" }, { "docid": "f704b732e2f007d47d5c103f541d1efd", "score": "0.5490344", "text": "def get_assignments(self, course_id):\n return self.api.get('/courses/%s/assignments' % course_id)", "title": "" }, { "docid": "caf8b2afed8295174706136cafcda821", "score": "0.546389", "text": "def list(name_or_id=\"\", filter=None, human: bool = False):\n db_conf = configuration.get_configuration(key='management')\n manager_api_key = connect_from_env()\n _subjects_req = requests.get(\"{}/subjects\".format(db_conf.get(\"url\")),\n headers={\"x-api-key\": manager_api_key}\n )\n if _subjects_req.status_code == 
200:\n if name_or_id:\n _subjects = None\n if name_or_id in _subjects_req.json().get(\"subjects\"):\n _subjects = _subjects_req.json().get(\"subjects\").get(name_or_id)\n else:\n for _subjects_key in _subjects_req.json().get(\"subjects\"):\n _name = _subjects_req.json().get(\"subjects\").get(_subjects_key).get(\"name\")\n if _name == name_or_id:\n _subjects = _subjects_req.json().get(\"subjects\").get(_subjects_key)\n name_or_id = _subjects_key\n break\n if not _subjects:\n raise Exception(\"Cannot find Subjects with name or ID {}\".format(name_or_id))\n else:\n if human:\n result = {\"subjects\": {name_or_id: _subjects}}\n else:\n result = {\"subjects\": [{name_or_id: _subjects}]}\n elif filter:\n return \"\\n\".join(\n [\"\\t\".join(_t) for _t in filter_dict(_subjects_req.json().get(\"subjects\"),\n filter.split(\",\"))]\n )\n else:\n result = _subjects_req.json()\n\n if human:\n return SubjectsCLI.human_display(result)\n else:\n return result\n LOGGER.error('Cannot list Subjects {}'.format(_subjects_req.status_code))", "title": "" }, { "docid": "3846677f1c57e681c1e254c5ca2be05c", "score": "0.54636914", "text": "def show_submitted_courses(self):\n with open('../databases/users_db/students_info.json') as std_info:\n units = 0\n info = json.load(std_info)\n table = PrettyTable(\n ['course code', 'course name', 'units', 'teacher name', 'field code', 'total quantity'])\n for course in info[f'{self.user_id}'][1]:\n values = []\n for i in ['course_code', 'name', 'units', 'teacher_name', 'field_code', 'total_quantity']:\n values.append(course[i])\n table.add_row(values)\n units += course['units']\n print(f'TOTAL UNITS : {units}')\n print(table)", "title": "" }, { "docid": "a611d41ea323ef1a9d7f8f3116404ea3", "score": "0.54625446", "text": "def public_assignments():\n\n # Get optional class filter from get query\n course_id = request.args.get(\"courseId\", default=None)\n\n # Get (possibly cached) assignment data\n assignment_data = get_assignments(current_user.netid, course_id)\n\n # Iterate over assignments, getting their data\n return success_response({\"assignments\": assignment_data})", "title": "" }, { "docid": "bb9340e25767b1657be55e5a1e228693", "score": "0.54577607", "text": "def get_all_assignments(self):\n hits = self.connection.get_all_hits()\n assignments = []\n for hit in hits:\n page = 1\n while True:\n page_assignments = self.connection.get_assignments(hit.HITId, page_size=100, page_number=page)\n assignments.extend(page_assignments)\n page += 1\n if len(page_assignments) == 0:\n break\n return assignments", "title": "" }, { "docid": "75bcc6a3fdd0549142a346b6b0aaf20f", "score": "0.54342514", "text": "def list_subjects_and_categories(command, root_path):\n subjects=[]\n categories=[]\n groups=[\"all\"]\n \n with open(command['fullpath'],'rt',encoding=\"utf-8-sig\") as fh:\n header=fh.readline().strip().split(',')\n subj_idx = header.index(\"id\")\n cat_idx = header.index(\"category\")\n try:\n group_idx = header.index(\"group\")\n except:\n group_idx = -1\n\n from csv import reader\n for line in reader(fh):\n if line[subj_idx] not in subjects:\n subjects.append(line[subj_idx])\n if line[cat_idx] not in categories:\n categories.append(line[cat_idx])\n if group_idx > -1:\n group_label = line[group_idx]\n if group_label not in groups:\n groups.append(group_label)\n return { \"type\": \"list_subjects_and_categories\",\n \"subjects\": subjects, \n \"categories\": categories,\n \"groups\": groups,\n \"subject\": subjects[0],\n \"category\": categories[0],\n \"group\": groups[0] }", 
"title": "" }, { "docid": "3648fe0f497949953bfc8bb6beaa06fa", "score": "0.54211396", "text": "def assign_view(assign_id, session_id, course_id):\n assignment = assign.get_assignment(assign_id)\n session = sessions.get_session(session_id)\n course = courses.get_course(course_id)\n if session['course_id'] != course['course_num']:\n abort(403)\n if session['id'] != assignment['sessions_id']:\n abort(403)\n\n return render_template(\"student_views/assignment_details.html\", session=session, assignment=assignment, course=course)", "title": "" }, { "docid": "d60883324d25fa431a5a90d6ceeda5b0", "score": "0.5397596", "text": "def filter_subjects(course_id=None, staff_id=None, request=None):\r\n # get list of submitted data.\r\n list_data = [course_id, staff_id]\r\n # give none value to all querysets as initial values.\r\n subjects_course_qs = None\r\n subjects_staff_qs = None\r\n if course_id:\r\n # filter subjects by selected course.\r\n subjects_course_qs = Subject.objects.filter(course__id=course_id)\r\n # if queryset doesn't exist.\r\n if not subjects_course_qs.exists():\r\n subjects_course_qs = 'Subject Course'\r\n if staff_id: \r\n # filter subjects by selected staff.\r\n subjects_staff_qs = Subject.objects.filter(staff__id=staff_id)\r\n # if queryset doesn't exist.\r\n if not subjects_staff_qs.exists():\r\n subjects_staff_qs = 'Subject Staff'\r\n # make a list of all filter querysets.\r\n filter_qs_list = [subjects_course_qs, subjects_staff_qs]\r\n # if course, session or gender is selected.\r\n if any(list_data):\r\n # return the intersection between all querysets.\r\n return filter_list(filter_qs_list) \r\n # if not.\r\n else:\r\n # get all staffs of current adminhod's courses.\r\n return Subject.objects.user_subjects(request.user)", "title": "" }, { "docid": "e0c340a7fdb2576dff26c7a4cf286924", "score": "0.5395771", "text": "def get_subjects(datapath):\n df = read_tsv(datapath)\n subjects = df.participant_id.tolist()\n baseline = df.baseline.tolist()\n retest = df.retest.tolist()\n sub_ses = []\n for sub, bas, trt in zip(subjects, baseline, retest):\n sub_ses.append((sub, bas))\n sub_ses.append((sub, trt))\n return sub_ses", "title": "" }, { "docid": "40c446ccfa8466074e29f5bc49364dc2", "score": "0.53862315", "text": "def subjects(self, subject_ids):\n return self.db.get_golds(subject_ids)", "title": "" }, { "docid": "28248e8c679be52c14a29589a576e8dc", "score": "0.5384386", "text": "def parse_subject(self, response):\n current_term_id = self.determine_current_term(response)\n subject_id = re.search('\\?subject=(\\d+)', response.url).group(1)\n\n subject_name = response.xpath(\"//h2[@class='vac']\")[0].xpath('./text()').extract()\n subject_name = \"\".join([l.strip('\\n') for l in subject_name if l.strip('\\n')])\n\n for student in response.xpath(\"//td[@class='student-assessment']\"):\n student_id = student.xpath('./div/@id').extract()[0].split('_')[-1]\n if self.student_id and self.student_id != student_id:\n continue\n\n # First we have to set up and get the report section (subject) in the database\n item = PrimaryReportSectionItem()\n comment = response.xpath(\"//div[@id='user_comments_{}']\".format(student_id))\n comment = comment.xpath('./textarea/text()').extract()\n comment = comment[0] if comment else \"\"\n\n item['student_id'] = student_id\n item['course_id'] = response.meta.get('class_id')\n item['term_id'] = current_term_id\n\n item['subject_id'] = subject_id\n item['subject_name'] = subject_name\n item['comment'] = comment\n\n strands = 
response.xpath(\"//div[@id='user_strand_marks_{}']\".format(student_id))\n if not strands:\n # This is a single subject that has been changed from strands to just an overall comment\n overall_comment = response.xpath(\"//div[@id='user_final_grade_marks_{}']\".format(student_id))\n if overall_comment:\n overall_comment = overall_comment[0]\n selection = overall_comment.xpath(\"./table/tbody/tr/td/select/option[@selected='selected']/text()\").extract()\n selection = selection[0] if selection else \"\"\n item['overall_comment'] = {'G': 'Good', 'O':'Outstanding', 'N':'Needs Improvement'}.get(selection, '')\n else:\n item['overall_comment'] = \"N/A\"\n else:\n item['overall_comment'] = \"N/A\"\n\n yield item\n\n for student_strand in strands:\n which = 1\n for strand in student_strand.xpath('./table/tbody/tr'):\n strand_label = (strand.xpath('./td[1]/text()').extract()[0]).strip('\\n').strip()\n strand_label = strand_label[0].upper() + strand_label[1:]\n strand_label += \".\" if strand_label[-1] != '.' else \"\"\n value = strand.xpath(\"./td[2]/select/option[@selected='selected']/text()\").extract()\n strand_value = value[0] if value else \"\"\n \n item = PrimaryReportStrandItem()\n item['student_id'] = student_id\n item['course_id'] = response.meta.get('class_id')\n item['term_id'] = current_term_id\n item['subject_id'] = subject_id\n item['which'] = which\n\n item['strand_label'] = strand_label\n item['strand_label_titled'] = strand_label.title().replace('And', 'and')\n item['strand_text'] = strand_value\n\n if strand_label:\n yield item\n which += 1\n\n for student_outcome in response.xpath(\"//div[@id='user_learning_outcomes_marks_{}']\".format(student_id)):\n which = 1\n for outcome in student_outcome.xpath('./table'):\n\n # For this, we have to inspect the headers to see \n\n for section_num in range(len(outcome.xpath('./thead'))):\n\n strand_heading = outcome.xpath('./thead/tr/th/strong/text()').extract()[section_num * 2]\n outcome_body = outcome.xpath('./tbody')[section_num]\n\n for outcome_content in outcome_body.xpath('./tr'):\n\n cells = outcome_content.xpath('./td')\n if len(cells) == 2:\n outcome_label = (cells[0].xpath('./text()').extract()[0]).strip('\\n').strip()\n outcome_label = outcome_label[0].upper() + outcome_label[1:]\n outcome_label += '.' 
if outcome_label[-1] != \".\" else \"\"\n value = cells[1].xpath(\"./select/option[@selected='selected']/text()\").extract()\n\n outcome_value = value[0] if value else \"\"\n\n item = PrimaryReportOutcomeItem()\n item['student_id'] = student_id\n item['course_id'] = response.meta.get('class_id')\n item['term_id'] = current_term_id\n item['subject_id'] = subject_id\n item['heading'] = strand_heading.title().replace('And', 'and')\n item['which'] = which\n item['outcome_label'] = outcome_label\n item['outcome_label_titled'] = outcome_label.title().replace('And', 'and')\n item['outcome_text'] = outcome_value\n\n if outcome_label.strip('\\n'):\n yield item\n which += 1", "title": "" }, { "docid": "5f9bbac8972ea23c44007e81f3a91216", "score": "0.53774905", "text": "def show_chosen_courses(self):\n if len(self.chosen_courses) == 0:\n print('No courses have been added yet.')\n\n else:\n table = PrettyTable(\n ['course code', 'course name', 'units', 'teacher name', 'field code', 'total quantity'])\n\n for lesson in self.chosen_courses:\n table.add_row(\n [lesson.course_code, lesson.name, lesson.units, lesson.teacher_name, lesson.field_code,\n lesson.total_quantity])\n print(table)", "title": "" }, { "docid": "f5b8740e22088a47e45006e7695c29fc", "score": "0.537559", "text": "def __str__(self):\n return self.subject_name", "title": "" }, { "docid": "51a9da0ced6340d1894761b738ad67eb", "score": "0.5369715", "text": "def handle_asklist(bot, event):\n event.reply('available subjects: ', experts.data.keys())", "title": "" }, { "docid": "be36259696316828b2303f7956f63924", "score": "0.5363911", "text": "def get_assignments() -> list:\n return _assignments", "title": "" }, { "docid": "eff4550afccd51d2b57263602d1b668a", "score": "0.53589725", "text": "def show_available_courses(self):\n\n table = PrettyTable(\n ['course code', 'course name', 'units', 'teacher name', 'field code', 'total quantity'])\n\n for lesson in self.defined_available_courses():\n table.add_row(\n [lesson.course_code, lesson.name, lesson.units, lesson.teacher_name, lesson.field_code,\n lesson.total_quantity])\n print(table)", "title": "" }, { "docid": "4be05bd2766a16affe63f3c15f4ed69c", "score": "0.53571683", "text": "def get_all(cls):\n assignments = cls.query.all()\n return assignments", "title": "" }, { "docid": "6af62829fcac9275703e0011a83d0850", "score": "0.5352533", "text": "def get_summary(self):\n if len(self.courses) > 0:\n print_array = []\n for course in self.courses:\n print_array.append([self.cwid,self.name,self.department,course,self.courses[course]])\n return print_array\n else:\n raise ValueError('Not enough courses')", "title": "" }, { "docid": "ccc62b1dd243148cddb21b8ca25bc7ea", "score": "0.5334023", "text": "def get_assignments0(self, team_name):\n\n _cards = []\n _assignments = dict()\n\n if team_name == 'SysEng':\n for list in self.syseng_assignments.all_lists():\n if list.name == 'In Progress'.encode('utf-8'):\n _cards = self.syseng_assignments.get_list(list.id).list_cards()\n\n for _card in _cards:\n _label = 'default'\n\n for label in _card.labels:\n if label.name == b'Ok':\n _label = 'success'\n if label.name == b'Issues':\n _label = 'warning'\n if label.name == b'Blocked':\n _label = 'danger'\n\n _assignments[_card.id] = assignment.Assignment(_card.id, _card.name, None, _status = _label)\n\n return _assignments", "title": "" }, { "docid": "31d0f81e924ceba0401beac23856ac5c", "score": "0.53136635", "text": "def show_subject(request,subject_slug,template_name=\"resources/subject.html\"): \r\n subject = 
get_object_or_404(Subject,slug=subject_slug)\r\n resources = subject.resource_set.all()\r\n page_title = subject.name\r\n is_ratted = False\r\n #dont't allow people to rate a subject more than once\r\n if request.user.subjectrate_set.filter(subject=subject):\r\n is_ratted=True\r\n rate_form = get_rate_form(Subject)()\r\n #save comment\r\n if request.method==\"POST\":\r\n if request.POST.get('action')=='comment':\r\n comments_form = CommentsForm(request.POST)\r\n if comments_form.is_valid():\r\n comments_form.save(user=request.user,subject=subject)\r\n comments= Comments.objects.filter(subject=subject).order_by('-date_comment')\r\n comments_form = CommentsForm()\r\n return render_to_response(template_name,locals(),context_instance=RequestContext(request))", "title": "" }, { "docid": "2e6ccf933dba183c0f2f8d77d9ab0f6a", "score": "0.5307436", "text": "def view_announcements_student(request):\r\n # get list of all staffs' ids of current student's course.\r\n staff_ids_list = Subject.objects.staff_ids(request.user, is_student=True)\r\n # display all announcements of current student's course.\r\n announcements = Announcement.objects.filter(\r\n Q(adminhod=request.user.student.course.adminhod)|\r\n Q(staff__user__id__in=staff_ids_list) \r\n ).distinct()\r\n # update announcements from not seen to be seen. \r\n Announcement.objects.announcements_updated(request.user, staff_ids_list, is_student=True)\r\n context = {'announcements':announcements}\r\n return render(request, 'student/view_announcements.html', context)", "title": "" }, { "docid": "a4a26eb7d7dc10461d0a364bea1672e2", "score": "0.53056204", "text": "def subjects(*reports) -> Iterator:\n for report in reports:\n yield from report.get(\"subjects\", {}).values()", "title": "" }, { "docid": "864712af21aad61da766d66f4500a8d3", "score": "0.5293205", "text": "def instructorsummary(self):\r\n\r\n for course,studentnumber in self.coursestudents.items():\r\n yield [self.instructorcwid,self.instructorname,self.instructordept, course, studentnumber]", "title": "" }, { "docid": "10368b3aaf3891162e89440019084cf1", "score": "0.52924657", "text": "def subject_set(self) -> models.QuerySet:\n return self.query_associated_subjects()", "title": "" }, { "docid": "db79f4a4d6fd0e2875a5e6759b4cae80", "score": "0.5276414", "text": "def select_distinct_subjects(**kwargs):\n return select_distinct(M.SUBJECT, **kwargs)", "title": "" }, { "docid": "98c4de9eec22f972e4a99dbf6a531ef4", "score": "0.5271705", "text": "def list_all(request):\n if \"full\" in request.GET:\n full = request.GET[\"full\"].lower() in TRUE_SET\n else:\n full = False\n return HttpResponse(json.dumps([c.to_json_object(full=full) for c in Course.public_courses().order_by(\"subject_id\")]), content_type=\"application/json\")", "title": "" }, { "docid": "a4d3828bf82dc4919d034ce113193846", "score": "0.5263679", "text": "def display(self, assignment):\n # Subclasses can print in a prettier way, or display with a GUI\n print('CSP:', self, 'with assignment:', assignment)", "title": "" }, { "docid": "1290ca40f5c22f428f5b2ef8444eb0bf", "score": "0.52548254", "text": "def get_subjects(fname_in):\n fin = open(fname_in, 'r')\n cin = csv.reader(fin)\n h = next(cin)\n\n subject_s = set()\n for l in cin:\n subject_s.add(l[3])\n\n subject_l = list(subject_s)\n pickle_data('subject_set.pkl', subject_s)\n fout = open('subjects.csv', 'w')\n cout = csv.writer(fout)\n cout.writerow(['Subject', 'In Allston'])\n for l in subject_l:\n cout.writerow([l, ''])\n\n fout.close()\n fin.close()", "title": "" }, { "docid": 
"32a346bbee0158751cd6fea9f135d31d", "score": "0.5251306", "text": "def do_assignments(self, s):\n try:\n assignments = list(self.game.assign())\n except GameError as e:\n print('Cannot get assignments: {}'.format(e))\n else:\n if assignments:\n self._print_assignments(assignments)\n else:\n print('No new assignments available')", "title": "" }, { "docid": "9801b5bddd8f98af7f0c97adde8248bd", "score": "0.5214523", "text": "def scheduleTeachingAssignmentHtml(request):\n getSchedule().sort(key=lambda x: x.instructor, reverse=False)\n assignment = sorted(getSchedule(), key=lambda x: x.instructor, reverse=False)\n additional_assignments = getAssignments()\n for instructor in additional_assignments:\n print(instructor.instructor)\n courses = getCourses()\n assignemnt_hours = list()\n total = 0\n final_total = 0\n currentinstructor = \"\"\n for instructor in assignment:\n if currentinstructor != instructor.instructor:\n final_total = total\n total = 0\n assignemnt_hours.append(final_total)\n for course in courses:\n currentinstructor = instructor.instructor\n if instructor.course == course.course:\n total = total + course.workloadhours\n assignemnt_hours.append(final_total)\n return render(request, \"polls/teaching_assignments.jinja\", {'assignment': assignment, 'courses': courses,\n 'assignment_hours': assignemnt_hours,\n 'additional_assignments': additional_assignments})", "title": "" }, { "docid": "cc19fb4cce625b586095e683804d7cc4", "score": "0.5196651", "text": "def subjects(data, demo, cond, group):\r\n subs = list(set(data.index.values.tolist()))\r\n subs = [i for i in subs if (list(demo.loc[i])[0] == group)]\r\n return subs", "title": "" }, { "docid": "453f9f6230ae2695d56373f986a8b19f", "score": "0.5194091", "text": "def dpAdvisor(subjects, maxWork):\r\n # TODO...\r", "title": "" }, { "docid": "45925fb52bfc956f77864de8b2cb9c31", "score": "0.5182091", "text": "def get_subject_areas():\n result = requests.get(BASE_URL + '/course/api/subjectAreas', cookies=cookies)\n result.raise_for_status()\n return result", "title": "" }, { "docid": "b0e220944cef6419a6d693f0fe5bd838", "score": "0.5162329", "text": "def list_assignments(assignments, upload):\n \n assignments_map ={}\n \n for i, assignment in enumerate(assignments, 1):\n if upload: \n assignments_map[i] = assignment['id'] \n else:\n assignments_map[i] = assignment['html_url'] \n print \"%s. 
%s\" % (str(i), assignment['name'])\n if upload:\n print 'Enter the number corresponding to the assignment you want to turn in: '\n return assignments_map[int(raw_input())]\n return assignments_map", "title": "" }, { "docid": "3d230587bc09766e821a66d1920b525c", "score": "0.51359594", "text": "def get_all_assignment_ids(self):\n\n return self.assignments.keys()", "title": "" }, { "docid": "1a962ec4e30f80efe5f0b11734f5d8f7", "score": "0.5125519", "text": "def get_students_list():\n\n titles = [\"Idx\", \"Password\", \"Name\", \"Surname\", \"Email\"]\n students = instances_manager.prepare_data_to_visualize(Student.student_list)\n codecooler_view.print_table(titles, students)\n get_students_grades()", "title": "" }, { "docid": "eacaeec5b29408c7b89c24eb2049e30f", "score": "0.5116473", "text": "def n_subjects(self):\n return len(self.te.subjects_index)", "title": "" }, { "docid": "3d924a66e5689cab3f6bec713610216c", "score": "0.5108318", "text": "def student_table(self):\n print()\n print(\"Student Summary:\")\n header = [\"CWID\", \"Name\", \"Major\", \"Courses Completed w/ grade >= C\", \"Remaining Required\", \"Remaining Electives\"]\n x = PT(header)\n for item in self.students:\n # First get difference between required and completed classes passed satisfactorily (grade >= 'C')\n passed_courses, required_remaining, elective_remaining = self.courses_remaining(item)\n x.add_row([str(item.CWID), str(item.name), str(item.major), sorted(passed_courses), sorted(required_remaining), sorted(elective_remaining)])\n print(x)\n return None", "title": "" }, { "docid": "f190d3f0a99b3482263ca9f6eda1dda9", "score": "0.50919473", "text": "def find_subjects(experiment, rootdir=\"/\"):\n json_reader = JsonIndexReader(os.path.join(rootdir,\n \"protocols\",\n \"r1.json\"))\n subjects = json_reader.aggregate_values(\n 'subject_alias', experiment=experiment)\n return subjects", "title": "" }, { "docid": "6750cb9a498fdd224f3d715723f94f9b", "score": "0.5086884", "text": "def associations_subjects(self, **kwargs) -> Iterator[CURIE]:\n # individual implementations should override this to be more efficient\n yielded = set()\n for a in self.associations(**kwargs):\n s = a.subject\n if s in yielded:\n continue\n yield s\n yielded.add(s)", "title": "" }, { "docid": "0ec9fe4651bd7c208382943864489dbe", "score": "0.5068389", "text": "def n_subjects(self) -> int:\n return self.subject_set.count()", "title": "" }, { "docid": "0d517e711d0eb2ca96d432911fcf48d6", "score": "0.50627875", "text": "def assign_users(request):\n teacher_created_assignments = AssignmentCreator.objects.filter(teacher=request.user)\n assignments_all_existing = TestUserAssignment.objects.filter(\n pk__in=teacher_created_assignments.values_list('assignment', flat=True))\n if request.method == 'POST':\n form = AssignmentsForm(request.POST)\n if form.is_valid():\n assignment_form = form.save()\n teacher = request.user\n creation = AssignmentCreator.objects.create(teacher=teacher,\n assignment=assignment_form)\n creation.save()\n teacher_created_assignments = \\\n AssignmentCreator.objects.filter(teacher=request.user)\n assignments_all_existing = TestUserAssignment.objects.filter(\n pk__in=teacher_created_assignments.values_list('assignment', flat=True))\n\n messages.info(request, \"Student {} was assigned a test {}\".format(form.cleaned_data[\"user\"],\n form.cleaned_data[\"test\"]))\n else:\n messages.info(request, \"Student {} is already assigned this test {}\".format(form.cleaned_data[\"user\"],\n form.cleaned_data[\"test\"]))\n\n else:\n form = 
AssignmentsForm()\n return render(request, 'teachers/assign-users-tests.html',\n {'form': form, 'existing_assignments': assignments_all_existing})", "title": "" }, { "docid": "9a4ba812a269371e3607422d5fe9e261", "score": "0.5043249", "text": "def top_subjects(self,top_n = 25):\n return self._top_items(self.te.subjects_index,top_n)", "title": "" }, { "docid": "1a59182dab6a872948f0aba87687f011", "score": "0.5030182", "text": "def get_all_classroom_data(url):\n for prefix in subjects:\n url = url + \"/\" + prefix\n html_data = get_all_html_content_from_url(url)\n extract_data_from_html(html_data)", "title": "" }, { "docid": "a119ee699b7f8afc9650d68e9949f5bb", "score": "0.50187707", "text": "def index(course_name = None, assignment_name = None):\n courses = Course.accessible_by(request.username)\n course = None\n assignments = None\n assignment = None\n\n if course_name is not None:\n course = next((c for c in courses if c.name == course_name), None)\n if course is None:\n return abort(404)\n # Only show public unit tests\n assignments = course.assignments.filter_by(visible=True).filter(Assignment.testfiles.any(Testfile.unittests.any(Unittest.is_public)))\n if course is not None and assignment_name is not None:\n assignment = assignments.filter_by(name=assignment_name).first_or_404()\n\n return render_template(\"main.html\", courses=courses, course=course, assignments=assignments, assignment=assignment)", "title": "" }, { "docid": "d58a86e75f7e3461a4d424b07a12f660", "score": "0.50122947", "text": "def getDFSubjects(self):\n return self.c_cells", "title": "" }, { "docid": "b0154db1069d2a23685763d95a51729c", "score": "0.50106895", "text": "def score(self, article: Article, subjects: List[Subject]) -> List[Subject]:\n scores = self._calc_scores(article, subjects)\n for i, score in enumerate(scores):\n subjects[i].score = score\n return subjects", "title": "" }, { "docid": "08672dfbfafbebf9e892e47190583def", "score": "0.5009317", "text": "def get_student_summary(self)-> Tuple[str, str, List[str]]:\n \n return [self.cwid, self.name,self.major,sorted(self.passed_courses),sorted(self.remaining_rc),sorted(self.remaining_ec),round(self.gpa,2)]", "title": "" }, { "docid": "21342c3085f19ad0de2089c65ec59862", "score": "0.5006905", "text": "def studentcourses():\n rows = db.execute(\"select course_id, semester, year, grade from takes where ID = :ID\", ID = session[\"user_id\"])\n positions = []\n for row in rows:\n positions.append({\"course_id\": row['course_id'], \"semester\": row['semester'], \"year\": row['year'], \"grade\": row['grade']})\n\n return render_template(\"studentcourses.html\", positions = positions)", "title": "" }, { "docid": "d7afc2567e2967e285133dea0dbb81b2", "score": "0.50011986", "text": "def subjects2rdfa(labels, subjects):\n html = [\"ul\"]\n for subject_id in sorted(list(subjects.keys())):\n html.append(subject2rdfa(labels, subject_id, subjects[subject_id]))\n return html", "title": "" }, { "docid": "a3809f06612c69684a8439866d573b44", "score": "0.4996725", "text": "def _set_subject_variables(self, subject_dictionary):\n self.subject_column = subject_dictionary[\"subject_id_column\"]\n self.subject_info = subject_dictionary[\"subject_data_columns\"]\n self.subject_target = subject_dictionary[\"target_variable\"]\n self.subject_df = df_utils.get_subjects_data_frame(df=self.df,\n subject_column_name=self.subject_column,\n subject_info_columns=self.subject_info)", "title": "" }, { "docid": "40596cbc81f75d0fd46f3b525bd30f50", "score": "0.4993491", "text": "def get(self, format=None):\n 
subjects = Subject.objects.all()\n serializer = SubjectSerializer(subjects, many=True)\n return Response(serializer.data)", "title": "" }, { "docid": "c234350073b598332cae0c9b80fbda3b", "score": "0.49841318", "text": "def count_subjects_per_demographics(self):\n all_demographics = list(self.subject_demographic.values())\n\n # Number of subjects per demographic\n subjects_per_demographics = dict(\n [\n (d, sum(np.array(all_demographics) == d))\n for d in set(all_demographics)\n ]\n )\n\n return subjects_per_demographics", "title": "" }, { "docid": "1749ddb15c85512431f21591ef87994d", "score": "0.4983279", "text": "def content_dist_courseinfo(self):\n for item in self.get_teaching_learning_courses():\n yield item", "title": "" }, { "docid": "4e150542cf82b86a5c435b7f6d36fbae", "score": "0.49807706", "text": "def subjects(\n self,\n ids=None,\n types=None,\n slugs=None,\n levels=None,\n hidden=None,\n updated_after=None,\n fetch_all=False,\n ):\n\n url = self.url_builder.build_wk_url(\n constants.SUBJECT_ENDPOINT, parameters=locals()\n )\n return self._wrap_collection_in_iterator(\n self.authorized_request_maker(url), fetch_all\n )", "title": "" }, { "docid": "7e8d89c0f57f1ff381ac060b228515e3", "score": "0.4980363", "text": "def courselist(self, indexed=True, standalone=False):\n self.asciiout.h1(\"Meine Veranstaltungen\")\n\n \"\"\" Prints a list of courses \"\"\"\n for i in range(0, len(self.courses)):\n key = self.courses.keys()[i]\n\n if indexed:\n print \"[\"+str(i)+\"] \"+self.courses[key]\n else:\n print self.courses[key]\n \n # courselist() is also utilized by other actions. the standalone flag prevents conflicts\n if standalone:\n courseId = selectId(\"Veranstaltungsdetails\", len(self.courses)) \n coursePage = pq(self.read(\"seminar_main.php?auswahl=\"+self.courses.keys()[courseId]))\n detailPage= pq(self.read(\"print_seminar.php\"))\n self.asciiout.h1(detailPage(\"h1\").eq(0).text())\n rows = detailPage(\"table\").eq(0).find(\"tr\")\n for i in range(1, len(rows)):\n tds = rows.eq(i).children(\"td\")\n self.asciiout.h2(tds.eq(0).text())\n self.asciiout.text(br2nl(tds.eq(1)).text())", "title": "" }, { "docid": "d76f6c0a2370d98c6c1528b209f60963", "score": "0.49786335", "text": "def get_assignments(self, course_id, **kwargs):\n from canvasapi.assignment import Assignment\n\n return PaginatedList(\n Assignment,\n self._requester,\n 'GET',\n 'users/%s/courses/%s/assignments' % (self.id, course_id),\n _kwargs=combine_kwargs(**kwargs)\n )", "title": "" }, { "docid": "f9e3a15ddd04e38185027387e3efef15", "score": "0.49757263", "text": "def testWebPage_setSubjectList(self):\n web_table = self.portal.web_page_module.newContent(portal_type=\"Web Table\")\n self.tic()\n self.changeSkin(\"UNGDoc\")\n web_table.WebPage_setSubjectList(\"VPN\")\n self.tic()\n subject_list = web_table.getSubjectList()\n self.assertEqual([\"VPN\"], subject_list)\n self.changeSkin(\"UNGDoc\")\n web_table.WebPage_setSubjectList(\"VPN,ERP5\")\n self.tic()\n self.changeSkin(\"UNGDoc\")\n subject_list = web_table.getSubjectList()\n self.assertEqual([\"ERP5\", \"VPN\"], sorted(subject_list))", "title": "" }, { "docid": "2d8bee3718b4edecc01b7a11b5cebd93", "score": "0.4973076", "text": "def subject_page(subject, page_number):\n subject_data = dc.data_get(subject, page_number)\n button_data = dc.button_data_get(subject, subject_data)\n\n if subject == c.SUBJECT.PEOPLE:\n subject_data = dc.data_change_url_to_name(subject_data, c.KEY.People.HOMEWORLD)\n\n return render_template(\n 'index.html',\n subjects_list=c.SUBJECT_ORDER,\n 
subject_name=subject,\n subject_data=subject_data,\n button_data=button_data,\n column_names=dc.column_names_get(subject),\n pages_number=util.pagination_number_get(subject),\n page_active=page_number\n )", "title": "" }, { "docid": "f7bff6fd2d1a33906b32dada0ad64e11", "score": "0.4963504", "text": "def group_assignments(self) -> Sequence[str]:\n return pulumi.get(self, \"group_assignments\")", "title": "" }, { "docid": "47d12efe930433da8948b7d881dc7319", "score": "0.49576584", "text": "def list():\n\tif g.is_default_school:\n\t\tdiscussions = Discussion.objects.filter(published=True).order_by('-created')\n\telse:\n\t\tdiscussions = Discussion.objects.filter(schools=g.school, published=True).order_by('-created')\n\treturn render_template('discussion/list.html',\n\t\ttitle=_('Discussions'),\n\t\tdiscussions=discussions)", "title": "" }, { "docid": "4f5d8c195c2634d294e123397180f306", "score": "0.4957587", "text": "def get_assignment_data(aid):\n matches = assignments.get_assignments(aid=aid)\n if len(matches) < 1:\n raise WebException(\"Assignment does not exist\")\n assignment = matches[0]\n assignment[\"groups\"] = [groups.get_group(gid=gid)[0] for gid in assignment.get(\"groups\", [])]\n return { \"success\": 1, \"data\": assignment }", "title": "" }, { "docid": "8fa32a6e8dbd7102a5bcc7e3397ff007", "score": "0.49546313", "text": "def dpBruteForceAdvisor(subjects, maxWork):\n nameList = list(subjects.keys())\n tupleList = list(subjects.values())\n memo = {}\n bestSubset, bestSubsetValue = dpBruteForceAdvisorHelper(tupleList, maxWork,\n 0, None, None, [],\n 0, 0, memo)\n outputSubjects = {}\n\n for i in bestSubset:\n outputSubjects[nameList[i]] = tupleList[i]\n\n return outputSubjects", "title": "" }, { "docid": "af50fce1e2e64969f08d272d4c6280a3", "score": "0.49513468", "text": "def loadSubjects(filename):\n\n # The following sample code reads lines from the specified file and prints\n # each one.\n inputFile = open(filename)\n output = {}\n for line in inputFile:\n data = line.rstrip().split(',')\n name = data[0]\n value = int(data[1])\n work = int(data[2])\n output[name] = (value, work)\n\n # print data\n\n return output", "title": "" }, { "docid": "20c3faf143e90f76c28334af58aafa78", "score": "0.49489135", "text": "def get_subject_list(year_term):\n result = requests.get(URL_ROOT)\n soup = BeautifulSoup(result.text)\n select_box = soup.find('select', id='subject')\n subjects = select_box.find_all('option', class_=year_term)\n subject_str = [s['value'] for s in subjects]\n return subject_str", "title": "" }, { "docid": "6e272554c2b2e048799aea7deffef1bc", "score": "0.49430734", "text": "def get_subjects(self, subject_group_id=None, **kwargs):\n params = dict((k, v) for k, v in kwargs.items() if k in\n ('subject_group', 'include_inactive'))\n\n return self._get('subjects', params=params)", "title": "" }, { "docid": "99aadfe3874772525b6b2607b125bf41", "score": "0.49289337", "text": "def meeting_session_assignments(self, schedule : Schedule) -> Iterator[Assignment]:\n url = \"/api/v1/meeting/schedtimesessassignment/?schedule=\" + str(schedule.id)\n return self._retrieve_multi(url, Assignment)", "title": "" }, { "docid": "eb58e62dec8a58983f4b361e662ff992", "score": "0.49181697", "text": "def _get_subject(params):\n params[\"SUBJECT_STATEMENT\"] = render_to_string(\"saml/xml/subject.xml\", params)", "title": "" } ]
14dc3b3ec7b206da8a0aac6584868cdb
Marks the list of barcodes as ready in the database and sends email
[ { "docid": "7e79f1f9bffed340ce4ad21cb22ac6c3", "score": "0.7422296", "text": "def mark_results_ready(self, barcodes, debug=False):\n debug = {}\n ready_sql = \"\"\"UPDATE ag.ag_kit_barcodes\n SET results_ready = 'Y'\n WHERE barcode IN %s\n AND (results_ready != 'Y' OR results_ready IS NULL)\n RETURNING barcode\"\"\"\n new_bcs = tuple(x[0] for x in\n self._con.execute_fetchall(\n ready_sql, [tuple(barcodes)]))\n debug['new_bcs'] = new_bcs\n if len(new_bcs) == 0:\n # No new barcodes, so no emails to send\n return\n\n bc_sql = \"\"\"UPDATE ag.ag_kit_barcodes\n SET date_of_last_email = '{0}'\n WHERE barcode IN %s\"\"\".format(datetime.now())\n subject = \"Your American/British Gut results are ready\"\n message = (\n \"Good afternoon American & British Gut participants!\\n\\n\"\n \"We are pleased to let you know that your results are now \"\n \"available. You may access them by signing onto \"\n \"microbio.me/americangut or microbio.me/britishgut. If you have \"\n \"forgotten your login credentials, you may retrieve them using \"\n \"the \\\"Forgot kit ID/password\\\" functions.\\n\\n\"\n \"We thank you for being a part of the project. While we emphasize \"\n \"getting results back to you, the participant, we and the broader \"\n \"American/British Gut scientific collaborative network are \"\n \"extremely excited about the population-scale microbiome \"\n \"observations that are for the first time becoming possible thanks\"\n \" to you and the other participants!\\n\\n\"\n \"Regards,\\n\"\n \"The American Gut Team\\n\")\n barcode_info = self.get_ag_barcode_details(new_bcs)\n # Make sure email only sent once if multiple barcodes with same email\n seen_emails = set(i['email'] for bc, i in viewitems(barcode_info))\n mail = send_email(message, subject, bcc=list(seen_emails), debug=debug)\n debug['mail'] = mail\n self._con.execute(bc_sql, [new_bcs])\n if debug:\n return debug", "title": "" } ]
[ { "docid": "68b6afc64486c9532780ec1bc33d5e9d", "score": "0.64812684", "text": "def mark_barcodes_sent_to_qiita(self, barcodes):\n if barcodes:\n sql = \"\"\"UPDATE project_qiita_buffer\n SET pushed_to_qiita = 'Y'\n WHERE barcode IN %s\"\"\"\n self._con.execute(sql, [tuple(barcodes)])", "title": "" }, { "docid": "8888136d049bfcbeaf922f5a95396c5d", "score": "0.59725255", "text": "def _revert_ready(self, barcodes):\n sql = \"\"\"UPDATE ag.ag_kit_barcodes\n SET results_ready = NULL\n WHERE barcode IN %s\"\"\"\n self._con.execute(sql, [tuple(barcodes)])", "title": "" }, { "docid": "8488079b31af48be10688011a3eb0ab3", "score": "0.5670122", "text": "def push_barcode_to_qiita_buffer(self, barcode):\n sql = \"\"\"SELECT barcode\n FROM project_qiita_buffer\"\"\"\n present = {i[0] for i in self._con.execute_fetchall(sql)}\n\n if barcode in present:\n return \"Barcode in queue or already sent to Qiita\"\n\n else:\n sql = \"\"\"INSERT INTO project_qiita_buffer (barcode)\n VALUES (%s)\"\"\"\n self._con.execute(sql, [barcode])\n return \"Barcode inserted\"", "title": "" }, { "docid": "eb164cfc0637099183755492c8631dfa", "score": "0.53908557", "text": "def after_send(self, mails):", "title": "" }, { "docid": "8d8643f5a241ed6297e6e12fbcabd654", "score": "0.5222265", "text": "def finally_send(self, mails: list, status: bool):", "title": "" }, { "docid": "49d38db4d8bf2e6b9566ac92c810eed6", "score": "0.5191911", "text": "def send_queue_as_ready():\n global serial_port, gcode_queue, grbl_buffer_current, GRBL_BUFFER_MAX\n global total_items_queued_in_batch, gcode_queue_count\n \n if serial_port:\n try:\n lines = serial_port.readlines()\n for line in lines:\n print \"grbl: \" + line\n # append_response(\"grbl: \" + line)\n grbl_buffer_current -= 1\n \n if gcode_queue:\n if grbl_buffer_current < GRBL_BUFFER_MAX:\n line = gcode_queue.popleft()\n gcode_queue_count -= 1\n print \"sending to grbl: %s\" % (line)\n serial_port.write(line)\n grbl_buffer_current += 1\n # append_response(\"sent to grbl: %s - buffer_current: %d\" % (line,grbl_buffer_current))\n else:\n total_items_queued_in_batch = 0\n except OSError:\n # Serial port appears closed => reset\n close()\n except ValueError:\n # Serial port appears closed => reset\n close()", "title": "" }, { "docid": "81b8644dd4d7f65d147f354c0be77273", "score": "0.5150376", "text": "def check_barcode_assigned(barcode):\n sql = \"\"\"SELECT barcode, assigned_on\n FROM barcodes.barcode\n WHERE barcode = %s\n \"\"\"\n with TRN:\n TRN.add(sql)\n barcode_info = TRN.execute_fetchindex()\n if barcode_info is None:\n raise ValueError('Barcode %s does not exist in the DB' % barcode)\n # Check if assigned on date is set or not\n return False if barcode_info['assigned_on'] is None else True", "title": "" }, { "docid": "6da1503fcd70fd9136b60f51de67f159", "score": "0.5108158", "text": "def test_transcode_oncomplete(self):\n\n # assume an encode job was submitted\n item = Item.objects.create(name='Hello')\n\n ctype = ContentType.objects.get_for_model(item)\n\n job = EncodeJob()\n job.id = '1396802241671-jkmme8'\n job.content_type = ctype\n job.object_id = item.id\n job.save()\n\n # \n with open(os.path.join(FIXTURE_DIRS, 'oncomplete.json')) as f:\n resp = json.loads(f.read())\n message = json.loads(resp['Message'])\n\n # send signal\n transcode_oncomplete.send(sender=None, message=message)\n\n #\n job = EncodeJob.objects.get(pk=message['jobId'])\n\n #\n self.assertEqual(1, EncodeJob.objects.count())\n self.assertEqual('1396802241671-jkmme8', job.id)\n self.assertEqual('Success', 
job.message)\n self.assertEqual(4, job.state)", "title": "" }, { "docid": "36c137fa863ff5b68afce997ea200612", "score": "0.5064364", "text": "def run(self):\n # subscriptions = Subscription.query.all()\n subscriptions = []\n for s in subscriptions:\n if s.email_verified == 1:\n self.send_email_to(s.email, \"SG Incidents: New Incident Reported in Your Area\", self.message)", "title": "" }, { "docid": "db2a27569d85f272c39ec8c11d21317a", "score": "0.5035769", "text": "def ready(self):\n\n if canAppAccessDatabase():\n self.create_stock_item_labels()\n self.create_stock_location_labels()\n self.create_part_labels()", "title": "" }, { "docid": "181264dff670d5e663fb89d83feff736", "score": "0.5018453", "text": "def send_MStoDataBase(id_arduino, mechanism_status):\n #Corentin : Ecrire code", "title": "" }, { "docid": "1e94c532d5959f0bdae163fa7b5ed677", "score": "0.49977615", "text": "def done(self):\n self._kwargs['content'] = '\\n\\n'.join(self._buffer)\n util.send_email(**self._kwargs)", "title": "" }, { "docid": "5d4beb79a6db91ac9811f16790b98a86", "score": "0.4962012", "text": "def check_consent(self, barcodes):\n sql = \"\"\"SELECT barcode\n FROM ag.ag_kit_barcodes\n LEFT JOIN ag.source_barcodes_surveys USING (barcode)\n WHERE barcode in %s AND survey_id IS NOT NULL\"\"\"\n consented = [x[0] for x in\n self._con.execute_fetchall(sql, [tuple(barcodes)])]\n\n failures = set(barcodes).difference(consented)\n\n return consented, self._explain_pulldown_failures(failures)", "title": "" }, { "docid": "8b4c923029fcb4cb59a6eca6acfa24c2", "score": "0.49563253", "text": "def barcode(self, barcode):\n\n self._barcode = barcode", "title": "" }, { "docid": "8b4c923029fcb4cb59a6eca6acfa24c2", "score": "0.49563253", "text": "def barcode(self, barcode):\n\n self._barcode = barcode", "title": "" }, { "docid": "cc95a0ea6f166591b002c292bbb5a6a1", "score": "0.49437463", "text": "async def populate_codes(self) -> None:\n async with self.bot.pg_pool.acquire() as conn:\n with open(self.bot.get_data().ACTIVITY_CODES_CSV, newline=\"\") as csvfile:\n data = [\n (item[0], item[1], int(item[2])) for item in reader(csvfile, delimiter=\",\", quotechar=\"|\")\n ]\n\n async with conn.transaction():\n await conn.execute(\"DELETE FROM Codes\")\n await conn.executemany(\"INSERT INTO Codes (code, title, points) VALUES ($1, $2, $3)\", data)", "title": "" }, { "docid": "1320eb3a71b7eb7128f1e430d0c6fadc", "score": "0.49425897", "text": "def add_barcodes_to_kit(self, ag_kit_id, num_barcodes=1):\n barcodes = self.get_unassigned_barcodes(num_barcodes)\n # assign barcodes to projects for the kit\n sql = \"\"\"SELECT DISTINCT project_id FROM barcodes.project_barcode\n JOIN ag.ag_kit_barcodes USING (barcode)\n WHERE ag_kit_id = %s\"\"\"\n proj_ids = [x[0] for x in self._con.execute_fetchall(sql, [ag_kit_id])]\n barcode_project_insert = \"\"\"INSERT INTO project_barcode\n (barcode, project_id)\n VALUES (%s, %s)\"\"\"\n project_inserts = []\n for barcode in barcodes:\n for project in proj_ids:\n project_inserts.append((barcode, project))\n self._con.executemany(barcode_project_insert, project_inserts)\n\n # Add barcodes to the kit\n sql = \"\"\"INSERT INTO ag_kit_barcodes\n (ag_kit_id, barcode, sample_barcode_file)\n VALUES (%s, %s, %s || '.jpg')\"\"\"\n barcode_info = [[ag_kit_id, b, b] for b in barcodes]\n self._con.executemany(sql, barcode_info)\n return barcodes", "title": "" }, { "docid": "81a15ada55b98a14cf08bc4c00c3938e", "score": "0.48794436", "text": "def run():\n errors = add_all_to_db()\n\n for err in errors:\n 
logging.error(\"Error: {0}\".format(err))\n\n # Attempt to email:\n try:\n dt = datetime.date.today()\n date = str(dt.year) + '-' + str(dt.month) + '-' + str(dt.day)\n fromaddr = '[email protected]'\n toaddrs = '[email protected]'.split()\n # Construct the message\n subject = \"StockDB report\"\n body = 'Date: ' + date + '\\n'\n body += 'Number of errors: ' + str(len(errors)) + '\\n\\n'\n for err in errors:\n body += \"Error: {0}\".format(err) + '\\n'\n msg = 'Subject: %s\\n\\n%s' % (subject, body)\n\n server = smtplib.SMTP('localhost')\n server.sendmail(fromaddr, toaddrs, msg)\n server.quit()\n except Exception as err:\n logging.error(\"Error: {0}\".format(err))", "title": "" }, { "docid": "ae90309b171b0324c3371c500b5580cc", "score": "0.487113", "text": "def barcode(self, barcode: str):\n\n self._barcode = barcode", "title": "" }, { "docid": "529363f27fb09b2f704371f5d8aa00ba", "score": "0.48509368", "text": "async def codes(self, ctx: Context) -> None:\n async with self.bot.pg_pool.acquire() as conn:\n async with conn.transaction():\n codes = await conn.fetch(\"SELECT code, title, points FROM Codes\")\n\n embed = discord.Embed(\n title=\"Activity Codes\",\n description=\"\\n\".join([f\"`{code}` {title} - {points}\" for code, title, points in codes]),\n color=self.bot.get_data().HACKATHON_BLUE,\n )\n\n await ctx.author.send(embed=embed)", "title": "" }, { "docid": "60b0d5630086304b1cc1d8162597ea24", "score": "0.4849058", "text": "def has_succeed():\n token.status = TokenEmailStatus.Success\n db.session.add(token)\n db.session.commit()", "title": "" }, { "docid": "cdc77dfc263f1b05df194c5ae1778d51", "score": "0.4847711", "text": "def get_ag_barcode_details(self, barcodes):\n sql = \"\"\"SELECT DISTINCT barcode, *\n FROM ag.ag_kit_barcodes\n JOIN ag.ag_kit USING (ag_kit_id)\n LEFT JOIN ag.source_barcodes_surveys USING (barcode)\n LEFT JOIN ag.ag_login_surveys USING (survey_id)\n LEFT JOIN ag.ag_login\n ON (ag.ag_kit.ag_login_id = ag.ag_login.ag_login_id)\n WHERE barcode in %s\"\"\"\n res = self._con.execute_fetchall(sql, [tuple(b[:9] for b in barcodes)])\n return {row[0]: dict(row) for row in res}", "title": "" }, { "docid": "c1047c4f22f00b663ed530694ef39938", "score": "0.48430327", "text": "def on_ready(self):\n self.ready = True\n self.bm.on_interesting_shit()", "title": "" }, { "docid": "eedf8323a6a665b63452c66598da9e74", "score": "0.48342258", "text": "def write_email(debtors_database):\n queue = []\n\n # define email charset\n charset = email.charset.Charset('utf-8')\n charset.body_encoding = email.charset.QP\n\n cur = debtors_database.cursor()\n cur.execute('SELECT email, name FROM debtors')\n for debtor in cur.fetchall():\n to_addr, name = debtor\n cur.execute('''SELECT amount, description FROM debts\n WHERE debtor_email=?''', (debtor[0], ))\n rows = cur.fetchall()\n debt_lines = [CONFIG['debt_line'] %\n {'amount': float(row[0]), 'description': row[1]} for row in rows]\n total = sum([float(row[0]) for row in rows])\n\n payload = CONFIG['email_template'] % {'name': name, 'debt_lines': u'\\n'.join(debt_lines), 'total': total}\n\n msg = MIMENonMultipart('text', 'plain')\n msg['To'] = to_addr\n msg['From'] = CONFIG['from_addr']\n msg['Subject'] = CONFIG['email_subject']\n msg.set_payload(payload, charset=charset)\n\n queue.append(msg)\n return queue", "title": "" }, { "docid": "d9b32b9e68c7e3ecbc3e3c5bb2f027ec", "score": "0.48256025", "text": "def scanBanshee(self):\n print(\"Starting to scan Banshee\")\n db = self.db\n bdb = self.bdb\n unmatched = db.getBUnmatched() # Get songs without a 
Banshee id\n for i in unmatched:\n db.addBID(i, bdb.getID(db.getPath(i))) # Add Banshee id to song\n\n print(\"Finished scanning Banshee\")", "title": "" }, { "docid": "06f91f40f1c76a39f1c4f0b88954ef31", "score": "0.47979128", "text": "def relay(mailFields, key, msgMailRequest, exeSql):\n \n status = 0\t\t\t\t\t\t\t# status = 0 (new combo), i.e. \"from\" + \"subject\" + \"date\" combo is not in DB, so consider it as new combo and relay if counters allow\n\t\t\t\t\t\t\t\t# status = 1 (old combo), i.e. \"from\" + \"subject\" + \"date\" combo is already there but check if new md5 attachment has come. If yes, save it\n \n # Retrieve existing combos from DB to get checked against combo of spam under analysis\n #checkData = \"SELECT spamPot.spam.from, spamPot.spam.subject, spamPot.spam.firstSeen FROM spamPot.spam\"\n #checkData = \"SELECT spamPot.spam.comboMD5 FROM spamPot.spam\"\n #checkData = \"SELECT spamPot.spam.id FROM spamPot.spam WHERE spamPot.spam.id = '\"+ str(mailFields['spam_id'])+\"'\" - 09262012\n #checkData = \"SELECT spamPot09252012.spam.id FROM spamPot09252012.spam WHERE spamPot09252012.spam.id = '\"+ str(mailFields['spam_id'])+\"'\"\n checkData = \"SELECT spam.id FROM spam WHERE spam.id = '\"+ str(mailFields['spam_id'])+\"'\"\n \n #logging.critical(\"Control reaches here\")\n #logging.critical(\"DB Name: %s\" % ShivaConfig.dataBaseName)\n #checkData = \"SELECT %s\" % (ShivaConfig.dataBaseName) + \".\" + \"spam.id FROM spamPot09252012.spam WHERE spamPot09252012.spam.id = '\"+ str(mailFields['spam_id'])+\"'\"\n #logging.critical(\"\\n\\n\\t\\tSQL Query: %s\" % checkData)\n #sys.exit(1)\n \n try:\n exeSql.execute(checkData)\n #print \"exiting......b0nd\"\n #sys.exit(1)\n if len(exeSql.fetchall()) >= 1:\n status = 1\n \n '''\n exeSql.execute(checkData)\n while(1):\n row = exeSql.fetchone()\n \n #logging.critical(\"type of row[0]: %s\", type(row[0].encode('utf-8')))\n #logging.critical(\"type of row[1]: %s\", type(row[1]))\n if row == None:\t\t\t\t\t\t# After reading last entry of DB, row returns None\n\tbreak\n #elif mailFields['from'] == row[0].encode('utf-8') and mailFields['subject'] == row[1].encode('utf-8') and str(datetime.date.today()) == str(row[2]).split(\" \")[0]:\t# MySQL from and subject are utf8 collated, so all data retrieved of type unicode. 
Need to convert before comparison\n            elif str(mailFields['comboMD5']) == row[0]:\n\tstatus = 1\t\t\t\t\t\t# i.e., recent spam's \"from\" + \"subject\" + \"date\" combo is already in DB\t\n\tbreak\n            else:\t\t\t\t\t\t\t# continue the loop if no match yet\n\tpass\n        \n        exeSql.execute(checkData)\n        rows = exeSql.fetchall()\n        \n        for row in rows:\n            if str(mailFields['comboMD5']) == row[0]:\n\t        status = 1\n\t        break\n            else:\n\t        pass\n        #str(mailFields['comboMD5']) == row[1]:\n        \n        #for row in rows:\n        #print \"%s, %s\" % (row[0], row[1])\n        #print \"Number of rows returned: %d\" % cursor.rowcount\n        '''\n    except mdb.Error, e:\n        logging.critical(\"[-] Error (ShivaMailRelayer - retrieving combos from DB) - %d: %s\" % (e.args[0], e.args[1]))\n        #print \"exiting1......b0nd\"\n        #sys.exit(1)\n        ShivaConfig.errorHandling(key, msgMailRequest)\n        return None\n    \n    # Old Combo\n    # Step 1: check if new attachment(s) has/have come with old combo\n    # Step 2: if yes, save attachment(s), retrieve spamPot.spam.totalCounter for combo and increase it by 1\n    # Step 3: if no, don't save attachment(s), retrieve spamPot.spam.totalCounter for combo and increase it by 1\n    # Step 4: put relay test cases:\n    #\t : if individual_relayed_today < 10 && totalRelayed_today < 100 -> relay, retrieve individual_relayed_today & totalRelayed and increment by 1\n    #\t : if (individual_relayed_today = 10 && totalRelayed_today < 100 ) or (individual_relayed_today < 10 && totalRelayed_today = 100) - don't relay\n    # Step 5: remove message from queue\n    \n    if status == 1:\t\t\t\t\t\t# i.e. it's old combo\n        logging.critical(\"status = 1, i.e. Old Combo - found in DB\")\n        ShivaOldSpam.pushIntoDB(mailFields, key, msgMailRequest, exeSql)\n    \n    # status = 0, i.e., combo not found in DB, so push data in it\n    \n    # New Combo:\n    # Step 1: push data into DB\n    # Step 2: initialize totalCounter for new combo\n    # Step 3: compare with total spams relayed for the day. \n    # Step 4: if haven't reached limit yet, relay spam, initialize relayCounter and totalRelayed, and remove message from queue\n    # Step 5: if limit for the day is reached, don't relay, but remove message from queue\n    else:\n        logging.critical(\"status = 0, i.e. New Combo - from and subject not found in DB\")\n        ShivaNewSpam.pushIntoDB(mailFields, key, msgMailRequest, exeSql) \n    \n    #exeSql.close() ", "title": "" }, { "docid": "524b84840ece9b3235168a547a4f346b", "score": "0.47937396", "text": "def createBarCodes():\n    c = canvas.Canvas(\"STOKBARHAZIRAN2017.pdf\", pagesize=A4)\n    c.setFont(\"tahoma\", 12)\n\n\n\n\n\n    # code93 also has an Extended and MultiWidth version\n\n\n\n\n\n\n    x = 1 * mm\n    y = 270* mm\n    x1 = 6.4 * mm\n\n    c.drawImage(\"./images/bishop.png\",x+5,y-15)\n\n    b=sonuc[0][2]\n    c.setFont(\"tahoma\", 30)\n    c.drawString(x+90,y-15,str(b))\n    c.setFont(\"tahoma\", 21)\n    c.drawString(x + 190, y + 35, \"SAYIM BAR 30/06/2017\")\n    c.setFont(\"tahoma\", 12)\n    y = y - 20 * mm\n    orhan=0\n    sayfa=1\n\n    for code in sonuc:\n        if b!=code[2]:\n            if orhan!=0:\n                c.drawString(x + 190, 20 , \"SAYIM BAR 30/06/2017 SAYFA \"+str(sayfa))\n                sayfa=sayfa+1\n                c.setFont(\"Courier\", 60)\n                # This next setting will make the text of our\n                # watermark gray, nice touch for a watermark.\n                c.setFillGray(0.3, 0.3)\n                # Set up our watermark document. Our watermark\n                # will be rotated 45 degrees from the direction\n                # of our underlying document.\n                c.saveState()\n                c.translate(500, 100)\n                c.rotate(45)\n                c.drawCentredString(0, 0, \"BISHOP NEN ©\")\n                c.drawCentredString(0, 300, \"BISHOP NEN ©\")\n                c.drawCentredString(0, 600, \"BISHOP NEN ©\")\n                c.restoreState()\n\n                c.showPage()\n                y = 280* mm\n            y = y - 20 * mm\n            c.setFont(\"tahoma\", 30)\n            c.drawString(x+90,y+35,str(code[2]))\n            c.setFont(\"tahoma\", 12)\n\n            b=code[2]\n        orhan=orhan+1\n        print code[0],code[1],orhan\n        barcode93 = code93.Standard93(code[0])\n        barcode93.drawOn(c, x, y+10)\n        c.drawString(x+90,y+15,str(code[0])+\" \"+ code[1])\n        c.rect(x,y+3,200*mm,10*mm, fill=0)\n        y = y - 10 * mm\n        if y<20 :\n            c.drawString(x + 190, 20, \"SAYIM BAR 30/06/2017 SAYFA \" + str(sayfa))\n            sayfa = sayfa + 1\n\n            c.setFont(\"Courier\", 60)\n            # This next setting will make the text of our\n            # watermark gray, nice touch for a watermark.\n            c.setFillGray(0.3, 0.3)\n            # Set up our watermark document. Our watermark\n            # will be rotated 45 degrees from the direction\n            # of our underlying document.\n            c.saveState()\n            c.translate(500, 100)\n            c.rotate(45)\n            c.drawCentredString(0, 0, \"BISHOP NEN ©\")\n            c.drawCentredString(0, 300, \"BISHOP NEN ©\")\n            c.drawCentredString(0, 600, \"BISHOP NEN ©\")\n            c.restoreState()\n\n            c.showPage()\n            y = 280* mm\n\n\n\n    # draw a QR code\n    qr_code = qr.QrCodeWidget('http://nen.duckdns.org/siparis/www')\n    bounds = qr_code.getBounds()\n    width = bounds[2] - bounds[0]\n    height = bounds[3] - bounds[1]\n    d = Drawing(45, 45, transform=[45./width,0,0,45./height,0,0])\n    d.add(qr_code)\n    renderPDF.draw(d, c, 15, 50)\n\n    c.drawString(x + 190, 20, \"SAYIM BAR 30/06/2017 SAYFA \" + str(sayfa))\n\n    c.setFont(\"Courier\", 60)\n    # This next setting will make the text of our\n    # watermark gray, nice touch for a watermark.\n    c.setFillGray(0.3, 0.3)\n    # Set up our watermark document. Our watermark\n    # will be rotated 45 degrees from the direction\n    # of our underlying document.\n    c.saveState()\n    c.translate(500, 100)\n    c.rotate(45)\n    c.drawCentredString(0, 0, \"BISHOP NEN ©\")\n    c.drawCentredString(0, 300, \"BISHOP NEN ©\")\n    c.drawCentredString(0, 600, \"BISHOP NEN ©\")\n    c.restoreState()\n\n    c.save()", "title": "" }, { "docid": "166461cf8ae8e933b9f94571d223f99a", "score": "0.47863185", "text": "def email():\n\n    ##value 'email' is taken out of the users table, input at registration\n    rows = db.execute(\"SELECT * FROM users WHERE userid=:userid\", userid=session[\"user_id\"])\n    email = rows[0][\"email\"]\n\n\n    msg = Message(\"Confirmation Code\",\n                  sender=\"[email protected]\",\n                  recipients=['%(email)s' % {'email': email}])\n\n    confcode = rows[0][\"confcode\"]\n\n    msg.body = ('Your confirmation code is %(confcode)s' %\n                {'confcode': confcode})\n\n    mail.send(msg)\n\n    return redirect(\"/confirm\")", "title": "" }, { "docid": "c6aca83fa32431cbd5d7e6d19d770fec", "score": "0.47752067", "text": "def report_success(self): \n        \n        #Channel Report\n        if self.tranche_trackausschluss == True: channel_list = self.tranche_channellist + [\"AUSSCHLUSS\"]\n        else: channel_list = self.tranche_channellist\n        \n        def channel_reporter(self, channel_list, aux_kg): \n            total_size = self.tranche_df[(self.tranche_df.TRANCHE_KG.isin(aux_kg)) & (self.tranche_df.KANAL.isin(channel_list))].shape[0]\n            aux_list = [self.tranche_df[(self.tranche_df.TRANCHE_KG.isin(aux_kg)) & (self.tranche_df.KANAL == i)].shape[0] for i in channel_list]\n            aux_list = [[channel_list[i], aux_list[i], round(aux_list[i] * 100 / total_size, 1)] for i in range(len(aux_list)) if aux_list[i] != 0]\n            return \" / \".join([str(i[0]) + \": \" + str(i[1]) + \" (\" + str(i[2]) + \"%)\" for i in aux_list])\n        \n        channel_report_total = channel_reporter(self, channel_list, [0, 1])\n        channel_report_zg = channel_reporter(self, channel_list, [0])\n        \n        #Hybris Report \n        if self.kpi_hybris == None: self.kpi_hybris = \"\" \n        \n        #eMail Subject & Body\n        email_subject = \"{0}Campaign {1} {2}: data preparation successful\".format({\"PROD\": \"\"}.get(self.stage, \"TESTMODE \"), self.campaign_id, self.campaign_name)\n        \n        email_body = \"\"\"\\\n            <html><head></head>\n            <body><span style=\"font-size: 14\"><font face=\"arial\">\n            <p>Dear campaign managers,</p>\n            <p>the data for campaign {0} {1} for {2} is ready for further processing.</p><br>\n            <font size=\"4\"><b>Tranche Report</b></font>\n            <hr>\n            <b>Total customers identified:</b> {3}<br>\n            <b>Customers after deselection:</b> {4} ({5}%)<br>\n            <b>Delivered target / control group:</b> {6} / {7}<br>\n            <b>Channel allocation (total):</b> {8}<br>\n            <b>Channel allocation (target group):</b> {9}<br>\n            <b>Campaign:</b> {0}<br>\n            <b>Tranche:</b> {10}<br><br><br>\n            {11}\n            <font size=\"4\"><b>Deselection Report</b></font>\n            <hr>\n            {12}\n            <br>\n            <br><br>\n            Best regards,<br>\n            Competence Center Data & Analytics<br><br> \n            \n            <div>\n            <img src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Python_logo_and_wordmark.svg/230px-Python_logo_and_wordmark.svg.png\" width=\"115\" height=\"34\"><br>\n            <span style=\"font-size: 12\"><i>This mail is powered by Python, the Programming Language of Leading Data Scientists.</i></span> \n            </div> \n            </span></font></body>\n            </html>\"\"\".format(self.campaign_id, self.campaign_name, self.tranche_timestamp.strftime(\"%d.%m.%Y\"),\n                              self.kpi_customers_identified, self.kpi_customers_selected, \n                              round(self.kpi_customers_selected / self.kpi_customers_identified * 100, 1),\n                              self.kpi_size_targetgroup, self.kpi_size_controlgroup, \n                              channel_report_total, channel_report_zg,\n                              self.tranche_number, self.kpi_hybris, \"<br>\".join(self.kpi_reports))\n        \n        if self.stage == \"PROD\": email_to = \"; \".join(self.campaign_manager + self.campaign_techsupport)\n        else: email_to = \"; \".join(self.campaign_techsupport)\n        \n        sf.send_email(subject = email_subject, body = email_body , to = email_to, use_amg = sf.platform_is_server())\n        return", "title": "" }, { "docid": "b7708a363880e273ac0afad23068a614", "score": "0.47407392", "text": "def scrape_to_db(scrape_result):\n\n    plist_name = scrape_result[0]\n    all_books = scrape_result[1]\n    plist_check = server_helper.check_public_list(plist_name)\n\n    if plist_check:\n        print \"Since on DB, don't go any further.\"\n\n    else: \n        server_helper.add_to_public_list(plist_name)\n        print \"new list added to DB & now ADD books\" \n\n    # Start looping through each book in list: check title & author before adding to DB\n    book_order = 0\n    for book in all_books: \n        title = book[0] \n        author = book[1]\n        cover = book[2]\n        book_order = 1 + book_order \n\n        # Function below checks if book exists in database \n        book_in_db = server_helper.check_books(title, author)\n\n        # If book exists in books table, only add to pl_books table to capture book_order \n        if book_in_db:\n            pl_id = Public_List.query.filter(Public_List.pl_name == plist_name).first().pl_id\n            book_id = Book.query.filter(Book.book_title == title).first().book_id\n            add_pl_table = server_helper.add_to_pl_book(book_id, pl_id, book_order)\n            \n\n        # Book not found? Add to books table & pl_books table \n        else: \n            new_book = server_helper.add_to_books_table(title, author, cover)\n            new_book_id = new_book.book_id \n            pl_id = Public_List.query.filter(Public_List.pl_name == plist_name).first().pl_id \n            add_pl_table = server_helper.add_to_pl_book(new_book_id, pl_id, book_order)", "title": "" }, { "docid": "e6ba58d1f53a8934d8266fe14e4e75b6", "score": "0.4714744", "text": "def _is_ready_to_fulfill(self, data):\n        return True", "title": "" }, { "docid": "9221f1d61c01aadcde51833661c7679c", "score": "0.47078028", "text": "def _is_ready_to_fulfill(self, data):\n        return False", "title": "" }, { "docid": "8b2e84d9a41046b31b0f697d3281edde", "score": "0.4703318", "text": "async def _publish_online_banking_mailer_events(rows: List[Dict[str, str]], paid_amount: float):\n    # Publish message to the Queue, saying account has been created.
Using the event spec.\n    pay_account = _get_payment_account(rows[0])  # All rows are for same account.\n    # Check for credit, or fully paid or under paid payment\n    credit_rows = list(\n        filter(lambda r: (_get_row_value(r, Column.TARGET_TXN) == TargetTransaction.RECEIPT.value), rows))\n    under_pay_rows = list(\n        filter(lambda r: (_get_row_value(r, Column.TARGET_TXN_STATUS).lower() == Status.PARTIAL.value.lower()), rows))\n\n    credit_amount: float = 0\n    if credit_rows:\n        message_type = 'bc.registry.payment.OverPaid'\n        for row in credit_rows:\n            credit_amount += float(_get_row_value(row, Column.APP_AMOUNT))\n    elif under_pay_rows:\n        message_type = 'bc.registry.payment.UnderPaid'\n    else:\n        message_type = 'bc.registry.payment.Payment'\n\n    queue_data = {\n        'accountId': pay_account.auth_account_id,\n        'paymentMethod': PaymentMethod.ONLINE_BANKING.value,\n        'amount': '{:.2f}'.format(paid_amount),  # pylint: disable = consider-using-f-string\n        'creditAmount': '{:.2f}'.format(credit_amount)  # pylint: disable = consider-using-f-string\n    }\n\n    payload = {\n        'specversion': '1.x-wip',\n        'type': message_type,\n        'source': f'https://api.pay.bcregistry.gov.bc.ca/v1/accounts/{pay_account.auth_account_id}',\n        'id': f'{pay_account.auth_account_id}',\n        'time': f'{datetime.now()}',\n        'datacontenttype': 'application/json',\n        'data': queue_data\n    }\n\n    try:\n        await publish(payload=payload,\n                      client_name=APP_CONFIG.NATS_MAILER_CLIENT_NAME,\n                      subject=APP_CONFIG.NATS_MAILER_SUBJECT)\n    except Exception as e:  # NOQA pylint: disable=broad-except\n        logger.error(e)\n        logger.warning('Notification to Queue failed for the Account Mailer %s - %s', pay_account.auth_account_id,\n                       payload)\n        capture_message('Notification to Queue failed for the Account Mailer {auth_account_id}, {msg}.'.format(\n            auth_account_id=pay_account.auth_account_id, msg=payload), level='error')", "title": "" }, { "docid": "05091e462857296b743f07b0bcb5b5a8", "score": "0.46943724", "text": "def make_copy_available(request_id):\n\n    barcode_requested = db.get_requested_barcode(request_id)\n    db.update_item_status('available', barcode_requested)\n\n    return", "title": "" }, { "docid": "fd20f72376007a0f0fd0aa7a512926cb", "score": "0.46917847", "text": "def email_notification(form):\n    email = form.get(\"g587-email\",\"\").lower()\n    body = \"\"\"New Library Card Request\nName: {0} {1}\nBirthday: {2}\nAddress: {3}, {4}, {5} {6}\nPhone number: {7}\nEmail: {8}\nTemporary Library Card Number: {9}\n\"\"\".format(form.get(\"g587-firstname\"),\n           form.get(\"g587-lastname\"),\n           form.get(\"g587-birthday\"),\n           form.get(\"g587-address\"),\n           form.get(\"g587-city\"),\n           form.get(\"g587-state\"),\n           form.get(\"g587-zipcode\"),\n           form.get(\"g587-telephone\"),\n           email,\n           form.get(\"temp_card_number\")\n           )\n    msg = MIMEText(body)\n    msg['Subject'] = \"New Card Request\"\n    msg['From'] = app.config[\"EMAIL_SENDER\"] \n    msg['To'] = ','.join(app.config[\"EMAIL_RECIPIENTS\"])\n    msg['To'] += \",{}\".format(email)\n    mail_server = smtplib.SMTP('localhost')\n    mail_server.send_message(msg)\n    mail_server.quit()", "title": "" }, { "docid": "e6f24be06b7d2932c31cb7b965d8d746", "score": "0.46847552", "text": "def send_code_email():\n\n    code = request.get_json().get(\"verificationCode\")\n\n    email = request.get_json().get(\"email\")\n\n    crud.email_handler(email, f'Verification code for Good Samaritan Finder: {code}')\n\n    return jsonify({\"success\": True})", "title": "" }, { "docid": "5ad1f9c738bcfea5b8a69950794e41b7", "score": "0.4671018", "text": "def spider_closed(self, spider):\n        new_email = Email()\n        message = new_email.build_message_from_gigs(self.gigs_to_send)\n        new_email.send(settings.TO_EMAIL, message)\n        # loop back through and save each one as sent.\n        self.record_sent_gigs(self.gigs_to_send)\n        self.cursor.close()", "title": "" }, { "docid": "f590ead7faa1bac8d7cea80bfbe63a81", "score": "0.46705705", "text": "def bookAFlight(self, userEmail: str):\n        source = input(\"Enter the source: \")\n        destination = input(\"Enter the destination: \")\n\n        if not source or not destination:\n            print(\"Invalid Inputs\")\n            return\n\n        allFlights, availableFlights = self.getFlights(source, destination, display=True)\n\n        if not availableFlights:\n            return\n        flightId = input(\"Enter the Flight Id: \")\n        if not flightId.isdigit(): \n            print(\"Invalid Input\")\n            return\n\n        flightId = int(flightId)\n\n        flight = self.getFlightById(source, destination, flightId)\n\n        if not flight:\n            print('No Flight with the given id found!')\n            return\n\n        if flight[\"available\"] <= 0:\n            print(\"No Seats Available -- Try another Flight\")\n            return\n\n        bookingsPath = 'data/bookings.json'\n        flightsPath = 'data/flights.json'\n        bookingId = f'{userEmail}-{flight[\"id\"]}-{flight[\"date\"]}'\n\n        bookings = []\n\n        out = self.file.readJsonFile(bookingsPath)\n\n        if out['status'] == 200:\n            bookings = out['data']\n\n        id = 1\n        if bookings:\n            id = bookings[-1]['id'] + 1\n\n        newBooking = dict()\n\n        newBooking['id'] = id\n        newBooking['bookingId'] = bookingId\n        newBooking['flight'] = flight\n        newBooking['email'] = userEmail\n        newBooking['isActive'] = True\n\n        bookings.append(newBooking)\n\n\n        for flight in allFlights:\n            if flight[\"id\"] == flightId:\n                print(flight)\n                flight[\"available\"] -= 1\n\n        \n        writeBookings = self.file.writeJsonFile(bookingsPath, bookings)\n\n        writeFlights = self.file.writeJsonFile(flightsPath, allFlights)\n\n        if writeBookings['status'] == 200 and writeFlights['status'] == 200:\n            print('Activating Mail Service')\n            sendMail(userEmail, bookingId, flight, True)\n            print(\"Booking Successful\")\n            print()\n        else:\n            print('something went wrong')", "title": "" }, { "docid": "cbf494091b08618e758f7eb678cd2238", "score": "0.46696326", "text": "def on_collect_complete(self):\n        self.status_label.setText(\"Processing Noise Data...\")\n        self.progress_label.setText(\"N/A\")\n        self.progress_bar.close()", "title": "" }, { "docid": "c2260ae412f3fafdf43179afb2bbdd84", "score": "0.46695688", "text": "def test_email_activation_code(self):\n        p = self._profile()\n        eq_(len(mail.outbox), 1)\n        ok_(('activate/%s' % p.activation_key) in mail.outbox[0].body)", "title": "" }, { "docid": "11c12eea134d2ecfd35b26be0a54b668", "score": "0.46619412", "text": "def setup_postalcodes():\n\n    con = sqlite3.connect(DB_PATH)\n    postalcodes_setup = False\n\n    # test to see if the table exists\n    qry = \"\"\"SELECT name \n            FROM sqlite_master \n            WHERE type='table' AND name='postalcodes';\"\"\"\n\n    cur = con.cursor()\n\n    # get the row count...
if the row count is small reload the table\n    if bool(len(cur.execute(qry).fetchall())):\n        count = cur.execute(\"SELECT count(*) FROM postalcodes;\").fetchone()\n        if count[0] > 1000:\n            postalcodes_setup = True\n\n    if not postalcodes_setup:\n        print(\"Setting Postal Codes\")\n        delete_tbl_sql = \"DROP TABLE IF EXISTS postalcodes\"\n        create_tbl_sql = \"\"\"CREATE TABLE IF NOT EXISTS postalcodes (\n                        id integer PRIMARY KEY AUTOINCREMENT NOT NULL,\n                        postal_code text NOT NULL,\n                        city text NOT NULL,\n                        state_long text NOT NULL,\n                        state_short text,\n                        lat text,\n                        long text\n                        ) ;\"\"\"\n        ins_qry = \"\"\"INSERT INTO postalcodes \n                    (postal_code, \n                    city,\n                    state_long, \n                    state_short, \n                    lat, \n                    long) \n                    VALUES (?,?,?,?,?,?)\"\"\"\n        cur.execute(delete_tbl_sql)\n        con.commit()\n        cur.execute(create_tbl_sql)\n        con.commit()\n\n        # read the datafile and add to the database\n        with open(os.path.join(CURRENT_DIR,\"postalcodes\",\"US.txt\"), \"r\") as data:\n            pcodes = list(csv.reader(data, delimiter='\\t'))\n            for ln in pcodes:\n                cur.execute(ins_qry,(ln[1], ln[2], ln[3], ln[4], ln[9], ln[10]))\n        con.commit()\n    cur.close()\n    con.close()", "title": "" }, { "docid": "5e0947fb73a88aefae90e0951ba16b", "score": "0.46616802", "text": "def responseComplete(self):", "title": "" }, { "docid": "17c9cac1ab27f185834d00d0a82ac341", "score": "0.4650904", "text": "def do_register(self):\n        item = self.schedule.barteritem_set.all()[0]\n        self.valid_data['item-items'] = [ item.pk, ]\n        \n        # post a valid form\n        response = self.client.post(self.url, data=self.valid_data, follow=True)\n        \n        return response", "title": "" }, { "docid": "3a577bf36c834009e235334e18212283", "score": "0.46439457", "text": "def checkin(badge_mac):\n\n    # Checking the user-agent to grant access to post data is weak at best but\n    # the only reasonable thing we could do given the time frame.\n    user_agent = request.headers.get('User-Agent')\n    if \"WarBadge Experimental ShmooCon 2018\" not in user_agent:\n        log.error(\"Bad User-Agent: %s\", user_agent)\n        abort(403)\n    insert_template = (u\"INSERT INTO entries \"\n                       \"(badge_mac, ssid, bssid_mac, rssi) \"\n                       \"VALUES('{0}', '{1}', '{2}', {3})\")\n\n    conn = mysql.connect()\n    cursor = conn.cursor()\n\n    try:\n        for ssid, entries in request.get_json().iteritems():\n            for bssid_mac, rssi in entries.iteritems():\n                insert = insert_template.format(badge_mac,\n                                                conn.escape_string(ssid),\n                                                bssid_mac, rssi)\n                cursor.execute(insert)\n        conn.commit()\n    except NameError as exception:\n        log.error(\"Bad SSID: %s\", exception)\n        return_code = 403\n    # TODO: Find something more specific to catch.\n    except Exception as exception:  # pylint: disable=W0703\n        log.error(\"Caught Exception (unicode?) for %s: %s\", badge_mac, exception)\n        log.error(request.data)\n        return_code = 500\n    else:\n        return_code = 201\n        log.info(\"Successful checkin for %s\", badge_mac)\n    finally:\n        conn.close()\n    payload = json.dumps({'warbadging': True})\n    content_type = {'ContentType': 'application/json'}\n    return payload, return_code, content_type", "title": "" }, { "docid": "399c4de88508525e36ac9832fc1e13e1", "score": "0.46355137", "text": "def _loadBarcodes(self):\n        logging.info(\"Loading barcodes\")\n        raw_seqs = set()  # 1. Make sure no barcode sequences are duplicate\n        names = []        # 2. Keep a list of all unique barcode names\n        sequences = {}    # 3. Create a dictionary of all barcode sequences\n        for barcode in FastaReader(options.barcodeFilename):\n            name = barcode.name.strip().split()[0]\n            # Check the barcode name\n            if name in names:\n                raise ValueError(\"Duplicate barcode name in '{0}'\".format(name))\n            else:\n                names.append( name )\n\n            # Check the forward sequence\n            if barcode.sequence in raw_seqs:\n                raise ValueError(\"Duplicate barcode sequence in '{0}'\".format(name))\n            else:\n                raw_seqs.add( barcode.sequence )\n\n            # Check the reverse complement sequence\n            rc_barcode = reverse_complement( barcode )\n            if rc_barcode.sequence in raw_seqs:\n                raise ValueError(\"Duplicate barcode sequence in '{0}'\".format(name))\n            else:\n                raw_seqs.add( rc_barcode.sequence )\n\n            # If both pass, add the sequences and pair-wise combinations\n            sequences[(name, 'FORWARD')] = barcode.sequence\n            sequences[(name, 'REVERSE')] = rc_barcode.sequence\n        # Verify that all of the barcodes are the same length\n        bc_lengths = list(set([len(s) for s in sequences.itervalues()]))\n        if len(bc_lengths) > 1:\n            msg = \"Multiple barcode lengths detected - {0}\".format(bc_lengths)\n            logging.error( msg )\n            raise ValueError( msg )\n        self._barcodeLength = bc_lengths[0]\n        self._barcodeSequences = sequences\n        self._barcodeNames = names\n        self._barcodePairs = [(names[i], names[i+1]) for i in range(0,len(names)-1,2)]\n        self._barcodePairNames = [\"{0}--{1}\".format(p[0], p[1]) for p in self._barcodePairs]", "title": "" }, { "docid": "9423b4933050603e07cbfeba6618f2e9", "score": "0.46339306", "text": "def _run(self, database):\n        codes = self._get_products_codes(database)\n\n        for i in range(len(codes.all())):\n            try:\n                self._get_OFF_product(codes, i)\n                local_product = self.product_manager.\\\n                    select_product_information(self.OFF_code)\n                self._update_new_information(local_product)\n                self._update_categories_information(local_product)\n                self._update_stores_information(local_product)\n            except KeyError as e:\n                print('Ouch, KeyError: ', e, file=open('print_log.txt', 'a'))\n\n        self._save_update_date()", "title": "" }, { "docid": "f89357062909a9545ac10d6fd786a78c", "score": "0.4628445", "text": "def _issue_bills(self, reebills):\n        issue_date = datetime.utcnow()\n        for reebill in reebills:\n            # a correction bill cannot be issued by itself\n            assert reebill.version == 0\n            reebill.issue(\n                issue_date, self,\n                corrections=reebill.reebill_customer.get_unissued_corrections())\n            self.mail_reebill(\"issue_email_template.html\", \"Energy Bill Due\",\n                              reebill, reebill.email_recipient)", "title": "" }, { "docid": "652b4493b66aae0a18d8a4f5a9978ab6", "score": "0.46264803", "text": "async def brb(self, ctx):\n        brb_bans = (await self.db.find_one({'name': 'brb_bans'}))['ids']\n        if ctx.author.id in brb_bans:\n            await ctx.send(\"but will you *really* brb?\")\n        else:\n            await ctx.send(\n                'https://cdn.discordapp.com/attachments/'\n                '263457479239663616/319531749564612617/'\n
'stickerline-201503031729571.png'\n            )", "title": "" }, { "docid": "8224e3850ae2d1beae03327ada8a288b", "score": "0.4621035", "text": "def status(self, mailbox,names):\n\t\tpass", "title": "" }, { "docid": "71406d94183cd7e6f8ab67cf624cd28e", "score": "0.46132714", "text": "def alert_for_missing_barcodes(barcodes, plates):\n    found = []\n    missing = []\n    for barcode in barcodes:\n        if not pydash.find(plates, {'name': barcode}):\n            print('Barcode: {} not found!'.format(barcode))\n            missing.append(barcode)\n        else:\n            found.append(barcode)\n\n    if not len(missing):\n        print('Found all the barcodes')", "title": "" }, { "docid": "73ada1e2f4dbb1291cce86c2a1d45141", "score": "0.45991895", "text": "def __ondatareceived(self, sender, args):\n        if self.debug:\n            WinForms.MessageBox.Show('Barcode Found! [Event OnDataReceived Triggered]')\n        self._barcode = SysTxt.Encoding.GetEncoding(\"Shift_JIS\").GetString(args.data)[:-1]\n        \n        if self.saveimg:\n            if not os.path.exists('./BARCODE_IMG'):\n                os.makedirs('./BARCODE_IMG')\n            try:\n                srcFile = self.barcodeReaderControl1.LSIMG()  # File name of the transfer source (reader side)\n                dstFile = './BARCODE_IMG/' + self._barcode + '__' + srcFile.split(\"\\\\\")[2]  # dstFile As String\n                self.barcodeReaderControl1.GetFile(srcFile, dstFile)\n            except SysException as ex:\n                raise BarcodeReaderError('[SAVEIMAGE ERROR]: ' + ex.Message)\n        self._rsltready = True", "title": "" }, { "docid": "6d3ecd78e67f2a758f71479c840bb704", "score": "0.45863125", "text": "def on_success(self):\n        pass", "title": "" }, { "docid": "ec97687ed5868af8b761aa112c2ac1d9", "score": "0.457926", "text": "def get_unsent_barcodes_from_qiita_buffer(self):\n        sql = \"\"\"SELECT barcode\n                 FROM project_qiita_buffer\n                 WHERE pushed_to_qiita='N'\"\"\"\n        return [i[0] for i in self._con.execute_fetchall(sql)]", "title": "" }, { "docid": "0b796de5182afd0916b52ac401bed817", "score": "0.45791626", "text": "def process(email_column=\"email\"):\n    if ROSTER != None:\n        r = pd.read_csv(ROSTER, header=0)\n        emails = r[email_column]\n    else:\n        emails = [EMAIl]\n    i = 0\n    for email in emails:\n        watermark_and_upload(email, FILE)\n        i += 1\n        if i % 5 == 0:\n            print(f\"{i} finished uploading and sharing\")", "title": "" }, { "docid": "061e91fe122e5a6a94afa5a6dfb14c85", "score": "0.45779067", "text": "def set_deposited_ebi(self):\n        accession = 'ERP012803'\n        samples = fetch_url(\n            'http://www.ebi.ac.uk/ena/data/warehouse/filereport?accession='\n            '%s&result=read_run&fields=sample_alias' % accession)\n        # Clean EBI formatted sample names to just the barcodes\n        # stripped of any appended letters for barcodes run multiple times\n        barcodes = tuple(s.strip().split('.')[1][:9]\n                         for s in samples if len(s.split('.')) == 2)\n\n        sql = \"\"\"UPDATE ag.ag_kit_barcodes\n                 SET deposited = TRUE\n                 WHERE barcode IN %s\"\"\"\n        self._con.execute(sql, [barcodes])", "title": "" }, { "docid": "07d1ae1fa6a89a4350deccb3a854cc74", "score": "0.45751017", "text": "def data_ready(self):\r\n        pass", "title": "" }, { "docid": "f8fb968adb030ecda07261607113c019", "score": "0.45726922", "text": "def assignBarcode(self, barcode_data: str, reload=True):\n\n        model_type = self.barcodeModelType()\n\n        response = self._api.post(\n            '/barcode/link/',\n            {\n                'barcode': barcode_data,\n                model_type: self.pk,\n            }\n        )\n\n        if reload:\n            self.reload()\n\n        return response", "title": "" }, { "docid": "89533565239d99537362dae68908971f", "score": "0.4567489", "text": "async def schedule_bulb_list_updates(_):\n        await bulb_list.async_update()", "title": "" }, { "docid": "7877a210b308e17088ddc9fb790c3e81", "score": "0.45665106", "text": "def save_emails(self):\n        self.record.save_all_donor_emails()\n        print(\"All Donor emails saved.\")", "title": "" }, { "docid": "aca877076e7881266ea4cd120f682788", "score": "0.45577082", "text": "def check_library(cartridges):\n\t# set the location for each\n\t# cart object\n\tfor obj in cartridges:\n\t\tget_location(obj)\n\tloaded = [c.data['barcode'] for c in cartridges if c.location != 'External']\n\tunloaded = [c.data['barcode'] for c in cartridges if c.location == 'External']\n\tif loaded:\n\t\tprint \" The following barcodes are already loaded:\"\n\t\tloaded.sort()\n\t\tfor barcode in loaded:\n\t\t\tprint \" \u001b[42m%s\u001b[m\" % (barcode)\n\tif unloaded:\n\t\tprint \"\\n Please load the following barcodes:\"\n\t\tunloaded.sort()\n\t\tfor barcode in unloaded:\n\t\t\tprint \" \u001b[44m%s\u001b[m\" % (barcode)\n\tri = raw_input(\"\\n Press [enter] when ready \")\n\tready = True\n\tif ri != \"\":\n\t\tsys.exit()", "title": "" }, { "docid": "d64d021b88ef1942fc3dfdf2836aac39", "score": "0.45537195", "text": "def done(self):\n        for model_name, objs in self._create_queues.items():\n            if len(objs) > 0:\n                self._commit(apps.get_model(model_name))", "title": "" }, { "docid": "f3bb49949b28cf5fd2ea4b373a452be1", "score": "0.4547799", "text": "def _explain_pulldown_failures(self, barcodes):\n        # if empty list passed, don't touch database\n        if len(barcodes) == 0:\n            return {}\n\n        def update_reason_and_remaining(sql, reason, failures, remaining):\n            failures.update(\n                {bc[0]: reason for bc in\n                 self._con.execute_fetchall(sql, [tuple(remaining)])})\n            return remaining.difference(failures)\n\n        fail_reason = {}\n        remaining = set(barcodes)\n        # TEST ORDER HERE MATTERS! Assumptions made based on filtering of\n        # curent_barcodes by previous checks\n        # not an AG barcode\n        sql = \"\"\"SELECT barcode\n                 FROM ag.ag_kit_barcodes\n                 WHERE barcode IN %s\n                 UNION\n                 SELECT barcode\n                 FROM ag.ag_handout_barcodes\n                 WHERE barcode IN %s\"\"\"\n        hold = {x[0] for x in\n                self._con.execute_fetchall(\n                    sql, [tuple(remaining)] * 2)}\n        fail_reason.update({bc: 'Not an AG barcode' for bc in\n                            remaining.difference(hold)})\n        remaining = hold\n        # No more unexplained, so done\n        if len(remaining) == 0:\n            return fail_reason\n\n        # handout barcode\n        sql = \"\"\"SELECT barcode\n                 FROM ag.ag_handout_barcodes\n                 WHERE barcode IN %s\"\"\"\n        remaining = update_reason_and_remaining(\n            sql, 'Unassigned handout kit barcode', fail_reason, remaining)\n        # No more unexplained, so done\n        if len(remaining) == 0:\n            return fail_reason\n\n        # withdrawn\n        sql = \"\"\"SELECT barcode\n                 FROM ag.ag_kit_barcodes\n                 WHERE withdrawn = 'Y' AND barcode in %s\"\"\"\n        remaining = update_reason_and_remaining(\n            sql, 'Withdrawn sample', fail_reason, remaining)\n        # No more unexplained, so done\n        if len(remaining) == 0:\n            return fail_reason\n\n        # sample not logged\n        sql = \"\"\"SELECT barcode\n                 FROM ag.ag_kit_barcodes\n                 WHERE sample_date IS NULL AND barcode in %s\"\"\"\n        remaining = update_reason_and_remaining(\n            sql, 'Sample not logged', fail_reason, remaining)\n        # No more unexplained, so done\n        if len(remaining) == 0:\n            return fail_reason\n\n        # Sample not consented\n        sql = \"\"\"SELECT barcode\n                 FROM ag.ag_kit_barcodes\n                 JOIN ag.source_barcodes_surveys USING (barcode)\n                 WHERE survey_id IS NULL AND barcode in %s\"\"\"\n        remaining = update_reason_and_remaining(\n            sql, 'Sample logged without consent', fail_reason, remaining)\n        # No more unexplained, so done\n        if len(remaining) == 0:\n            return fail_reason\n\n        # other\n        fail_reason.update({bc: 'Unknown reason' for bc in remaining})\n        return fail_reason", "title": "" }, { "docid": "56562d729485d0d840a8c5540154ba81", "score": "0.45352462", "text": "def consume(self) -> None:\n        for msg in self._consumer:\n
logger.info(f\"Received message: {msg.value}\")\n with self._conn.cursor() as cur:\n cur.execute(\"\"\"\n INSERT INTO\n availability_data (host, status_code, elapsed_time, regex_matched)\n VALUES\n (%(host)s, %(status_code)s, %(elapsed_time)s, %(regex_matched)s)\n \"\"\", msg.value)", "title": "" }, { "docid": "40c08d44e443f9df9ff0689ee40726e8", "score": "0.45250893", "text": "def check_code(self):\n\n\t\tos.chdir('/home/pi/blockytalky/usercode/')\n\t\t\n\t\tfor file in os.listdir('/home/pi/blockytalky/usercode/'):\n\t\t\tfo = open(file, \"rb\")\n\t\t\tcode = fo.read()\n\t\t\tfo.close()\n\t\t\ttry:\n\t\t\t\trequest = requests.post(\"http://104.131.249.150:5000\", data=code, headers=self.headers)\n\t\t\t\tnewfile = \"/home/pi/blockytalky/sentcode/\" + str(file)\n\t\t\t\tos.rename(file, newfile)\n\t\t\texcept:\n\t\t\t\t# POST failed, leave file, try again next loop\n\t\t\t\tpass", "title": "" }, { "docid": "66437182f81e71b33e6cb3f1e9246f63", "score": "0.45196748", "text": "def _notify_if_duplicate_barcodes(self):\n count = 0\n barcodes = tuple([driver.barcode for driver in self.drivers])\n for driver in self.drivers:\n if barcodes.count(driver.barcode) > 1:\n driver.messages.add('Duplicate barcode')\n count += 1\n return count", "title": "" }, { "docid": "587008b764fd8ed851a7fb8ef1e57b0d", "score": "0.45171788", "text": "def after_send(self, pdu):\n pass", "title": "" }, { "docid": "6cbe5e8e203c5b8976658ece7353cdcc", "score": "0.45056888", "text": "def claim_free_ebook():\n\n URL = \"https://www.packtpub.com/packt/offers/free-learning\"\n USERNAME = \"\" #Username for Packt Account\n PASSWORD = \"\" #Password for Packt Account\n\n options = webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\")\n driver = webdriver.Chrome(chrome_options=options)\n driver.get(URL)\n\n #Login to packtpub account\n driver.find_element_by_class_name('twelve-days-claim').click()\n driver.execute_script(\"window.scrollTo(0, 0);\")\n driver.find_element_by_xpath(\"//div[@id='account-bar-form']/div/div/form/div/div/div/div/input[@id='email']\").send_keys(USERNAME)\n driver.find_element_by_xpath(\"//div[@id='account-bar-form']/div/div/form/div/div/div[2]/div/input[@id='password']\").send_keys(PASSWORD)\n driver.find_element_by_xpath(\"//div[@id='account-bar-form']/div/div/form/div/div/div[3]/input\").click()\n\n driver.implicitly_wait(2)\n\n #get book image, name, details\n html_source = driver.page_source\n email_data = parse_html(html_source)\n driver.find_element_by_class_name('twelve-days-claim').click()\n driver.close()\n\n #send email notification\n send_email_notification(email_data)", "title": "" }, { "docid": "fa956ed6cc962f76c73aaa662d5b06c6", "score": "0.4500431", "text": "def ready(self):\n import datahub.omis.notification.signal_receivers # noqa: F401", "title": "" }, { "docid": "2d53c9f98ca8b52ee85cc983aa28ba32", "score": "0.45003837", "text": "def send_result_email(self):\n pass", "title": "" }, { "docid": "43a0bf48ca3effcefbee43000b978cc4", "score": "0.45003694", "text": "def submitBtnFN(self):\r\n\r\n selectionFilesLst = [str(e.text()) for e in self.shots_ListWidget.selectedItems()]\r\n if not selectionFilesLst:\r\n QtWidgets.QMessageBox.warning(self, 'Warning!!!', 'Select files and try again...')\r\n return\r\n\r\n for eachMayaFile in selectionFilesLst:\r\n self.makePlayblastFN(eachMayaFile)\r\n QtWidgets.QMessageBox.information(self, 'Success!!!', 'Successfully Created...')", "title": "" }, { "docid": "a33de09dd543f75e3a7c54dfef36f12b", "score": "0.44935775", "text": "def 
test_extract_barcodes_from_labels(self):\n \n fastq_lines =\\\n \"@HWI-ST830:GTATCT\\nAAAATTTTCCCCGGGG\\n+\\n1234567890ABCDEF\".split('\\n')\n \n extract_barcodes(fastq_lines, input_type = \"barcode_in_label\",\n output_dir = self.output_dir, disable_header_match = True)\n \n output_bcs_fp = open(join(self.output_dir, \"barcodes.fastq\"), \"U\")\n actual_bcs = [line for line in output_bcs_fp]\n expected_bcs =\\\n ['@HWI-ST830:GTATCT\\n', 'GTATCT\\n', '+\\n', 'FFFFFF\\n']\n \n self.assertEqual(actual_bcs, expected_bcs)", "title": "" }, { "docid": "eb08014b5119d956d85307428a828954", "score": "0.4489833", "text": "def record_sent_gigs(self, gigs_list):\n query = 'INSERT INTO gigs values (?, ?, ?, ?, ?)'\n for gig in gigs_list:\n self.cursor.execute(query, (\n gig['name'],\n gig['url'],\n ','.join(gig['skills']),\n datetime.now(),\n True\n ))\n self.connection.commit()", "title": "" }, { "docid": "00eafb9a9f2c54dbf8aa17c24843fcb2", "score": "0.44853243", "text": "def mail_to_user(devices_data, db):\n possible_status = ['changed_devices', 'new_devices']\n\n if sorted(possible_status) == sorted(devices_data.keys()):\n if len(devices_data['changed_devices']) > 0 or \\\n len(devices_data['new_devices']) > 0:\n mail_to_send = email.SendMAIL(devices_data, db)\n mail_to_send.send_message()\n else:\n return None\n else:\n mail_to_send = email.SendMAIL(devices_data, db)\n message = \"Wrong Status Detected, please check: \"\n message += ','.join(devices_data.keys())\n mail_to_send.send_message(message=message)\n\n return None", "title": "" }, { "docid": "0fce1145a46bf12cdf9816b38b87721c", "score": "0.44838035", "text": "def send_email():\n for email_address in MAIL_LIST[0]:\n files = []\n files.append(DATAFILE)\n text = \"{}/{} Trailer Sensor Readings\".format(\n time.localtime(time.time()).tm_mon, time.localtime(time.time()).tm_mday\n )\n msg = MIMEMultipart()\n msg[\"Subject\"] = text\n msg[\"From\"] = SRC_USERNAME\n msg[\"To\"] = email_address\n msg.attach(MIMEText(text))\n print(\"email_address\")\n print(email_address)\n print(\"--------\")\n for data_file in files:\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(open(data_file, \"rb\").read())\n encoders.encode_base64(part)\n part.add_header(\n \"Content-Disposition\",\n 'attachment; filename=\"%s\"' % os.path.basename(data_file),\n )\n msg.attach(part)\n server = smtplib.SMTP(\"smtp.gmail.com:587\")\n server.ehlo_or_helo_if_needed()\n server.starttls()\n server.ehlo_or_helo_if_needed()\n server.login(SRC_USERNAME, SRC_PASSWORD)\n server.sendmail(SRC_USERNAME, email_address, msg.as_string())\n server.quit()", "title": "" }, { "docid": "09c9f6e34fc8b20ddd372b137891b1b4", "score": "0.44828293", "text": "def handle_data(self):\n\n broadcast_times = get_broadcast_times_of_today()\n for broadcast_time in broadcast_times:\n broadcast_supplier = BroadcastSupplier.selectBy(broadcast_time_id=broadcast_time.id,\n supplier_id=self.supplier.id)\n movie = Movie.selectBy(imdb_id=broadcast_time.imdb_id)\n\n if broadcast_supplier.count():\n users_broadcast_supplier = UserBroadcastSupplier.selectBy(\n broadcast_supplier_id=broadcast_supplier[0].id)\n\n label_text_movie = \"Film: %s \\n Aantal: %s\" % (movie[0].ft_title, str(users_broadcast_supplier.count()))\n visitors_label = tk.Label(self.frame_movie_grid,\n text=label_text_movie,\n background=COLOR_RED,\n foreground=COLOR_WHITE,\n font=FONT_VISITOR_OVERVIEW)\n visitors_label.pack(padx=5, pady=20, side=tk.LEFT)\n\n user_list = tk.Text(self.frame_movie_grid, width=30, 
background=COLOR_BLACK, foreground=COLOR_GREY,\n font=FONT_VISITOR_OVERVIEW)\n user_list.pack(padx=5, pady=20, side=tk.LEFT)\n\n for user_broadcast_supplier in users_broadcast_supplier:\n user = User.selectBy(id=user_broadcast_supplier.user_id)\n\n user_str = user[0].name + \" met code:\\n \" + user_broadcast_supplier.code + \"\\n\"\n user_list.insert(tk.END, user_str)", "title": "" }, { "docid": "f0f47680696923c000905959ffb021a3", "score": "0.44741952", "text": "def script_info():\n\n logger.info('The following {} clubs were scanned:'.format(len(clubs)))\n for item in clubs:\n logger.info(item)\n\n logger.info('The following {} hosts were not scanned:'\n .format(len(scan_queue)))\n for ip in scan_queue:\n club = get_club(ip)\n if club:\n logger.info(club)\n else:\n logger.info(ip)\n\n end = time()\n runtime = end - start\n runtime = str(timedelta(seconds=int(runtime)))\n logger.info('Script Runtime: {} '.format(runtime))\n for ip in scan_queue:\n club_number = get_club(ip)\n if club_number:\n club_queue.append(club_number)\n else:\n club_queue.append(ip)\n\n for ip in not_connected:\n club_number = get_club(ip)\n if club_number:\n not_scanned.append(club_number)\n else:\n not_scanned.append(ip)\n\n logger.info('The following {} hosts were not scanned because of a problem: '\n .format(len(not_scanned)))\n for item in not_scanned:\n logger.info(item)\n\n mail.send_mail(ctime(start),\n runtime,\n clubs,\n club_queue,\n scan_queue,\n not_scanned,\n new_club,\n api_status,\n added,\n restored,\n updated,\n deleted)", "title": "" }, { "docid": "bd70c9d3add309b8349115bc5d9d15ee", "score": "0.44715863", "text": "def make_ready(self) -> None:\n self.is_ready = True", "title": "" }, { "docid": "3b68207eda21e2fda766382b27529648", "score": "0.44715297", "text": "async def bb1984(self, ctx=''):\n if config['DEBUG']: print(_(\"bb1984 command\"))\n\n # if the function was called from the daily_bgs loop, we use as a Context the one initialized in the loop\n if ctx is None:\n ctx = self.ctx\n\n self.refresh_faction_presence()\n\n for faction_name, faction_systems in self.factions_presence.items():\n information_block = \" \"\n if faction_name == \"LGC - Cartographers's Guild\":\n embed = discord.Embed(title=_(\"Bubble\"), description=information_block, color=0x00ff00)\n url = \"http://guilde-cartographes.fr/INFORMATIONS/32MU_STARNEWS/wp-content/uploads/2016/09/LOGO_LGC.png\"\n elif faction_name == \"LGC - Colonia Cartographers' Guild\":\n embed = discord.Embed(title=_(\"Colonia\"), description=information_block, color=0x147119)\n url = \"http://guilde-cartographes.fr/INFORMATIONS/32MU_STARNEWS/wp-content/uploads/2017/06/colonia.png\"\n else:\n embed = discord.Embed(title=faction_name, description=information_block, color=0x147119)\n url = \"\"\n embed.set_thumbnail(url=url)\n await ctx.send(embed=embed)\n\n information_block = \"\"\n for faction_system in faction_systems:\n system_quote = urllib.parse.quote(faction_system)\n url_to_call = \"https://www.edsm.net/api-system-v1/factions?showHistory=1&systemName=\" + system_quote\n\n r = requests.get(url_to_call)\n informations = r.json()\n\n last_update = 0\n\n for minor_faction in informations['factions']:\n if minor_faction['name'] == informations['controllingFaction']['name']:\n minor_faction_name = \"<:lgc:243332108636913665> \"\n else:\n minor_faction_name = \":black_large_square: \"\n\n influence_previous = 0\n\n if minor_faction['name'] in followed_factions.values():\n if minor_faction['lastUpdate'] > last_update:\n last_update = 
minor_faction['lastUpdate']\n\n last_update = datetime.fromtimestamp(last_update)\n information_block += minor_faction_name + informations['name']\n\n if minor_faction['influenceHistory']:\n for date, history in minor_faction['influenceHistory'].items():\n if (history != minor_faction['influence']\n and int(date) < int(minor_faction['lastUpdate'])):\n influence_previous = history\n information_block += \" *[{0:.2f}\".format(float(100 * float(influence_previous)))\n information_block += \"%]* > \"\n\n information_block += \"**[{:.2%}\".format(minor_faction['influence']) + \"]**\"\n information_block += \" | \" + translations[minor_faction['state']]\n information_block += \" *(\" + last_update.strftime(\"%d/%m-%Hh%M\") + \")*\\n\"\n\n if len(information_block) >= 1000:\n embed = discord.Embed(description=information_block, color=0x000000)\n await ctx.send(embed=embed)\n information_block = \"\"\n\n # To send the last bit in an embed even if we are under 1000 characters\n if len(information_block) > 0:\n embed = discord.Embed(description=information_block, color=0x000000)\n await ctx.send(embed=embed)\n information_block = \"\"", "title": "" }, { "docid": "c8f7fee7eed122721d3c49e25065a699", "score": "0.44684777", "text": "def test_is_barcoded(self):\n # Check test project\n self.assertFalse(self.project.isBarcoded())\n # Alter libraries in project so all have barcode flag set\n for lib in self.project.libraries:\n lib.is_barcoded = True\n self.assertTrue(self.project.isBarcoded())\n # Check with empty project (i.e. no libraries)\n self.assertFalse(SolidProject('No_libraries').isBarcoded())", "title": "" }, { "docid": "b301f0a7f67e6a39f713d01057d8c0fa", "score": "0.4467005", "text": "def email_status(self):\n print(f'Sending status email to {self.user}... ', end='', flush=True)\n now = dt.datetime.now().strftime('%I:%M:%S%p on %x')\n body = f'{self.subject} -- STATUS EMAIL\\n'\n body = body + f'\\n{self.subject} emails sent at {now}\\n\\n'\n for line in self.statusLog:\n body += line + '\\n'\n body += '\\nEND OF STATUS'\n sendmailStatus = self.smtpObj.sendmail(self.user, self.user, body)\n if sendmailStatus != {}:\n print('Incomplete')\n print(f'There was a problem sending the status email to {self.user}: {sendmailStatus}')\n else:\n print('Completed')\n return", "title": "" }, { "docid": "14e119ef2d1d137f246f5fa0394d2b3f", "score": "0.44646508", "text": "def _notify_tracim(\n self,\n mails: typing.List[DecodedMail],\n ) -> None:\n logger.debug(self, 'Notify tracim about {} new responses'.format(\n len(mails),\n ))\n unsended_mails = []\n # TODO BS 20171124: Look around mail.get_from_address(), mail.get_key()\n # , mail.get_body() etc ... 
for raise InvalidEmailError if missing\n # required informations (actually get_from_address raise IndexError\n # if no from address for example) and catch it here\n while mails:\n mail = mails.pop()\n msg = {'token': self.token,\n 'user_mail': mail.get_from_address(),\n 'content_id': mail.get_key(),\n 'payload': {\n 'content': mail.get_body(\n use_html_parsing=self.use_html_parsing,\n use_txt_parsing=self.use_txt_parsing),\n }}\n try:\n logger.debug(\n self,\n 'Contact API on {} with body {}'.format(\n self.endpoint,\n json.dumps(msg),\n ),\n )\n r = requests.post(self.endpoint, json=msg)\n if r.status_code not in [200, 204]:\n details = r.json().get('msg')\n log = 'bad status code {} response when sending mail to tracim: {}' # nopep8\n logger.error(self, log.format(\n str(r.status_code),\n details,\n ))\n # Flag all correctly checked mail, unseen the others\n if r.status_code in [200, 204, 400]:\n self._set_flag(mail.uid, IMAP_CHECKED_FLAG)\n else:\n self._unset_flag(mail.uid, IMAP_SEEN_FLAG)\n # TODO - G.M - Verify exception correctly works\n except requests.exceptions.Timeout as e:\n log = 'Timeout error to transmit fetched mail to tracim : {}'\n logger.error(self, log.format(str(e)))\n unsended_mails.append(mail)\n self._unset_flag(mail.uid, IMAP_SEEN_FLAG)\n except requests.exceptions.RequestException as e:\n log = 'Fail to transmit fetched mail to tracim : {}'\n logger.error(self, log.format(str(e)))\n self._unset_flag(mail.uid, IMAP_SEEN_FLAG)", "title": "" }, { "docid": "afde6cb008ca336286dd5e0751a35adb", "score": "0.44616255", "text": "def _on_refresh_clicked(self):\n self._check_unseen_mails(True)", "title": "" }, { "docid": "02907cda24ffb91c02fe4caf2bce1e0a", "score": "0.44614464", "text": "def action_accepted(self):\n for repair in self.repair_ids:\n code = self.env['ir.sequence'].next_by_code('repair.sequence')\n repair.write({'code': code})\n self.accepted_by = self.env.user.id\n self.accepted_date = datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n self._send_first_notification()\n self.state = 'accepted'", "title": "" }, { "docid": "cf83ebe03d89e91ade7b80e856249528", "score": "0.44613683", "text": "def post_results_bes(data):\n assert isinstance(data, str)\n reader = csv.DictReader(StringIO(data), delimiter=',', quotechar='\"')\n new_record_counter = 0\n for row in reader:\n # Check if the record exists, using lfdn as key\n lfdn_exists = db.session.query(BESTouchpoint).filter_by(lfdn=row['lfdn']).all()\n if not lfdn_exists:\n new_record = BESTouchpoint(lfdn=row.get('lfdn'),\n client_number=row.get('bg_12'),\n client_name=row.get('bg_13'),\n p_spec=row.get('bg_16'),\n business_area=row.get('bg_21'),\n handling_unit=row.get('bg_27'),\n q1=row.get('NKI1'),\n q2=row.get('NKI2'),\n q4=row.get('Trade_KPI'),\n q6=row.get('Improvements_text'),\n survey_date=row.get('bg_52'))\n db.session.add(new_record)\n new_record_counter += 1\n log.info('BES record with lfdn={} queued for database insert'.format(row['lfdn']))\n else:\n log.debug('Got already existing BES record lfdn={0}, skipping insert'.format(row['lfdn']))\n if new_record_counter:\n db.session.commit()\n log.info('{0} new BES records commited to database'.format(new_record_counter))\n else:\n log.info('No new BES record inserts in database')\n return 200", "title": "" }, { "docid": "2c32080f9035d918d815ef31e3c0e834", "score": "0.44610533", "text": "def activate(self):\n self.ready = True", "title": "" }, { "docid": "7e1a4f5b6f604f9d3f6d0b25d1a33aaf", "score": "0.44608468", "text": "def 
handle_delivery_report_content(pdu_data):", "title": "" }, { "docid": "cf77ae9d5a6f36b1b24a439aa08c9e90", "score": "0.4459211", "text": "def format_environmental(self, barcodes):\n md = {}\n errors = {}\n barcode_info = self.get_ag_barcode_details(\n [b[0][:9] for b in barcodes])\n # tuples are latitude, longitude, elevation, state\n zipcode_sql = \"\"\"SELECT UPPER(zipcode), country,\n round(latitude::numeric, 1),\n round(longitude::numeric,1),\n round(elevation::numeric, 1), state\n FROM zipcodes\"\"\"\n zip_lookup = defaultdict(dict)\n for row in self._con.execute_fetchall(zipcode_sql):\n zip_lookup[row[0]][row[1]] = map(\n lambda x: x if x is not None else 'Unspecified', row[2:])\n\n country_sql = \"SELECT country, EBI from ag.iso_country_lookup\"\n country_lookup = dict(self._con.execute_fetchall(country_sql))\n # Add for scrubbed testing database\n country_lookup['REMOVED'] = 'REMOVED'\n\n for barcode, env in barcodes:\n # Not using defaultdict so we don't ever allow accidental insertion\n # of unknown barcodes\n md[barcode] = {}\n # Add info from constants dict\n try:\n md[barcode].update(env_lookup[env])\n # Invariant information\n md[barcode]['TITLE'] = 'American Gut Project'\n md[barcode]['ASSIGNED_FROM_GEO'] = 'Yes'\n md[barcode]['PHYSICAL_SPECIMEN_REMAINING'] = 'Yes'\n md[barcode]['PHYSICAL_SPECIMEN_LOCATION'] = 'UCSDMI'\n\n # Barcode specific information\n specific_info = barcode_info[barcode[:9]]\n\n md[barcode]['ANONYMIZED_NAME'] = barcode\n md[barcode]['HOST_SUBJECT_ID'] = barcode\n\n # Geolocate based on kit information, since no other\n # geographic info available\n zipcode = specific_info['zip'].upper()\n country = specific_info['country']\n md[barcode] = self._geocode(md[barcode], zipcode, country,\n zip_lookup, country_lookup)\n\n md[barcode]['COLLECTION_DATE'] = \\\n specific_info['sample_date'].strftime('%m/%d/%Y')\n if specific_info['sample_time']:\n md[barcode]['COLLECTION_TIME'] = \\\n specific_info['sample_time'].strftime('%H:%M')\n else:\n # If no time data, show unspecified and default to midnight\n md[barcode]['COLLECTION_TIME'] = 'Unspecified'\n specific_info['sample_time'] = time(0, 0)\n\n md[barcode]['COLLECTION_TIMESTAMP'] = datetime.combine(\n specific_info['sample_date'],\n specific_info['sample_time']).strftime('%m/%d/%Y %H:%M')\n except Exception as e:\n del md[barcode]\n errors[barcode] = str(e)\n continue\n return md, errors", "title": "" }, { "docid": "13a223c211327159e45463857a91a0cb", "score": "0.44567463", "text": "def generate_barcode(self):\n if self.flags.in_insert:\n label = barcode.codex.Code39(self.barcode, ImageWriter(), add_checksum=False)\n\n path = frappe.get_site_path('public', 'files', self.barcode)\n\n label.save(path)\n\n file_name = '{0}.png'.format(self.barcode)\n\n file = frappe.get_doc({\n 'doctype': 'File',\n 'file_name': file_name,\n 'folder': 'Home/Attachments',\n 'attached_to_name': self.barcode,\n 'attached_to_doctype': 'Barcode Label',\n 'file_url': '/files/{0}'.format(file_name),\n 'is_private': 0\n })\n\n file.insert()\n\n self.image = '/files/{0}'.format(file_name)", "title": "" }, { "docid": "29f57a5fc52a41fadae6f7d49a26475e", "score": "0.44549614", "text": "def test_07_link_sent_done(self):\n self.fc.fd[\"moobe_setup_complete\"].select_send_link()\n self.fc.fd[\"share\"].verify_share_popup()\n self.fc.fd[\"share\"].select_message()\n self.fc.fd[\"message\"].verify_new_message_screen()\n self.fc.fd[\"message\"].compose_message(self.email)\n self.fc.fd[\"ios_system\"].handle_sim_card_popup()\n 
self.fc.fd[\"moobe_setup_complete\"].select_link_sent_done()\n self.fc.fd[\"moobe_setup_complete\"].verify_setup_complete_lets_print()", "title": "" }, { "docid": "0a3a54fa3363618c25e604c96d78f86d", "score": "0.4452563", "text": "def barcode(self):\n\n if self.msreg_barcode and self.stored_barcode:\n # Set a message for later\n self.messages.add(\n f'Print a new barcode for {self.name} because barcode updated in msreg to \"{self.msreg_barcode}\"'\n )\n self.remove_stored_barcode()\n return self.msreg_barcode or self.stored_barcode or self.generate_barcode()", "title": "" }, { "docid": "32bb1a633c09a7585dce10733dd0c827", "score": "0.44520405", "text": "def was_gibts_aus_bayern(update, context):\n update.message.reply_text('Döberin schaut nach Aufträgen...')\n everything = crawl_everything()\n bayern = gib_mir_bayern(everything)\n out = format_everything(bayern)\n update.message.reply_text('Auftrag xyz \\n' +\n out)", "title": "" }, { "docid": "da981a89e4222447344443539fca2777", "score": "0.4436152", "text": "def update(self):\n response = requests.get(blindConnectd.format(self._code))\n if response.status_code == 200:\n if 'true' in response.text:\n self._available = True\n else:\n self._available = False\n else:\n self._available = False", "title": "" }, { "docid": "024500afa70ea5c742b4ab74c4714a7d", "score": "0.44277966", "text": "def parse_messages():\n cur = conn.cursor()\n cur.execute('DROP TABLE IF EXISTS deals')\n cur.execute('CREATE TABLE deals (name text primary key, enddate date, addresses text, address_txt text, '\n 'address_url text, days text, info text, timing text, dayinfo text, latlongs text)')\n months = {\"Jan\": 1, \"Feb\": 2, \"Mar\": 3, \"Apr\": 4, \"May\": 5, \"Jun\": 6,\n \"Jul\": 7, \"Aug\": 8, \"Sep\": 9, \"Oct\": 10, \"Nov\": 11, \"Dec\": 12}\n year = date.today().year\n for message in get_messages():\n put_database = True\n address_done = False\n addresses = []\n address_text = None\n address_url = None\n timeinfo = None\n days = []\n latlongs = []\n text = message.message\n if text is None:\n continue\n if \"FLASH SALE\" in text or \"SingSaver\" in text:\n continue\n lines = text.splitlines()\n name = \"\"\n for char in lines[0]:\n if char in string.printable:\n name += char\n name = name.strip()\n first_tick = True\n last_date = date(year, 12, 31)\n info = \"\"\n timing = \"\"\n for line in lines:\n put_database = True\n if len(line) == 0:\n continue\n if line[0] == \"✅\":\n # information about discount (eg 1 for 1 milk tea)\n if first_tick:\n name += line[1:]\n first_tick = False\n elif \"Today only\" in line:\n last_date = message.date.date()\n if last_date < date.today():\n put_database = False\n else:\n # extract end date\n for month in months:\n if month in line:\n month_num = months[month]\n day = None\n if \"Until\" in line:\n day = int(line.split(\" \")[2])\n elif \"-\" in line:\n words = line.split(\" \")\n for i in range(len(words)):\n word = words[i]\n if \"-\" in word:\n day = int(word.split(\"-\")[-1])\n month_num = months[words[i+1][:3]]\n break\n if day is not None:\n last_date = date(year, month_num, day)\n if last_date < date.today():\n put_database = False\n\n break\n\n # extract timings\n if \"am\" in line or \"pm\" in line:\n if \"Before\" in line:\n timing = \"Before {}\".format(line.split(\" \")[-1])\n elif \"onwards\" in line:\n timing = \"After {}\".format(line.split(\" \")[-2])\n elif \"-\" in line:\n for word in line.split(\" \"):\n if \"-\" in word:\n timing = word\n\n # extract days\n days = ['Monday', 'Tuesday', 'Wednesday', 
'Thursday', 'Friday', 'Saturday', 'Sunday']\n for day in days:\n if day in line:\n timeinfo = line[1:].strip()\n break\n\n # get url that leads to info about discount\n if \"Source\" in line:\n info = line.split(\":\")[-1]\n\n # extract address\n if line[0] == \"📍\":\n address_text = line[1:].strip()\n if address_text == \"Store Locator\":\n address_text = \"All Outlets\"\n if \"#\" in line or re.search(r'[sS][0-9]{5,6}', line):\n addresses.append(line[1:].strip())\n address_done = True\n\n # parse provided link to find addresses if applicable\n if not address_done:\n for entity in message.entities:\n if type(entity) == MessageEntityTextUrl:\n if 'goo.gl' not in entity.url:\n addresses = get_postal_codes(entity.url)\n address_url = entity.url\n for address in addresses:\n print(address)\n latlong = getLatLng(address)\n latlongs.append([latlong['lat'], latlong['lng']])\n if put_database:\n try:\n cur.execute(\"INSERT INTO deals (name, enddate, addresses, address_txt, address_url, info, timing, dayinfo, latlongs) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)\",\n (name, last_date, json.dumps(addresses), address_text, address_url, info, timing, timeinfo, json.dumps(latlongs)))\n conn.commit()\n\n except psycopg2.IntegrityError:\n conn.rollback()\n except psycopg2.InternalError as e:\n print((\"INSERT INTO deals (name, enddate, addresses, address_txt, address_url, info, timing, dayinfo, latlongs) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)\"% (name, last_date, json.dumps(addresses), address_text, address_url, info, timing, timeinfo, json.dumps(latlongs))))\n print(e.pgerror)\n conn.commit()\n conn.close()", "title": "" }, { "docid": "a9f13967e8908a6456e452b424e62f7d", "score": "0.44239593", "text": "def isReady(self):\n pass", "title": "" }, { "docid": "fa6138ecdfef0520a24b774f5fd2e873", "score": "0.4423664", "text": "def run_on_start():\n\trun_scrape = True\n\t\n\twhile run_scrape == True:\n\t\tzip_codes = settings.ZIP_CODES\n\t\t\n\t\t#Search what we REALLY want\n\t\tfor zip_code in zip_codes:\n\t\t\n\t\t\tall_results = []\n\t\t\tall_results = do_scrape(zip_code, True)\n\t\t\t\n\t\t\t# Post each result to slack\n\t\t\tfor result in all_results:\n\t\t\t\tmessage = result\n\t\t\t\t\n\t\t\t\tpost_chat_to_slack(message)\n\t\t\n\t\t#Search things where the listing might just be ok\n\t\tfor zip_code in zip_codes:\n\t\t\n\t\t\tall_results = []\n\t\t\tall_results = do_scrape(zip_code, False)\n\t\t\t\n\t\t\t# Post each result to slack\n\t\t\tfor result in all_results:\n\t\t\t\tmessage = result\n\t\t\t\t\n\t\t\t\tpost_chat_to_slack(message)\n\t\t\t\n\t\ttime.sleep(settings.SLEEP_INTERVAL) # 20 minutes", "title": "" } ]
882625070fb2d7b72cc9a9a1c266da2d
Display prints information about what just happened to stdout.
[ { "docid": "5a2075a745cfad3dfe8fc7d1e44e7ac6", "score": "0.0", "text": "def Display(self, unused_args, result):\n printer = util.PrettyPrinter(0)\n printer.Print('Result of the restore-backup operation:')\n printer.PrintOperation(result)", "title": "" } ]
[ { "docid": "de71a50deacff66ac51efe1d380bfad3", "score": "0.7455874", "text": "def display():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # Call the write method with sys.stdout as the file.\n write(file=sys.stdout)", "title": "" }, { "docid": "1c3cedaffd5bdfe755017520f489bb0d", "score": "0.73436886", "text": "def print_standout(info):\n sys.stdout.write(\"Info: %s\" % info)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()", "title": "" }, { "docid": "b73c70eba73221d0151eb3691fd427bd", "score": "0.6960357", "text": "def print_out():\n pass", "title": "" }, { "docid": "4f338c3933096b47fc97e115d7541ef5", "score": "0.69563633", "text": "def stdout(self):\n pass", "title": "" }, { "docid": "3d26a7de91f9e189826308667a993ef0", "score": "0.67014945", "text": "def p(self):\n self.printstdout = True", "title": "" }, { "docid": "0aaca55e826e07c46d8e3d5a1513e10c", "score": "0.66119254", "text": "def printInfo():\n utils = CONFIG['utils']\n mytime = utils.mytime()\n logIt(\"Todays date: \" + mytime + \"\\n\")\n logIt(\" Number is: \" + str(CONFIG['number']) + \"\\n\")\n logIt(\" Host is: \" + str(CONFIG['host']) + \"\\n\")\n logIt(\" Port is: \" + str(CONFIG['port']) + \"\\n\")\n logIt(\" Log file is: \" + str(CONFIG['logfile']) + \"\\n\")\n logIt(\" Stdout flag is: \" + str(CONFIG['stdout']) + \"\\n\")\n logIt(\" Debug flag is: \" + str(CONFIG['debug']) + \"\\n\")", "title": "" }, { "docid": "0fc488c262c5db860968c0575ef14d9c", "score": "0.659986", "text": "def print_status(self):\r\n\t\tif VERBOSE:\r\n\r\n\t\t\tprint( 'Player : ')\r\n\t\t\tfor h in self.hands:\r\n\t\t\t\tprint('\\t' + str(h))\r\n\t\t\tprint( 'Dealer:\\n\\t' + str(self.dealer))\r\n\t\t\tprint( '-----------------------')", "title": "" }, { "docid": "aacf68e39e5be8232d9270c51b1fdfb2", "score": "0.6527617", "text": "def info(msg):\n sys.stdout.write('%s[ INFO ]%s %s\\n' % (colors.GREEN, colors.RESET , msg))", "title": "" }, { "docid": "d52e0e233e5ba40d5455e00f18da487a", "score": "0.6488839", "text": "def test_perform_display_print(capsys):\n assert sync_perform(stdio_dispatcher, Effect(Display(\"foo\"))) is None\n out, err = capsys.readouterr()\n assert err == \"\"\n assert out == \"foo\\n\"", "title": "" }, { "docid": "36afbd22d8baaa2e2660035a2d968fa3", "score": "0.6448931", "text": "def hook_print():\n sys.stdout = PrintHook()", "title": "" }, { "docid": "1df70412245b4081863013c468297a90", "score": "0.6432679", "text": "def displayStdout(self, test):\n test = proto_test(test)\n if test.dotted_name in self.stdout_output:\n self.stream.write(\n \"\\n{} for {}\\n{}\".format(\n self.colors.yellow(\"Captured stdout\"),\n self.colors.bold(test.dotted_name),\n self.stdout_output[test],\n )\n )\n del self.stdout_output[test]", "title": "" }, { "docid": "f6dd56cdd43b459e12d3d11e253e6e86", "score": "0.64228266", "text": "def print_info(msg):\n print(msg)", "title": "" }, { "docid": "806ec93bddbf9df701746446fedd6760", "score": "0.6388685", "text": "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "title": "" }, { "docid": "d2c8a5f716390720dab8be4528f61572", "score": "0.6355124", "text": "def displayInfo(self):\n # clear stdout for a smoother display\n # os.system('cls' if os.name=='nt' else 'clear')\n\n #print(\"=========== Status ============\")\n # print(\n # \"speed: \" + str(self.speed) +\n # \"\\nangle: \" + str(self.steering_angle) +\n # \"\\nsign: \" + str(self.detected_sign) +\n # \"\\nlane lines: \" + str(self.lane_lines) +\n # \"\\nintersection line flag: \" + str(self.intersection_line) 
+\n # \"\\ncurrent state label: \" + str(self.currentStateLabel) +\n # \"\\ncurrent states: \" + str(self.currentState)\n #)", "title": "" }, { "docid": "1550ab98ea50db1558f06ea37f913456", "score": "0.63410354", "text": "def hinfo(*objects, file=sys.stderr, flush=True, style=Fore.CYAN, **kwargs):\r\n with ScopedColoredStream(file, style, flush_on_exit=flush) as stream:\r\n print(*objects, file=stream, flush=False, **kwargs)", "title": "" }, { "docid": "6804365c0bac42f322f8fa27c84c74d5", "score": "0.6311187", "text": "def printOutput(self):\n pass", "title": "" }, { "docid": "876c3adcc5cee541116da8fc295d22cd", "score": "0.62989914", "text": "def info(message):\n global LAST_LOG\n LAST_LOG = message\n cprint('\\r[INF] {0}'.format(message), 'white', file=sys.stdout)", "title": "" }, { "docid": "5c350c5dd33fbb037578a7bdf922cf10", "score": "0.6293104", "text": "def info(msg, *args):\n if args:\n msg %= args\n click.echo(msg, file=sys.stdout)", "title": "" }, { "docid": "4977d16274778470e5f8955fabdf2294", "score": "0.6268923", "text": "def do_print(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.print_to_console(\n cmd_args.get('target'), \n cmd_args.get('filters')\n )\n if success:\n self.console_print(\"There, you asked for it!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "title": "" }, { "docid": "8e48694933b8942d6441720a7aa0fe03", "score": "0.62629884", "text": "def display_detail(msg, *args):\n msg = _concat_message(msg, *args)\n if verbose > 1:\n print \" %s\" % msg.encode(\"UTF-8\")\n sys.stdout.flush()\n if prefs.pref(\"LoggingLevel\") > 0:\n munkilog.log(u\" \" + msg)", "title": "" }, { "docid": "4ef924d19e9bb9548d7d5a9b12bca33e", "score": "0.62504095", "text": "def printhelp():", "title": "" }, { "docid": "2572b600d26cca12d3ee510fc23a6638", "score": "0.622207", "text": "def _verbose(self,text):\n if self.verbose:\n print(text)", "title": "" }, { "docid": "6ad96d3f5e5a42b45849892c1270d33a", "score": "0.62013775", "text": "def print_info(*args):\n print(CGREEN2 + str(*args) + CEND)", "title": "" }, { "docid": "d0f1e7ac6e1f222b1075dcd6554cc3b6", "score": "0.6196016", "text": "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "title": "" }, { "docid": "46dc59d1f5a5071c47cec3d52534fe98", "score": "0.6163901", "text": "def display_stdout_and_err_in_curr_cell(self):\n ipy_display(self.output_widget)", "title": "" }, { "docid": "8d1f907de067ac909ce434c7164b0861", "score": "0.61496186", "text": "def info(self, message):\n if self.show_info:\n print(message)", "title": "" }, { "docid": "0a106121781fce4ac67bd95a8ada361a", "score": "0.61458576", "text": "def showme(message):\n print(message)", "title": "" }, { "docid": "eb4e06a3423afed173f326d2bc969685", "score": "0.6139444", "text": "def display_usage():\n print >> sys.stderr, __doc__", "title": "" }, { "docid": "855ce8170702e7d891083b120624f181", "score": "0.6136315", "text": "def show(what):\n global program, simulator\n try:\n if \"breakpoints\".find(what) == 0 and simulator is not None:\n for(id, h, 
s) in simulator.get_breakpoints():\n print id, \" : hits={} {}\".format(h, s)\n elif \"assumptions\".find(what) == 0 and simulator is not None:\n for(g, l, expr) in simulator.get_assumptions():\n if l == 0:\n print \"0x{:x} : {}\".format(g, expr)\n else:\n print \"(0x{:x},{}) : {}\".format(g, l, expr)\n elif \"pc\".find(what) == 0:\n print \"0x{:x}\".format(pc())\n elif \"mppc\".find(what) == 0:\n print \"0x{:x}\".format(mppc())\n elif \"hooks\".find(what) == 0:\n for hf in sorted(hooks.keys()):\n print \"hooks for function\", hf.__name__\n index = 0\n for h in hooks[hf]:\n if h.__name__ is not None:\n if h.__name__.find(\"__\") == 0: # internal hook\n continue\n desc = h.__name__\n else:\n desc = str(h)\n print \"{:2d} : {}\".format(index, desc)\n index += 1\n if index == 0:\n print \"there is no hook\"\n except:\n simulation_error()", "title": "" }, { "docid": "932d545be67a0487babb40731e467c3e", "score": "0.61152506", "text": "def display_log(obj, title=None, show=False):\n print(obj)", "title": "" }, { "docid": "6bfdb6b0ffa81f84bfa631f5fc3e9f66", "score": "0.6110176", "text": "def info(msg):\n print(colored.green(\"[INFO]: {0}\".format(msg)))", "title": "" }, { "docid": "4282075ad0fb918d2e4a43af64b830b7", "score": "0.6109205", "text": "def print_stdout(command):\n sys.stdout.write(\"%s\\n\" % command)\n sys.stdout.flush()", "title": "" }, { "docid": "bd3249fa71b3469d601f96074c813b86", "score": "0.6094341", "text": "def main():\n print \"Printing Sample Status\"", "title": "" }, { "docid": "3cda87ef3e5c0344c1d1d69ba26161f4", "score": "0.6091792", "text": "def print_metadata():\n data = {\n 'python_implementation': platform.python_implementation(),\n 'python_version_info': tuple(sys.version_info),\n 'pickle_protocol': pickle.HIGHEST_PROTOCOL,\n }\n if sys.version_info < (3,):\n out_stream = sys.stdout\n else:\n out_stream = sys.stdout.buffer\n out_stream.write(json.dumps(data).encode(_IPC_ENCODING) + b'\\n')", "title": "" }, { "docid": "b92d67c35a3ace68d007b042209f8688", "score": "0.6081262", "text": "def info(self, text):\n if not self.is_quiet_err:\n self.__emit(text, sys.stderr)", "title": "" }, { "docid": "268cf3728dfeca214f51bc1e2fa77ef7", "score": "0.607781", "text": "def log_info(msg):\n msg = '{0}\\n'.format(msg)\n sys.stdout.write(msg)", "title": "" }, { "docid": "cefce1311a414c3bc9107272bdb52814", "score": "0.6059337", "text": "def _print_status(self):", "title": "" }, { "docid": "7ec1b0e5292e27123b7ab1b1313e37c8", "score": "0.60578746", "text": "def Print(self):\n\n\t\tif self.verbose:\n\n\t\t print (\"\\033[1m[HEADER]\\033[0m\")\n\t\t print (\"code:\\t\\t%s\" % self.kod)\n\t \tprint (\"version:\\t%s\" % self.ver)\n\t\t print (\"date and time:\\t%s\" % self.probid)\n\t\t print (\"dump number:\\t%s\" % self.knod)\n\t \tprint (\"number of histories:\\t%s\" % self.nps)\n\t\t print (\"number of pseudorandom numbers used:\\t%s\" % self.rnr)\n\t\t print (\"title: %s\" % self.title)\n\n\t\t if self.ntal>1:\n\t\t\t\tprint self.ntal, 'tallies:', self.ntals\n\t \telse:\n\t\t\t\tprint self.ntal, 'tally:', self.ntals\n\n\n\t\t if self.npert != 0:\n\t\t\t\tprint(\"number of perturbations: %s\" % self.npert)", "title": "" }, { "docid": "3ae89ceb5458d93e47b585849331fe25", "score": "0.6056793", "text": "def info(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['info']:\n self.print_lines(self.colored(('green', 'bold'), lines))", "title": "" }, { "docid": "eaf4d8e94d3d9938f15383fea0714f9f", "score": "0.6054277", "text": "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, 
**kwargs)", "title": "" }, { "docid": "ee11d3e0498aee62f958aa99a71cfbd9", "score": "0.60469395", "text": "def _verboseHeader(self):\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = f\"Running {name}.{methodName}\"\n print('{}\\n{}'.format(title, '-' * len(title)))", "title": "" }, { "docid": "f2d9eb6756960b3552db2b696e0da652", "score": "0.60462826", "text": "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "title": "" }, { "docid": "23aab15799d25953484d64aa8b4df8c6", "score": "0.60416037", "text": "def tell(self):\n print('Name {}, Age {}'. format(self.name, self.age), end=\" \")", "title": "" }, { "docid": "f3effda2b766e418237dcfc813dbd0df", "score": "0.60381395", "text": "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "title": "" }, { "docid": "f3effda2b766e418237dcfc813dbd0df", "score": "0.60381395", "text": "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "title": "" }, { "docid": "f3effda2b766e418237dcfc813dbd0df", "score": "0.60381395", "text": "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "title": "" }, { "docid": "f3effda2b766e418237dcfc813dbd0df", "score": "0.60381395", "text": "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "title": "" }, { "docid": "a0d00be76526d8214d943c4c5a2dd0b8", "score": "0.6037604", "text": "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "title": "" }, { "docid": "a0d00be76526d8214d943c4c5a2dd0b8", "score": "0.6037604", "text": "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "title": "" }, { "docid": "a0d00be76526d8214d943c4c5a2dd0b8", "score": "0.6037604", "text": "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "title": "" }, { "docid": "a0d00be76526d8214d943c4c5a2dd0b8", "score": "0.6037604", "text": "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "title": "" }, { "docid": "7caaaf3561d0fe7fad59c16ccb8a366b", "score": "0.6033988", "text": "def displayhook(self, obj):\n # reproduce the behavior of the standard displayhook, not printing None\n if obj is not None:\n print >> self.stdout, repr(obj)", "title": "" }, { "docid": "4692c828088e1d55aee6ceda566c9209", "score": "0.60241336", "text": "def disp(self, modulo=None): # TODO: rather assign opt['verb_disp'] as default?\r\n if modulo is None:\r\n modulo = self.opts['verb_disp']\r\n\r\n # console display\r\n if modulo:\r\n if (self.countiter-1) % (10 * modulo) < 1:\r\n self.disp_annotation()\r\n if self.countiter > 0 and (self.stop() or self.countiter < 4\r\n or self.countiter % modulo < 1):\r\n if self.opts['verb_time']:\r\n toc = self.elapsed_time()\r\n stime = str(int(toc//60))+':'+str(round(toc%60,1))\r\n else:\r\n stime = ''\r\n print(' '.join((repr(self.countiter).rjust(5),\r\n repr(self.countevals).rjust(7),\r\n '%.15e' % (min(self.fit.fit)),\r\n '%4.1e' % (self.D.max()/self.D.min()),\r\n '%6.2e' % self.sigma,\r\n '%6.0e' % (self.sigma * sqrt(min(self.dC))),\r\n '%6.0e' % (self.sigma * sqrt(max(self.dC))),\r\n stime)))\r\n # if self.countiter < 4:\r\n sys.stdout.flush()", "title": "" }, { "docid": "c3222b2377f3b440ac2f40529878f367", "score": "0.6002983", "text": "def show(self, *args, prefix=None):\n if prefix is None:\n prefix = '$'\n if self.verbose >= 2:\n print(prefix, *args)", "title": "" }, { "docid": "3b3caff1e76e17e25a994f3e61b87645", "score": "0.6000851", "text": "def Print (redirect = True) :\n lock = sys.hal_log_values.get(\"Lock\", False)\n if not lock :\n sys.hal_log_values [\"__log_display\"] = 
redirect", "title": "" }, { "docid": "14d24cf16e4bbdca6906602aafdb5d66", "score": "0.59955674", "text": "def print_usage_command(self):\n print self.get_usage_command()", "title": "" }, { "docid": "14d24cf16e4bbdca6906602aafdb5d66", "score": "0.59955674", "text": "def print_usage_command(self):\n print self.get_usage_command()", "title": "" }, { "docid": "600853c06fe81fd6fee931ff732a4b9b", "score": "0.5992973", "text": "def print_usage():\n print(helptxt)\n sys.exit(2)", "title": "" }, { "docid": "1ed296df45b3ac4834b54056e36505f0", "score": "0.5992596", "text": "def print_info(self, mode=COUNT):\n def partition_line(character, num):\n return character * num\n\n self.update_records() # trace records\n self.count_results() # statistical results\n\n #count mode (default) : print statistical results of all kernel\n if mode == self.COUNT:\n table_header = f\"\"\"\n {partition_line('=',73)}\n {_ti_core.arch_name(ti.cfg.arch).upper()} Profiler(count)\n {partition_line('=',73)}\n \"\"\"\n items_header = f\"\"\"\n [ % total count | min avg max ] Kernel name\n \"\"\"\n print(inspect.cleandoc(table_header))\n print(inspect.cleandoc(items_header))\n for key in self._statistical_results:\n result = self._statistical_results[key]\n fraction = result.total_time / self._total_time_ms * 100.0\n #message in one line\n print(\n \"[{:6.2f}% {:7.3f} s {:6d}x |{:9.3f} {:9.3f} {:9.3f} ms] {}\"\n .format(\n fraction,\n result.total_time / 1000.0,\n result.counter,\n result.min_time,\n result.total_time / result.counter, # avg_time\n result.max_time,\n result.name))\n print(f\"{partition_line('-',73)}\")\n #one-line summary\n print(f\"[100.00%] Total kernel execution time: \"\n f\"{self._total_time_ms/1000:7.3f} s \"\n f\"number of records: \"\n f\"{len(self._statistical_results)}\")\n print(f\"{partition_line('=',73)}\")\n\n #trace mode : print records of launched kernel\n if mode == self.TRACE:\n table_header = f\"\"\"\n {partition_line('=',73)}\n {_ti_core.arch_name(ti.cfg.arch).upper()} Profiler(trace)\n {partition_line('=',73)}\n \"\"\"\n items_header = f\"\"\"\n [ % | time ] Kernel name\n \"\"\"\n print(inspect.cleandoc(table_header))\n print(inspect.cleandoc(items_header))\n for record in self._traced_records:\n fraction = record.kernel_time / self._total_time_ms * 100.0\n #message in one line\n print(\"[{:6.2f}% |{:9.3f} ms] {}\".format(\n fraction, record.kernel_time, record.name))\n print(f\"{partition_line('-',73)}\")\n #one-line summary\n print(f\"[100.00%] Total kernel execution time: \"\n f\"{self._total_time_ms/1000:7.3f} s \"\n f\"number of records: {len(self._traced_records)}\")\n print(f\"{partition_line('=',73)}\")", "title": "" }, { "docid": "47cc27867722d0a582745e2ffe8e5769", "score": "0.59856683", "text": "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "title": "" }, { "docid": "5e81eda7789f5b2d041de515ec0e48f6", "score": "0.59836775", "text": "def command_info(fmt, *args, **kwargs):\n sys.stderr.write(fmt.format(*args, **kwargs))", "title": "" }, { "docid": "73524c15b2fe7fce5a0ca7115c8357ae", "score": "0.5978123", "text": "def output_debug_info(self):", "title": "" }, { "docid": "cbdb7eeffe3a740adbbc1833c864d1e2", "score": "0.5977601", "text": "def _print_results_header(self):\n print(\"\\033[94m\"+\"Summary\\n\"+\"-\"*32+\"\\033[0m\")\n 
print(\"Subroutine: {}\".format(self.mc_sample.__name__))\n print(\"Num Runs: {:2.1e}\".format(self.num_runs))\n print(\"-\"*32+'\\n')", "title": "" }, { "docid": "c422a21249edddf4127119ce5efc13bf", "score": "0.5974553", "text": "def display(config, transfo, learner, *args):\n\n stderr.write(\"Config is %s\\n\" % str(config))\n stderr.write(\"Transfo is %s\\n\" % str(ktpipes.KtPipe.from_json(config[transfo])))\n stderr.write(\"Learner is %s\\n\" % str(learner))", "title": "" }, { "docid": "503639bb1c73c0cbceee4247e545853e", "score": "0.5964199", "text": "def disp(self, modulo=None):\n if modulo is None:\n modulo = self.opts['verb_disp']\n\n # console display\n\n if modulo:\n if not hasattr(self, 'has_been_called'):\n self.disp_annotation()\n\n if self.countiter > 0 and (self.stop() or self.countiter < 4\n or self.countiter % modulo < 1):\n try:\n print(' '.join((repr(self.countiter).rjust(5),\n repr(self.countevals).rjust(6),\n '%.15e' % (self.pareto_front_cut.hypervolume),\n '%4.1e' % (np.median([kernel.D.max() / kernel.D.min()\n if not kernel.opts['CMA_diagonal'] or kernel.countiter > kernel.opts['CMA_diagonal']\n else max(kernel.sigma_vec*1) / min(kernel.sigma_vec*1) \\\n for kernel in self.kernels])),\n '%6.2e' % (np.median([kernel.sigma for kernel in self.kernels])),\n '%6.0e' % (np.median([kernel.sigma * min(kernel.sigma_vec * kernel.dC**0.5) \\\n for kernel in self.kernels])),\n '%6.0e' % (np.median([kernel.sigma * max(kernel.sigma_vec * kernel.dC**0.5) \\\n for kernel in self.kernels]))\n )))\n except AttributeError:\n pass\n # if self.countiter < 4:\n # try:\n # sys.stdout.flush() : error in matlab:\n # Python Error: AttributeError: 'MexPrinter' object has no attribute 'flush'\n\n # except AttributeError:\n # pass\n return self", "title": "" }, { "docid": "39ae272ffa87823addcc80787b22f2bc", "score": "0.5962748", "text": "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "title": "" }, { "docid": "efd83f7289d75dc63417764d9206300f", "score": "0.5956349", "text": "def print_performance_info(self):\n pass", "title": "" }, { "docid": "98a55df112a982dbc1c03ee732300ed2", "score": "0.59546804", "text": "def print_help(self):\n print self.get_help()", "title": "" }, { "docid": "24e7c6829d0352906f47c87b85912081", "score": "0.59542274", "text": "def info():\n print __doc__\n sys.exit(1)", "title": "" }, { "docid": "7a4aed2384122aa1abdde7d4a06b1300", "score": "0.5940534", "text": "def print_help():\n\tprint(\"Help text\")", "title": "" }, { "docid": "76da47eb688d07ef6004f8d5a97ff894", "score": "0.59390724", "text": "def console(out):\n logging.debug(out)\n try:\n print(out)\n except UnicodeEncodeError:\n print(re.sub(r'([^\\s\\w]|_)+', '', out))", "title": "" }, { "docid": "f258de8c165df0336c4f269ce4869672", "score": "0.5936291", "text": "def status(s: str):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "title": "" }, { "docid": "5d28fea3e20df6f5fec63360f16a8fb3", "score": "0.5933878", "text": "def show(self):\n self._logger.debug(\"show\")", "title": "" }, { "docid": "5f6b9dd5bc015d926c6abf8cc17ce289", "score": "0.59280145", "text": "def dump_to_console(name, results, benchmark):\r\n for a in affinities:\r\n print(\" \"+a)\r\n res = results[a]\r\n 
print(\"\\t\"+\"#thr\"+\"\\t\"+\"time\"+\"\\t\"+\"spdup\"+\"\\t\"+\"effc\"+\"\\t\"+\"raw\")\r\n for i in range(len(res[\"threads\"])):\r\n print(\"\\t{0}\\t{1:.2f}\\t{2:.2f}\\t{3:.4f}\\t{4}\".format(\r\n res[\"threads\"][i],\r\n res[\"avg\"][i],\r\n res[\"speedup\"][i],\r\n res[\"efficiency\"][i],\r\n benchmark[a][res[\"threads\"][i]]))\r\n print()", "title": "" }, { "docid": "993d5f4581d9b65dc916ca9afd5ac527", "score": "0.5921753", "text": "def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))", "title": "" }, { "docid": "0215c61b75e74f0d1647d7ed950bf3d5", "score": "0.59190506", "text": "def print_curinfo(text: str):\n templateName = \"... {:s} ...\"\n print(templateName.format(text), flush=True)", "title": "" }, { "docid": "4591389302197a542ad2ac6dedbbc856", "score": "0.5912126", "text": "def debug_print(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n print('\\nPosition')\n print(self.tetromino.position())\n print('\\nBlock coordinates')\n print(self.tetromino.block_coordinates())\n print('\\nBoard')\n print(self.board)\n print('\\nBoard heights')\n print(self.board.get_height())\n\n if self.pause:\n print('\\nPaused')", "title": "" }, { "docid": "5890997b7befe29438badd8a7e831063", "score": "0.5912112", "text": "def info(msg):\n click.secho(msg, fg='blue')", "title": "" }, { "docid": "4f787bba68a8d6b47c894f4c91c69524", "score": "0.5909794", "text": "def eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)", "title": "" }, { "docid": "c782a81672bb5c69a2a3a113f07552c6", "score": "0.59041977", "text": "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "title": "" }, { "docid": "2312b2d174f8b52cd90954ba907a51e4", "score": "0.5902542", "text": "def print_intro(self):\n \n print('Did you know that birds hold the record for longest animal migrations?')", "title": "" }, { "docid": "9c86133874481090d4a674e25b8df06a", "score": "0.5901542", "text": "def show_debug_msg(self) -> None:\n if self.debug_mode:\n for point in self.points:\n print(point.debug_info())", "title": "" }, { "docid": "f79f7c73ecf6b8f26a49de9aa223144f", "score": "0.58977586", "text": "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "title": "" }, { "docid": "e6fc074b7acaa95303ada7cbf7b5dcdb", "score": "0.5892921", "text": "def print_results(self):\n pass", "title": "" }, { "docid": "9b3659716a2a4edb1eb9f7309995f437", "score": "0.5889978", "text": "def display(self,message):\r\n \r\n print(message)", "title": "" }, { "docid": "e34fbafaaf78b5c0c6e2b91be9eb6eaf", "score": "0.5888109", "text": "def print_stats():\n if spritegroup_stats[0] > 0:\n generic.print_info(\"Concurrent spritegroups: {}/{} ({})\".format(spritegroup_stats[0], total_action2_ids, str(spritegroup_stats[1])))\n if a2register_stats[0] > 0:\n generic.print_info(\"Concurrent Action2 registers: {}/{} ({})\".format(a2register_stats[0], total_tmp_locations, str(a2register_stats[1])))", "title": "" }, { "docid": "8d52854cb7e95bec630d3f93b5ae62d8", "score": "0.58861643", "text": "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "title": "" }, { "docid": "60fa66c6d74815dfa5453a07e65c025a", "score": "0.588386", "text": "def do_info(self, args):\n if self.exploit is 
None:\n eprint(colorize('No exploit set; nothing to describe. Select an exploit with the \\'use\\' command',\n 'cyan'))\n else:\n eprint(colorize('\\n ' + self.exploit.DESCRIPTION + '\\n', 'green'))", "title": "" }, { "docid": "77a6c8490e3bc6d51c11baa24e8902b3", "score": "0.58837146", "text": "def display(self):\r\n print(self.title, 'written by', self.author)", "title": "" }, { "docid": "8789d5ec993f45140383ab5903e6fd30", "score": "0.58832765", "text": "def nice_output(self):\n return 'Inning {0}'.format(self.num)", "title": "" }, { "docid": "181ca8a5e3964f3cf586eb88317e869b", "score": "0.5878443", "text": "def printme(self):\n sys.stdout.write(self._header)\n for k in range(len(self)):\n sys.stdout.write(self.line(k))", "title": "" }, { "docid": "8ec6a136fcc3ef1a8149ad0d65b4e8bf", "score": "0.5875838", "text": "def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))", "title": "" }, { "docid": "0d5bba2b2ac01c358eb71b743ab723a2", "score": "0.58727866", "text": "def info(*objects, file=sys.stderr, flush=True, style=None, **kwargs):\r\n with ScopedColoredStream(file, style, flush_on_exit=flush) as stream:\r\n print(*objects, file=stream, flush=False, **kwargs)", "title": "" }, { "docid": "6865d2029eaaba9a9cf013c30ba595e4", "score": "0.5872619", "text": "def print_info(c, timestamp):\r\n print(f\"\\n[{timestamp}] [{id(c)}] [Fitness: {c.fitness()}]\\n \" +\r\n f\"Age: {c.age} seconds, F.Eaten: {c.food_eaten}, P.Eaten: {c.poison_eaten}\\n\" +\r\n f\"currHP: {c.health}, Gen: {c.gen}, Childs: {c.childs}\\n\" +\r\n f\"DNA: {c.dna}\\n\" +\r\n f\"FoodAttr: {c.food_attraction}, PoisonAttr: {c.poison_attraction}\\n\" +\r\n f\"FoodDist: {c.food_dist}, PoisonDist: {c.poison_dist}\\n\" +\r\n f\"MaxHealth: {c.max_health}, MaxVel: {c.max_vel}, Size: {c.size}\\n\" +\r\n f\"MaxSteer: {c.max_steer_force}, DirAngleMult: {c.dir_angle_mult}\\n\")", "title": "" }, { "docid": "90b65a4ca13d88179602a7a2d6a8a0a7", "score": "0.5864605", "text": "def showUsage():\n None", "title": "" }, { "docid": "d7b334546b49716cd8314c2d19972802", "score": "0.5864389", "text": "def tprint(msg):\n sys.stdout.write(msg + '\\n')\n sys.stdout.flush()", "title": "" }, { "docid": "d7b334546b49716cd8314c2d19972802", "score": "0.5864389", "text": "def tprint(msg):\n sys.stdout.write(msg + '\\n')\n sys.stdout.flush()", "title": "" }, { "docid": "5b666b1319aae96f5e3bf8da8d66488a", "score": "0.58622056", "text": "def show_msg(self):\n if self.result and self.success_msg:\n print color_str('g', '\\n'.join(self.success_msg))\n elif self.result == False and self.fail_msg:\n print color_str('r', '\\n'.join(self.fail_msg))\n if self.stat_msg:\n print color_str('b', '\\n'.join(self.stat_msg))", "title": "" }, { "docid": "a3ece84400bad66e9266c034377f1bb5", "score": "0.58543503", "text": "def do_show(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n print(obj)", "title": "" }, { "docid": "61dd0d33e0f09c2b206201a49e0f42b0", "score": "0.58426684", "text": "def print_status(self):\n print \"Zombie has\" + super(Zombie, self).print_status()", "title": "" }, { "docid": "3d509be128efd5bdfd4fe81d55a49b11", "score": "0.5839623", "text": "def info():\n print(\"Made using the OOP RPG game creator (c) Claire.\\n\")", "title": "" } ]
7b4e00c839b2b884cd8f2d137e5deb74
Maps each S. cerevisiae ORF to ortholog ORFs in 8 closely related yeast species
[ { "docid": "06afd725cd38a46eb969ad288ccea146", "score": "0.48885584", "text": "def blast_closely_related_species(ortho_species_dir,scer_DB_dir,outDir,summaryFile,scer_closely_related): \n\t\n\tprint (\"----------- Find orthologues in closely related species -----------\")\n\t\n\tif not os.path.exists(outDir):\n\t\tos.makedirs(outDir)\n\t\n\tfor species2 in scer_closely_related:\n\t\trun_ortholog_mapping(\"Scer\", species2,scer_DB_dir,os.path.join(ortho_species_dir,species2+'.aa'),outDir)\n\t\n\t\n\tscer_orthologs = read_scer_orthologs([outDir],[scer_closely_related],os.path.join(outDir,summaryFile))\n\n\treturn scer_orthologs", "title": "" } ]
[ { "docid": "5a11091e3c63cbba32f7d82ce5c6a064", "score": "0.6164072", "text": "def ortho_final_coord(ortho_dict):#rna_ortho_dict,\n\tfinal_coord_dict = dict()\n\tfor k, v in ortho_dict.iteritems():\n\t\tupstream = v[0]\n\t\tdownstream = v[1]\n\t\tuscafs = set()\n\t\tdscafs = set()\n\t\tfor gene in upstream:\n\t\t\tuscafs.add(gene[1])\n\t\tfor gene in downstream:\n\t\t\tdscafs.add(gene[1])\n\t\t#print \"up\", uscafs\n\t\t#print \"down\", dscafs\n\t\tcommon_scaf = uscafs.intersection(dscafs)\n\t\t#different_scaf = uscafs.difference(dscafs)\n\t\t#print \"common\", common_scaf\n\t\t#if len(different_scaf) >= 1:\n\t\t#\tprint \"different\", different_scaf, k\n\t\tfor x in common_scaf:\n\t\t\t#print \"This is x\", x\n\t\t\tupos = []\n\t\t\tfor gene in upstream:\n\t\t\t\t#print \"This is gene\", gene\n\t\t\t\tif gene[1] == x:\n\t\t\t\t\t#print \"This is gene[1]\", gene[1]\n\t\t\t\t\t#print \"This is common_scaf\", common_scaf\n\t\t\t\t\t#upos.append(gene[0])\n\t\t\t\t\tupos.append(gene[2])\n\t\t\t\t\tupos.append(gene[3])\n\t\t\t\t\t#print upos\n\t\t\tdpos = []\n\t\t\tfor gene in downstream:\n\t\t\t\tif gene[1] == x:\n\t\t\t\t\t#dpos.append(gene[0])\n\t\t\t\t\tdpos.append(gene[2])\n\t\t\t\t\tdpos.append(gene[3])\n\t\t#print \"This is upos\", upos\n\t\t#print \"This is upos[1:]\", upos[1:]\n\t\t#print \"This is dpos\", dpos\n\t\t#ex upos : ['3815439', '3822866', '3808823', '3809996']\n\t\t#ex dbos : ['3823313', '3826021', '3826740', '3828621', '3829156', '3829994', '3831313', '3855168']\n\t\t#quit()\n\t\t#final_coord_dict[k] = [x, max(upos), min(dpos)]\n\t\t#print type(upos[1]), its a string, want int!\n\t\tupos_num = [int(n) for n in upos]\n\t\t#print \"This is upos\", upos\n\t\t#print \"this is upos_num\", upos_num\n\t\tdpos_num = [int(n) for n in dpos]\n\t\tmerged_pos = upos_num + dpos_num\n\t\tprint \"merged\", merged_pos\n\t\tprint min(merged_pos)\n\t\tfinal_coord_dict[k] = [x, min(merged_pos), max(merged_pos)]\n\t\tprint k, final_coord_dict[k]\n\t\t#avg_upos = sum(upos_num)/len(upos_num)\n\t\t#avg_dpos = sum(dpos_num)/len(dpos_num)\n\t\t#print k\n\t\t#print upos, dpos\n\t\t#print max(upos_num), min(dpos_num), min(upos_num), max(upos_num)\n\t\t#if max(upos_num) < min(dpos_num):\n\t\t#\tfinal_coord_dict[k] = [x, min(upos_num), max(dpos_num)]\n\t\t#elif min(upos_num) > max(upos_num):\n\t\t#\tfinal_coord_dict[k] = [x, min(dpos_num), max(upos_num)]\n\t\t#print k, x, max(upos), min(dpos)\n\treturn final_coord_dict\n\t\t#'FBtr0342867': ['scaffold_0', '3442611', '3447776'], 'FBtr0342862': ['scaffold_0', '3442611', '3447776']", "title": "" }, { "docid": "4b9e5d948f21a34ea19c868e78bcbf7e", "score": "0.5968028", "text": "def test_orfs(self):\n # ref_dic is a large external file\n ref_dict=fasta2dic(\"/data/reference/ce10_ucsc.fa\")\n bigg_one=self.bigg[30]\n bigg_one.bind_chroseq(ref_dict, gap=0, intron=False)\n print((bigg_one.seq_chro))\n ans=bigg_one.find_orfs_with_trans()\n\n print(ans)\n print(bigg_one)", "title": "" }, { "docid": "4fc4df950dee893fbd5f9e891a87b0d0", "score": "0.5848065", "text": "def map_system():\n list_of_orbits = {}\n data = open(\"day06.txt\", 'r')\n\n for pair in data:\n center, orbiting_body = pair.rstrip().split(\")\")\n\n # for the \"parent\" body create a list of objects that orbit it\n for_body = list_of_orbits.get(center)\n if for_body:\n for_body.append(orbiting_body)\n else:\n list_of_orbits[center] = [orbiting_body]\n\n data.close()\n\n return list_of_orbits", "title": "" }, { "docid": "d954395929ba63b6011562cd08994e1c", "score": "0.5660125", "text": "def 
read_human_orf(human_ref_with_seq, human91_ref):\n ref_df_91 = pd.read_csv(human91_ref)\n ref_df_ensembl = pd.read_csv(human_ref_with_seq)\n ref_df_91 = ref_df_91.fillna(-1)\n\n # merge this two df together\n # check if there are NAs in entrez gene ID and entrez gene symbol\n merged_df = pd.merge(ref_df_91, ref_df_ensembl, left_on=[\"entrez_gene_id\", \"entrez_gene_symbol\"],\n right_on=[\"entrez_gene_id\", \"symbol\"], how=\"left\")\n merged_df[\"grch37_filled\"] = merged_df[\"cds_seq37\"].fillna(merged_df[\"cds_seq\"])\n merged_df[\"grch38_filled\"] = merged_df[\"cds_seq38\"].fillna(merged_df[\"cds_seq\"])\n\n merged_df[\"entrez_gene_id\"] = merged_df[\"entrez_gene_id\"].astype(int)\n merged_df['orf_name'] = merged_df['orf_id'].astype(str) + \"_\" + merged_df['entrez_gene_id'].astype(str) + \"_G0\" + merged_df['Pool group #'].astype(str) + \"_\" + merged_df['entrez_gene_symbol'].astype(str)\n\n # humanallORF = pd.read_csv(human_ref)\n # humanallORF = humanallORF[[\"ORFID\", \"ensembl_transcript_id\", \"ensembl_protein_id\", \"ensembl_gene_id\", \"uniprot_AC_iso\", \"symbol\", \"entrez_gene_id\", \"CDS\"]]\n #\n # humanallORF[\"entrez_gene_id\"] = humanallORF[\"entrez_gene_id\"].astype(int)\n # humanallORF['orf_name'] = humanallORF['entrez_gene_id'].astype(str) + \"_\" + humanallORF['entrez_gene_symbol'].astype(str)\n output_file = \"/home/rothlab/rli/02_dev/06_pps_pipeline/target_orfs/human_summary.csv\"\n merged_df.to_csv(output_file)\n return merged_df", "title": "" }, { "docid": "c684ec9da864196d980b7a3621c9164e", "score": "0.5639827", "text": "def mapobs():\n\t\tpass", "title": "" }, { "docid": "dd10dc282c69639b29ef1c17a5300827", "score": "0.56163377", "text": "def species_vs_loci(self, outfile_name):\n \n species_vs_loci = {}\n for record in self.records:\n organism = 'undef'\n for feature in record.features:\n if feature.type == 'source':\n if 'organism' in feature.qualifiers.keys():\n organism = feature.qualifiers['organism'][0]\n if not organism in species_vs_loci.keys():\n species_vs_loci[organism] = {} \n for feature in record.features:\n if not feature.type == 'source':\n for locus in self.loci:\n if not locus.name in locus.aliases:\n locus.aliases.append(locus.name)\n if 'gene' in feature.qualifiers.keys():\n if feature.qualifiers['gene'][0] in locus.aliases:\n if not locus.name in species_vs_loci[organism].keys():\n species_vs_loci[organism][locus.name] = 1\n else:\n species_vs_loci[organism][locus.name] += 1\n elif 'product' in feature.qualifiers.keys():\n if feature.qualifiers['product'][0] in locus.aliases:\n if not locus.name in species_vs_loci[organism].keys():\n species_vs_loci[organism][locus.name] = 1\n else:\n species_vs_loci[organism][locus.name] += 1\n with open(outfile_name, 'wb') as csvfile:\n linewriter = csv.writer(csvfile, delimiter='\\t',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n loci_names = []\n for g in self.loci:\n loci_names.append(g.name)\n linewriter.writerow(['species']+loci_names)\n for organism in sorted(list(species_vs_loci.keys())):\n line = [organism]\n for name in loci_names:\n if name in species_vs_loci[organism].keys():\n line.append(str(species_vs_loci[organism][name]))\n else:\n line.append('0')\n linewriter.writerow(line)", "title": "" }, { "docid": "81038c061cb6876569dfde007457080e", "score": "0.5443136", "text": "def read_yeast_orf(HIP_target_ORFs, other_target_ORFs):\n HIP_df = pd.read_csv(HIP_target_ORFs)\n other_target_ORFs = pd.read_csv(other_target_ORFs)\n\n HIP_df = HIP_df[[\"ORF_id\", \"ORF_NAME_NODASH\", \"len(seq)\", 
\"SYMBOL\", \"plate\"]]\n HIP_df[\"db\"] = \"HIP\"\n HIP_df = HIP_df.rename(columns={\"ORF_id\": \"orf_name\"})\n other_ORFs = other_target_ORFs[[\"orf_name\", \"ORF_NAME_NODASH\", \"src_collection\", \"plate\"]]\n other_ORFs = other_ORFs.rename(columns={\"src_collection\": \"db\"})\n #other_ORFs['plate'] = 'scORFeome-' + other_ORFs['plate'].astype(str)\n combined = pd.concat([HIP_df, other_ORFs], axis=0, ignore_index=True)\n \n output_file = \"/home/rothlab/rli/02_dev/06_pps_pipeline/target_orfs/yeast_summary.csv\"\n combined.to_csv(output_file)\n return combined", "title": "" }, { "docid": "23b96d3796c068471b20bf4ada18e313", "score": "0.5400288", "text": "def make_grid_es_rhos(self, Fis, FisL):\n\n self.es_plus_1 = []\n self.rhos_plus_1 = []\n\n self.es_plus_1.append(0)\n self.rhos_plus_1.append(0)\n for component in self.components:\n self.es_plus_1[-1] = self.es_plus_1[-1] + Fis[component.name][0] * component.e\n self.rhos_plus_1[-1] = self.rhos_plus_1[-1] + Fis[component.name][0] * component.rho\n\n for j in range(len(self.ptotbio) - 1):\n for i in range(self.ptotbio[j]+1, self.ptotbio[j + 1] + 1):\n self.es_plus_1.append(0)\n self.rhos_plus_1.append(0)\n for component in self.components:\n self.es_plus_1[-1] = self.es_plus_1[-1] + Fis[component.name][i] * component.e\n self.rhos_plus_1[-1] = self.rhos_plus_1[-1] + Fis[component.name][i] * component.rho\n\n self.esL_plus_1 = [e for e in self.es_plus_1]\n self.rhosL_plus_1 = [rho for rho in self.rhos_plus_1]\n\n for j in range(len(self.ptotbio) - 1):\n self.esL_plus_1[self.ptotbio[j]] = 0\n self.rhosL_plus_1[self.ptotbio[j]] = 0\n for component in self.components:\n self.esL_plus_1[self.ptotbio[j]] = self.esL_plus_1[self.ptotbio[j]] + FisL[component.name][self.ptotbio[j]] * component.e\n self.rhosL_plus_1[self.ptotbio[j]] = self.rhosL_plus_1[self.ptotbio[j]] + FisL[component.name][self.ptotbio[j]] * component.rho", "title": "" }, { "docid": "e95e2b8e853bdbb765a12c6a83aaf752", "score": "0.5377103", "text": "def render_seg_ord_map(H, W, ori_char_bb_list, ori_label, num_lexicon, label_len, sigma,\n shrunk_ratio=0, ord_map_mode='loc'):\n\n char_bb_list = ori_char_bb_list\n label = ori_label\n\n num_char = len(char_bb_list)\n\n seg_map = np.zeros((H, W))\n ord_map = np.zeros((H, W))\n bb_pos_x = np.zeros((num_char, 4))\n bb_pos_y = np.zeros((num_char, 4))\n diff_bb_pos_x = np.zeros((num_char, 4))\n diff_bb_pos_y = np.zeros((num_char, 4))\n label = np.array(label)\n\n\n center_pos_x, center_pos_y = np.where(seg_map > -1)\n\n for i, char_bb in enumerate(char_bb_list):\n bb_pos_x[i] = char_bb.transpose([1, 0])[:, 1]\n bb_pos_y[i] = char_bb.transpose([1, 0])[:, 0]\n\n center_pos_x = np.expand_dims(center_pos_x, 1)\n center_pos_y = np.expand_dims(center_pos_y, 1)\n\n center_pos_x_ = np.expand_dims(center_pos_x, 2) # (H*W, 1, 1)\n center_pos_y_ = np.expand_dims(center_pos_y, 2)\n\n for i in range(4):\n diff_bb_pos_x[:, i] = bb_pos_x[:, (i+1)%4] - bb_pos_x[:, i]\n diff_bb_pos_y[:, i] = bb_pos_y[:, (i+1)%4] - bb_pos_y[:, i]\n\n bb_center_x = bb_pos_x.mean(axis=-1) # (num_char,)\n bb_center_y = bb_pos_y.mean(axis=-1)\n\n temp_x = np.expand_dims(bb_center_x, 1)\n temp_y = np.expand_dims(bb_center_y, 1)\n\n bb_pos_x = temp_x + (bb_pos_x - temp_x) * (1 - shrunk_ratio)\n bb_pos_y = temp_y + (bb_pos_y - temp_y) * (1 - shrunk_ratio)\n\n bb_pos_x = np.expand_dims(bb_pos_x, 0) # (1, num_char, 4)\n bb_pos_y = np.expand_dims(bb_pos_y, 0)\n\n diff_bb_pos_x = np.expand_dims(diff_bb_pos_x, 0) # (1, num_char, 4)\n diff_bb_pos_y = np.expand_dims(diff_bb_pos_y, 
0)\n\n bb_center_x = np.expand_dims(bb_center_x, 0)\n bb_center_y = np.expand_dims(bb_center_y, 0)\n\n temp_x = (center_pos_x_ - bb_pos_x)\n temp_y = (center_pos_y_ - bb_pos_y)\n\n # (H*W, num_char, 4)\n cross_prods = temp_x * diff_bb_pos_y - temp_y * diff_bb_pos_x\n idxes, label_idxes = np.where((cross_prods > 0).sum(axis=-1) == 4)\n idx_r, idx_c = idxes // W, idxes % W\n seg_map[idx_r, idx_c] = label[label_idxes]\n\n ord_dis = (center_pos_x - bb_center_x) ** 2 + (center_pos_y - bb_center_y) ** 2\n ord_dis = np.exp(- ord_dis / (2 * sigma ** 2))\n\n ord_dis = ord_dis.reshape((H, W, num_char))\n ord_dis = np.pad(ord_dis, ((0, 0), (0, 0), (0, label_len - num_char)), 'constant', constant_values = 0)\n ord_dis = np.transpose(ord_dis, [2, 0, 1]) # (label_len, H, W)\n\n\n if ord_map_mode == 'seg':\n ord_map[idx_r, idx_c] = label_idxes + 1\n else:\n temp_z, temp_x, temp_y = np.where(ord_dis > 0.2)\n ord_map[temp_x, temp_y] = temp_z + 1\n\n bb_center_x = np.expand_dims(bb_center_x[0], 1) / H * 2 - 1\n bb_center_y = np.expand_dims(bb_center_y[0], 1) / W * 2 - 1\n\n\n bb_cen_pos = np.concatenate([bb_center_x, bb_center_y], axis=1) # (num_char, 2)\n bb_cen_pos = np.pad(bb_cen_pos, ((0, label_len - num_char), (0, 0)),\n 'constant',\n constant_values = 0)\n\n\n out = {}\n out['seg_map'] = seg_map\n out['ord_map'] = ord_map\n out['loc_map'] = ord_dis.max(0)\n out['cen_pos'] = bb_cen_pos\n\n return out", "title": "" }, { "docid": "faaae085fbd5d70f5b838f42907cf9b4", "score": "0.5306171", "text": "def map_orbitees( pairs ):\n return dict( [ (orbiter, orbitee) for orbitee, orbiter in map( lambda x: x.split(')'), pairs ) ] )", "title": "" }, { "docid": "560a250b54aceb4ae93e5b837b4b2296", "score": "0.529674", "text": "def readGeodstCoordMap2D(self):\n\n record = self.getRecord()\n # Number of unique hex assemblies - this is N in the comments above\n nAssem = self.fc[\"nintxy\"]\n # Number of lateral surfaces per hex assembly (always 6)\n nSurf = self.fc[\"nSurf\"]\n\n numExternalSurfaces = self._getNumExtSurfaces()\n\n # Initialize numpy arrays to store all hex ordering (and hex surface ordering) data.\n # We don't actually use incomingPointersToAllAssemblies (basically equivalent to nearest neighbors indices),\n # but it's here in case someone needs it in the future.\n\n # Index pointers to INCOMING partial currents to this assembly\n self.incomingPointersToAllAssemblies = numpy.zeros((nAssem * nSurf), dtype=int)\n # Index pointers to INCOMING partial currents on core outer boundary\n self.externalCurrentPointers = numpy.zeros((numExternalSurfaces), dtype=int)\n # Index pointers to DIF3D GEODST ordering of each assembly\n self.geodstCoordMap = numpy.zeros((nAssem), dtype=int)\n\n # surfaceIndex = numpy.zeros((nAssem*nSurf))\n # nodeIndex = numpy.zeros((nAssem*nSurf))\n\n # Loop through all surfaces of all assemblies in the x-y plane.\n for i in range(nAssem):\n for j in range(nSurf):\n self.incomingPointersToAllAssemblies[nSurf * i + j] = record.getInt()\n # surfaceIndex[nSurf*i + j] = math.fmod(nSurf*i+j,nSurf) + 1\n # nodeIndex[nSurf*i + j] = (nSurf*i+j)/nSurf + 1\n\n # Loop through all external surfaces on the outer core boundary (usually vacuum).\n for i in range(numExternalSurfaces):\n self.externalCurrentPointers[i] = record.getInt()\n\n # Loop through all assemblies.\n for i in range(nAssem):\n self.geodstCoordMap[i] = record.getInt()", "title": "" }, { "docid": "4b580d9fc27e4b0eee20d1e2df5d1c63", "score": "0.52674496", "text": "def _compute_adjusted_mappings(self):\n S = self.S\n L = self.L\n try:\n 
d = S.rank\n except AttributeError:\n print(\"Species tree not initialized; Assign ranks before computing mappings.\")\n raise\n\n try:\n L.locus_id\n except AttributeError:\n print(\"Locus tree not initialized; Assign locus IDs before computing mappings.\")\n raise\n\n\n # I mapping\n pureM = dict((g.nid, S) for g in L.traverse(strategy=\"postorder\")) # I from single loci\n combinedM = pureM.copy() # I at junction nodes from neighbouring loci\n smap = dict((g, S) for g in L)\n for g in L.traverse(strategy=\"postorder\"):\n if g.is_leaf():\n g_species = g.species\n pureM[g] = g_species\n combinedM[g] = g_species\n smap[g] = g_species\n g.I = 0\n g.P = 0\n else:\n g.P = None # init\n # computing pureM\n same_locus = [c for c in g.children if c.locus_id == g.locus_id]\n same_pureM = [pureM[c] for c in same_locus if pureM[c] is not None]\n if not same_locus or not same_pureM:\n pureM[g] = None\n warn(\"Detected an extinct lineage (node id %i); This may indicate corrupted data.\" % g.nid)\n print(g.get_ascii(attributes=['name', 'nid']))\n elif len(same_pureM) == 1:\n pureM[g] = same_pureM[0]\n else:\n pureM[g] = S.get_common_ancestor(same_pureM)\n # computing combinedM and I mapping\n all_pureM = [pureM[c] for c in g.children if pureM[c] is not None]\n if not all_pureM:\n combinedM[g] = None\n g.I = None\n elif len(all_pureM) == 1:\n combinedM[g] = all_pureM[0]\n g.I = combinedM[g].rank\n else:\n combinedM[g] = S.get_common_ancestor(all_pureM)\n g.I = combinedM[g].rank\n\n # P mapping\n for s in S.traverse():\n s.lastvisited = None\n\n leaves = [l for l in L]\n for i in range(0, d + 1):\n for lid, l1 in enumerate(leaves):\n if smap[l1].rank == i:\n for l2 in leaves[(lid + 1):]:\n if smap[l2] == smap[l1]:\n p = l1.get_common_ancestor(l2)\n locus_set = [p.locus_id] + [c.locus_id for c in p.children]\n if p.P is None and {l1.locus_id, l2.locus_id}.issubset(locus_set):\n p.P = i\n smap[l1] = smap[l1].up", "title": "" }, { "docid": "5ed09c68708f71e7105fca3a5e958df5", "score": "0.52112347", "text": "def define_hydroyears(outmaps, climFile, startYear, endYear, metadata, logger):\r\n # initialize general settings\r\n # prepare climatology netCDF files\r\n climatology_datetimeObj = []\r\n climatology_datetimeObj_upper = []\r\n climatology_datetimeObj_lower = []\r\n \r\n for n in np.arange(0,12):\r\n climatology_datetimeObj_lower.append(datetime.datetime(startYear,n+1,1,0,0))\r\n if n == 11:\r\n climatology_datetimeObj_upper.append(datetime.datetime(endYear+1,1,1,0,0))\r\n else:\r\n climatology_datetimeObj_upper.append(datetime.datetime(endYear,n+2,1,0,0))\r\n \r\n climatology_datetimeObj.append(datetime.datetime((endYear-startYear)/2+startYear,n+1,16,0,0))\r\n\r\n # first derive a climatology\r\n climatology, parUnit , dummy_ncfile = max_climatology(outmaps, 'qc', startYear, endYear, logger) # , logger\r\n # write climatology ref to file\r\n logger.info('writing climatology to \"' + climFile + '\"')\r\n nc_src = nc.Dataset(dummy_ncfile, 'r')\r\n try:\r\n nc_trg = nc.Dataset(climFile, 'a')\r\n except:\r\n prepare_nc_clim(nc_src, climFile, climatology_datetimeObj, climatology_datetimeObj_upper, climatology_datetimeObj_lower, metadata, logger)\r\n nc_trg = nc.Dataset(climFile, 'a')\r\n nc_trg.createVariable('qc','f4',('time','lat','lon',),zlib=True, fill_value=-9999.)\r\n\r\n nc_trg.variables['qc'][:] = climatology\r\n\r\n # close files\r\n nc_src.close()\r\n nc_trg.sync()\r\n nc_trg.close()", "title": "" }, { "docid": "97fe1bb6a4386464bc512b3aa9b1ec2e", "score": "0.5209274", "text": "def 
find_all_ORFs(dna):\n #print dna\n half_the_codons = [] #sets open list for half the codons since we are not using the reverse string of DNA\n for i in range(0,3): # i = 0, i = 1, i = 2\n half_the_codons += find_all_ORFs_oneframe(dna[i:]) #uses offsets to find all the codons for one strand of DNA\n \n# print half_the_codons\n return half_the_codons\n \n \n # YOUR IMPLEMENTATION HERE", "title": "" }, { "docid": "4cd255844f2f04a91a16f703342d7da5", "score": "0.51885563", "text": "def generate_map(self,cutoff,modelfile):\n vor_instance = Vor()\n vor_instance.runall(modelfile,cutoff)\n index = vor_instance.get_index()\n\n icofrac = []\n index = [index[i].strip().split() for i in range(0,len(index))]\n for i in range(0,len(index)):\n for j in range(0,len(index[i])):\n try:\n index[i][j] = int(index[i][j])\n except:\n try:\n index[i][j] = float(index[i][j])\n except:\n pass\n index[i] = index[i][6:11]\n icofrac.append(int( float(index[i][2])/float(sum(index[i]))*100 ))\n\n model = Model(modelfile)\n atoms = model.get_atoms()\n \n atoms = [atom.set_znum(icofrac[i]) for i,atom in enumerate(atoms)]\n #for atom in atoms:\n # if atom.get_znum() == 0:\n # atom.set_znum(1)\n nbins = 6\n del_bin = 100.0/nbins\n for atom in atoms:\n for i in range(1,nbins+1):\n if( atom.z <= i*del_bin):\n atom.z = i\n break\n\n atoms = [atom.convert_to_sym() for i,atom in enumerate(atoms)]\n\n print(model.natoms)\n print('{0} {1} {2}'.format(model.lx,model.ly,model.lz))\n for atom in atoms:\n print atom.vesta()", "title": "" }, { "docid": "34940aeb55b7bb244ad3f2a5de39fa32", "score": "0.5187263", "text": "def generateCoreMap(self):", "title": "" }, { "docid": "70a123bc07a11ce7b0f33f5a8f6274ea", "score": "0.51714396", "text": "def convert_coor_topo(self, records):\n\t\tfor index, record in enumerate(records):\n\t\t\tfor coor_index, coor in enumerate(record.get(\"fields\").get(\"parcours\").get(\"coordinates\")):\n\t\t\t\trecords[index][\"fields\"][\"parcours\"][\"coordinates\"][coor_index] = [coor[1], coor[0]]\n\n\t\treturn records", "title": "" }, { "docid": "51692fb078edff3dd22867ca2d532689", "score": "0.51693887", "text": "def find_all_ORFs(dna):\n\n\n return find_all_ORFs_oneframe_offset(dna, 0) + find_all_ORFs_oneframe_offset(dna,1) + find_all_ORFs_oneframe_offset(dna,2)", "title": "" }, { "docid": "ad6ef048e55b4577132aa1cdbc04e5e3", "score": "0.5164164", "text": "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input-gtf\",\n \"-i\",\n dest=\"input_gtf\",\n default=None,\n required=True,\n help=\"input GTF\",\n )\n parser.add_argument(\n \"--output-gtf\", \"-o\", dest=\"output_gtf\", default=None, help=\"output GTF\"\n )\n args = parser.parse_args()\n\n intron_cands = {}\n exons = {}\n exon_ids = {}\n gene_locs = {}\n gene_locations = {}\n # gather the location of each genes and exons; and exon_ids to avoid duplication\n with gzip.open(args.input_gtf, \"rt\") if args.input_gtf.endswith(\".gz\") else open(\n args.input_gtf, \"r\"\n ) as input_file:\n for line in input_file:\n if not line.startswith(\"#\"):\n fields = [x.strip() for x in line.strip().split(\"\\t\")]\n if fields[2] == \"exon\":\n gene_id = get_feature(line.strip(), \"gene_id\")\n exon_id = get_feature(line.strip(), \"exon_id\")\n contig_id = fields[0]\n locpair = (int(fields[3]), int(fields[4]))\n if contig_id not in exons:\n exons[contig_id] = []\n if exon_id not in exon_ids:\n exons[contig_id].append(locpair)\n exon_ids[exon_id] = True\n elif fields[2] == \"gene\":\n gene_id = get_feature(line.strip(), \"gene_id\")\n 
contig_id = fields[0]\n                    locpair = (int(fields[3]), int(fields[4]), gene_id)\n                    if gene_id is not None:\n                        if contig_id not in gene_locs:\n                            gene_locs[contig_id] = []\n                        gene_locs[contig_id].append(locpair)\n\n                        gene_locations[gene_id] = locpair\n\n    # sort the gene locs by start\n    for contig_id in gene_locs:\n        gene_locs[contig_id].sort(key=lambda x: x[0], reverse=False)\n\n    # sort the exons by start within each contig\n    for contig_id in exons:\n        exons[contig_id].sort(key=lambda x: x[0], reverse=False)\n\n    # compute the intron candidates for each contig,\n    # where any bp that is not an exon is a candidate intron, without\n    # worrying about whether that base pair falls within the range\n    # of a gene\n    for contig_id in exons:\n        intron_cands[contig_id] = []\n        last_exon_end = 0\n        for exon_coor in exons[contig_id]:\n            if exon_coor[0] > last_exon_end:\n                pair = (last_exon_end, exon_coor[0])\n                intron_cands[contig_id].append(pair)\n\n            last_exon_end = max(last_exon_end, exon_coor[1])\n\n        # add the trailing interval up to a large sentinel end\n        pair = (last_exon_end, 30000000000)\n        intron_cands[contig_id].append(pair)\n\n    # global ordered (ascending) array of intronic start or end points\n    introns = {}\n    for contig_id in gene_locs:\n\n        introns[contig_id] = []\n        intronic_points = []\n        for coor in intron_cands[contig_id]:\n            intronic_points.append(coor[0])\n            intronic_points.append(coor[1])\n\n        for gene_loc in gene_locs[contig_id]:\n            i = bisect_right(intronic_points, gene_loc[0], 0, len(intronic_points))\n            j = bisect_left(intronic_points, gene_loc[1], 0, len(intronic_points))\n\n            if i % 2 == 1:  # it is a start location on i\n                intron_start = gene_loc[0]\n                intron_end = intronic_points[i]\n                # introns[contig_id].append( (intron_start, intron_end, gene_loc[2]) )\n\n            for k in range(i, j, 2):\n                introns[contig_id].append(\n                    (intronic_points[k], intronic_points[k + 1], gene_loc[2])\n                )\n\n            if j % 2 == 1:\n                intron_start = intronic_points[j]\n                intron_end = gene_loc[1]\n                introns[contig_id].append((intron_start, intron_end, gene_loc[2]))\n\n    genewise_introns = {}\n    for contig_id in introns:\n        genewise_introns[contig_id] = {}\n        for intron in introns[contig_id]:\n            if intron[2] not in genewise_introns[contig_id]:\n                genewise_introns[contig_id][intron[2]] = []\n            genewise_introns[contig_id][intron[2]].append((intron[0], intron[1]))\n\n    intron_no = 1\n    with gzip.open(args.input_gtf, "rt") if args.input_gtf.endswith(".gz") else open(\n        args.input_gtf, "r"\n    ) as input_file:\n        with gzip.open(args.output_gtf, "wb") if args.output_gtf.endswith(\n            ".gz"\n        ) else open(args.output_gtf, "w") as output_gtf:\n\n            for line in input_file:\n                if line.startswith("#"):\n                    if args.output_gtf.endswith(".gz"):\n                        output_gtf.write("{}".format(line.strip() + "\n").encode())\n                    else:\n                        output_gtf.write(line.strip() + "\n")\n                else:\n                    fields = [x.strip() for x in line.strip().split("\t")]\n                    if fields[2] == "exon":\n                        if args.output_gtf.endswith(".gz"):\n                            output_gtf.write("{}".format(line.strip() + "\n").encode())\n                        else:\n                            output_gtf.write(line.strip() + "\n")\n\n                    elif fields[2] == "gene":\n                        if args.output_gtf.endswith(".gz"):\n                            output_gtf.write("{}".format(line.strip() + "\n").encode())\n                        else:\n                            output_gtf.write(line.strip() + "\n")\n\n                        gene_id = get_feature(line.strip(), "gene_id")\n                        contig_id = fields[0]\n                        if gene_id in genewise_introns[contig_id]:\n                            for intron 
in genewise_introns[contig_id][gene_id]:\n mod_fields = fields.copy()\n mod_fields[2] = \"intron\"\n mod_fields[3] = str(intron[0])\n mod_fields[4] = str(intron[1])\n mod_fields[8] = mod_fields[\n 8\n ] + ' intron_id \"{}\"'.format(str(intron_no))\n intron_no += 1\n if args.output_gtf.endswith(\".gz\"):\n output_gtf.write(\n \"{}\".format(\n \"\\t\".join(mod_fields) + \"\\n\"\n ).encode()\n )\n else:\n output_gtf.write(\"\\t\".join(mod_fields) + \"\\n\")\n else:\n if args.output_gtf.endswith(\".gz\"):\n output_gtf.write(\"{}\".format(line.strip() + \"\\n\").encode())\n else:\n output_gtf.write(line.strip() + \"\\n\")", "title": "" }, { "docid": "7a900881e55ee6c1b495f143f596fe8d", "score": "0.51512045", "text": "def find_orfs(RNA):\r\n\r\n\t# rather complicated regex to find orfs. \r\n\t# The \"?=\" makes sure we find also overlapping orfs\r\n\t# findall returns a list with all matches\r\n\tmatches = re.findall('(?=(AUG([ACGU]{3})*?(UAG|UAA|UGA)))',RNA)\r\n\t\r\n\t# create an empty list to store the found orfs\r\n\torfs = []\r\n\tfor match in matches:\r\n\t\t# each match is a list with full and partial matches \r\n\t\t# the full match is the first element\r\n\t\torfs.append(match[0])\r\n\treturn orfs", "title": "" }, { "docid": "1db8c5520a166344c73ace146d9cfec5", "score": "0.5125914", "text": "def ClusterOrthologsByGenes(orthologs):\n\n map_gene2cluster = {}\n clusters = []\n\n for t1, t2, g1, g2, w in orthologs:\n\n # get previous clusters\n pos = {}\n for g in g1.keys():\n k = \"a%s\" % g\n if k in map_gene2cluster:\n pos[map_gene2cluster[k]] = 1\n for g in g2.keys():\n k = \"b%s\" % g\n if k in map_gene2cluster:\n pos[map_gene2cluster[k]] = 1\n\n tt1 = t1\n tt2 = t2\n ww = [w]\n # add previous clusters to this cluster\n # and clear previous clusters\n for p in pos:\n tt1 += clusters[p][0]\n tt2 += clusters[p][1]\n ww += clusters[p][2]\n clusters[p] = None\n\n # map previous clusters to new cluster\n n = len(clusters)\n for g in GetGenes(tt1):\n map_gene2cluster[\"a%s\" % g] = n\n for g in GetGenes(tt2):\n map_gene2cluster[\"b%s\" % g] = n\n\n # append to clusters\n clusters.append([tt1, tt2, ww])\n\n orthologs = []\n for c in clusters:\n if c:\n orthologs.append(\n (c[0], c[1], GetGenes(c[0]), GetGenes(c[1]), c[2]))\n\n return orthologs", "title": "" }, { "docid": "057bc801db87e0af988786c5f7dc7b59", "score": "0.51087564", "text": "def computeORF(psr):\n\n # begin loop over all pulsar pairs and calculate ORF\n k = 0\n npsr = len(psr)\n ORF = np.zeros(npsr*(npsr-1) // 2)\n phati = np.zeros(3)\n phatj = np.zeros(3)\n for ll in range(0, npsr):\n phati[0] = np.cos(psr[ll].phi) * np.sin(psr[ll].theta)\n phati[1] = np.sin(psr[ll].phi) * np.sin(psr[ll].theta)\n phati[2] = np.cos(psr[ll].theta)\n\n for kk in range(ll+1, npsr):\n phatj[0] = np.cos(psr[kk].phi) * np.sin(psr[kk].theta)\n phatj[1] = np.sin(psr[kk].phi) * np.sin(psr[kk].theta)\n phatj[2] = np.cos(psr[kk].theta)\n\n xip = (1.-np.sum(phati*phatj)) / 2.\n ORF[k] = 3.*( 1./3. + xip * ( np.log(xip) -1./6.) 
)\n k += 1\n\n return ORF", "title": "" }, { "docid": "5d1c85220bf459e6a833e6fa4370c605", "score": "0.5093066", "text": "def get_fasta_orfs(data):\n\n fasta_orfs = {}\n\n for d in data:\n\n _id = d.id\n\n fasta_orfs[_id] = get_orf_complete(d.seq)\n\n return fasta_orfs", "title": "" }, { "docid": "9580349d1bebacb09b524c400d8a1394", "score": "0.5077131", "text": "def omapper(self):\n items = [((d, CENTER), (d, LEFT), (d, RIGHT)) for d in self.dimensions]\n\n processed = []\n for item in product(*items):\n where = []\n mapper = {}\n for d, s in item:\n osl, osr = self.owned_size[d]\n\n # Handle SubDomain/SubDimensions to-honor offsets\n nl = Max(0, *[i for i, _ in self.honored.get(d, [])])\n nr = Max(0, *[i for _, i in self.honored.get(d, [])])\n\n if s is CENTER:\n where.append((d, CORE, s))\n mapper[d] = (d.symbolic_min + osl,\n d.symbolic_max - osr)\n if nl != 0:\n mapper[nl] = (Max(nl - osl, 0),)\n if nr != 0:\n mapper[nr] = (Max(nr - osr, 0),)\n else:\n where.append((d, OWNED, s))\n if s is LEFT:\n mapper[d] = (d.symbolic_min,\n Min(d.symbolic_min + osl - 1, d.symbolic_max - nr))\n if nl != 0:\n mapper[nl] = (nl,)\n mapper[nr] = (0,)\n else:\n mapper[d] = (Max(d.symbolic_max - osr + 1, d.symbolic_min + nl),\n d.symbolic_max)\n if nr != 0:\n mapper[nl] = (0,)\n mapper[nr] = (nr,)\n processed.append((tuple(where), frozendict(mapper)))\n\n _, core = processed.pop(0)\n owned = processed\n\n return OMapper(core, owned)", "title": "" }, { "docid": "26d600c685be399d934ca5feb41c61bb", "score": "0.50467575", "text": "def read_scer_orthologs(dirs_list,species_list,summaryFile):\n\n\tscer_orthos = defaultdict(dict)\n\tscer_ortho_species =[]\n\tfor dir,species in zip(dirs_list,species_list):\n\t\tfor spec in species:\n\t\t\tscer_ortho_species.append(spec)\n\t\t\tblastFile = os.path.join(dir, 'Scer-' + spec + '-blast_stats_coverageCutoff.best.txt')\n\t\t\twith open(blastFile,\"r\") as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tscer_orthos[line.split()[1]].setdefault(spec,[]).append((line.split()[0],float(line.split()[2])))\n\n\tfor scer_orf in scer_orthos:\n\t\tfor orth_species in scer_orthos[scer_orf]:\n\t\t\t(scer_orthos[scer_orf][orth_species]).sort(key=lambda x: float(x[1]))\n\t\t\tscer_orthos[scer_orf][orth_species]= scer_orthos[scer_orf][orth_species][0][0]\n\n\n\tprint_ortho_summary_file(summaryFile,scer_ortho_species,scer_orthos,print_sum=False)\n\n\treturn scer_orthos", "title": "" }, { "docid": "3a8436741d1122aed7fe7636c130b7c2", "score": "0.50413215", "text": "def test_map_ho2_elimination_from_peroxy_radical(self):\n r_xyz = \"\"\"N -0.82151000 -0.98211000 -0.58727000\n C -0.60348000 0.16392000 0.30629000\n C 0.85739000 0.41515000 0.58956000\n C 1.91892000 -0.27446000 0.14220000\n O -1.16415000 1.38916000 -0.20784000\n O -2.39497344 1.57487672 0.46214548\n H -0.50088000 -0.69919000 -1.51181000\n H -1.83926000 -1.03148000 -0.69340000\n H -1.09049000 -0.04790000 1.26633000\n H 1.04975000 1.25531000 1.25575000\n H 2.92700000 0.00462000 0.43370000\n H 1.81273000 -1.13911000 -0.50660000\"\"\" # NC(C=C)O[O]\n p_1_xyz = \"\"\"N 1.16378795 1.46842703 -0.82620909\n C 0.75492192 0.42940001 -0.18269967\n C -0.66835457 0.05917401 -0.13490822\n C -1.06020680 -1.02517494 0.54162130\n H 2.18280085 1.55132949 -0.73741996\n H 1.46479392 -0.22062618 0.35707573\n H -1.36374229 0.69906451 -0.66578157\n H -2.11095970 -1.29660899 0.57562763\n H -0.36304116 -1.66498540 1.07269317\"\"\" # N=CC=C\n rxn_1 = ARCReaction(r_species=[ARCSpecies(label='R', smiles='NC(C=C)O[O]', xyz=r_xyz)],\n 
p_species=[ARCSpecies(label='P1', smiles='N=CC=C', xyz=p_1_xyz),\n ARCSpecies(label='HO2', smiles='O[O]', xyz=self.ho2_xyz)])\n atom_map = rxn_1.atom_map\n self.assertIn(atom_map[:6], [[0, 1, 2, 3, 10, 9], [0, 1, 2, 3, 9, 10]])\n self.assertIn(atom_map[6], [4, 11])\n self.assertIn(atom_map[7], [4, 11])\n self.assertEqual(atom_map[8], 5)\n self.assertEqual(atom_map[9], 6)\n self.assertIn(atom_map[10], [7, 8])\n self.assertIn(atom_map[11], [7, 8])\n self.assertTrue(check_atom_map(rxn_1))\n\n # A different main product\n p_2_xyz = \"\"\"N -1.60333711 -0.23049987 -0.35673484\n C -0.63074775 0.59837442 0.08043329\n C 0.59441219 0.18489797 0.16411656\n C 1.81978128 -0.23541908 0.24564488\n H -2.56057110 0.09083582 -0.42266843\n H -1.37296018 -1.18147301 -0.62077856\n H -0.92437032 1.60768040 0.35200716\n H 2.49347824 -0.13648710 -0.59717108\n H 2.18431385 -0.69791121 1.15515621\"\"\" # NC=C=C\n rxn_2 = ARCReaction(r_species=[ARCSpecies(label='R', smiles='NC(C=C)O[O]', xyz=r_xyz)],\n p_species=[ARCSpecies(label='P2', smiles='NC=C=C', xyz=p_2_xyz),\n ARCSpecies(label='HO2', smiles='O[O]', xyz=self.ho2_xyz)])\n atom_map = rxn_2.atom_map\n self.assertIn(atom_map[:10], [[0, 1, 2, 3, 10, 9, 5, 4, 6, 11], [0, 1, 2, 3, 9, 10, 5, 4, 6, 11]])\n self.assertIn(atom_map[10], [7, 8])\n self.assertIn(atom_map[11], [7, 8])\n \n # Reversed product order\n rxn_2 = ARCReaction(r_species=[ARCSpecies(label='R', smiles='NC(C=C)O[O]', xyz=r_xyz)],\n p_species=[ARCSpecies(label='HO2', smiles='O[O]', xyz=self.ho2_xyz),\n ARCSpecies(label='P2', smiles='NC=C=C', xyz=p_2_xyz)])\n atom_map = rxn_2.atom_map\n self.assertIn(atom_map[:10], [[3, 4, 5, 6, 1, 0, 8, 7, 9, 2], [3, 4, 5, 6, 0, 1, 8, 7, 9, 2]])\n self.assertIn(atom_map[10], [10, 11])\n self.assertIn(atom_map[11], [10, 11])\n \n c2h5o3_xyz = {'coords': ((-1.3476727508427788, -0.49923624257482285, -0.3366372557370102),\n (-0.11626816111736853, 0.3110915299407186, 0.018860985632263887),\n (0.7531175607750088, 0.3366822240291409, -1.1050387236863213),\n (0.5228736844989644, -0.3049881931104616, 1.1366016759286774),\n (1.8270658637404131, 0.34102014147584997, 1.2684162942337813),\n (-2.039181700362481, -0.5535509846570477, 0.5100031541057821),\n (-1.865025875161301, -0.06806929272376178, -1.1994046923960628),\n (-1.0711960095793496, -1.5264629385419055, -0.6002175107608478),\n (-0.40133538695862053, 1.3357900487643664, 0.28224155088545305),\n (1.3942569570346546, 1.035594500292526, -0.8890721851777293)),\n 'isotopes': (12, 12, 16, 16, 16, 1, 1, 1, 1, 1),\n 'symbols': ('C', 'C', 'O', 'O', 'O', 'H', 'H', 'H', 'H', 'H')}\n c2h4o_xyz = {'coords': ((-0.6485165220711699, -0.036287809639473964, -0.040072327958319325),\n (0.8441328059817381, 0.04088405476411104, 0.05352861712992162),\n (1.4799812732494606, 1.0748679945888888, -0.1224478071645769),\n (-1.0603388058764294, 0.9464876376852732, -0.28238370478893315),\n (-0.9213427138232859, -0.7478396768473443, -0.8228167900899559),\n (-1.0499663443190728, -0.37234114306362315, 0.9187474043028493),\n (1.3560503068587568, -0.9057710574878411, 0.29544460856901716)),\n 'isotopes': (12, 12, 16, 1, 1, 1, 1),\n 'symbols': ('C', 'C', 'O', 'H', 'H', 'H', 'H')}\n r_1 = ARCSpecies(label='C2H5O3', smiles='CC(O)O[O]', xyz=c2h5o3_xyz)\n p_1 = ARCSpecies(label='C2H4O', smiles='CC=O', xyz=c2h4o_xyz)\n p_2 = ARCSpecies(label='HO2', smiles='O[O]', xyz=self.ho2_xyz)\n rxn_4 = ARCReaction(r_species=[r_1], p_species=[p_1, p_2])\n self.assertIn(rxn_4.atom_map[:5], [[0, 1, 2, 8, 7], [0, 1, 2, 7, 8]])\n self.assertIn(tuple(rxn_4.atom_map[5: 8]), 
list(permutations([3, 4, 5])))\n        self.assertEqual(rxn_4.atom_map[8:], [6, 9])", "title": ""
},
{
"docid": "e935b0c4d0b92ae0b4c57006f41ce438", "score": "0.5040407", "text": "def reefmap(self,pathname,forc_hurr,forc_cm,hmax_sr,cmmax_sr):\r\n\r\n        inputmap = open(pathname,'U')\r\n        try:\r\n            self.cell_x = float(inputmap.readline())  # x dimension of cells\r\n            self.cell_y = float(inputmap.readline())  # y dimension of cells\r\n            self.cell_area = self.cell_x*self.cell_y  # cell area\r\n        except:\r\n            self.error_text.append(\r\n                '** The first two lines of "%s" should be float values'\r\n                %pathname)\r\n            self.error_text.append(\r\n                'that specify the x and y dimensions of grid cells')\r\n            self.init_errors += 1\r\n\r\n        if self.init_errors == 0:\r\n            self.reefmap = []\r\n            for lines in inputmap:\r\n                try:\r\n                    self.reefmap.append(list(map(int, lines.split(','))))\r\n                except:\r\n                    self.error_text.append(\r\n                        '** Problem with formatting of raster data in map input file:')\r\n                    self.error_text.append(\r\n                        'see user documentation for formatting requirements')\r\n                    self.init_errors += 1\r\n            inputmap.close()\r\n            self.reefmap = array(self.reefmap)\r\n\r\n        if self.init_errors == 0:\r\n            self.width = self.reefmap.shape[1]\r\n            self.length = self.reefmap.shape[0]\r\n            self.srrange = self.reefmap.max()\r\n            if self.srrange > 10:\r\n                self.error_text.append(\r\n                    '** Number of subregions should not exceed 10')\r\n                self.init_errors += 1\r\n            if self.reefmap.min() != 0:\r\n                self.error_text.append(\r\n                    '** Mapfile should include values = "0" to indicate the location')\r\n                self.error_text.append(\r\n                    'of non-reef grid cells (all grid cell values should be')\r\n                self.error_text.append('greater than or equal to zero)')\r\n                self.init_errors += 1\r\n            self.subregs = []\r\n            self.subregid = []\r\n            for i in range(self.srrange):\r\n                self.subregs.append(0)\r\n            self.reeftotal = 0\r\n            for ii in range(self.length):\r\n                for jj in range(self.width):\r\n                    for i in range(self.srrange+1)[1:]:\r\n                        if self.reefmap[ii,jj] == i:\r\n                            self.subregs[i-1] +=1\r\n                            self.subregid.append(i)\r\n                            self.reeftotal +=1\r\n            if self.reeftotal == 0:\r\n                self.error_text.append(\r\n                    '** Mapfile does not contain any reef cells')\r\n                self.init_errors += 1\r\n            num = 0\r\n            for i in range(1,max(self.subregid)+1):\r\n                if i not in self.subregid:\r\n                    num += 1\r\n            if num != 0:\r\n                self.error_text.append(\r\n                    '**Numbering of subregions in the mapfile should be')\r\n                self.error_text.append('consecutive and should start at "1"')\r\n                self.init_errors += 1\r\n            if forc_hurr in [1,2]:\r\n                if hmax_sr > self.srrange:\r\n                    self.error_text.append(\r\n                        '** Value for "hmax_sr" in the input file should be less than')\r\n                    self.error_text.append(\r\n                        'or equal to the number of subregions in the mapfile')\r\n                    self.error_text.append('(current value of "%s" is invalid)'\r\n                                           %hmax_sr)\r\n                    self.init_errors += 1\r\n            if forc_cm in [1,3]:\r\n                if cmmax_sr > self.srrange:\r\n                    self.error_text.append(\r\n                        '** Value for "cmmax_sr" in the input file should be less than')\r\n                    self.error_text.append(\r\n                        'or equal to the number of subregions in the mapfile')\r\n                    self.error_text.append('(current value of "%s" is invalid)'\r\n                                           %cmmax_sr)\r\n                    self.init_errors += 1", "title": ""
},
{
"docid": "3fae7adcd0f2c1da6c9e4e783ad249f5", "score": "0.5036836", "text": "def find_all_ORFs(dna):\n    allORFs2 = []\n    frameORFs = []\n\n    for frame in range(0, 3):\n        frameORFs = find_all_ORFs_oneframe(dna[frame:])\n        for x in frameORFs:\n            allORFs2.append(x)\n    return allORFs2", 
"title": "" }, { "docid": "eccd242ab7783afbf09c2f3c5f5f2b7e", "score": "0.50011873", "text": "def makeOrbitsDat(root='./', efit='efit3_d/output/efit3.log',\n poly='polyfit_d/fit', onlyHighAccel=True):\n from gcwork.polyfit import accel\n\n # Now read in the efit3.log file\n tab = Table.read(root + efit)\n\n name = tab[0]._data\n dist = tab[1].tonumpy() # pc\n a = tab[4].tonumpy() # mas\n p = tab[5].tonumpy() # yr\n e = tab[6].tonumpy() #\n t0 = tab[7].tonumpy() # yr\n w = tab[8].tonumpy() # deg\n i = tab[9].tonumpy() # deg\n o = tab[10].tonumpy() # deg\n\n if onlyHighAccel == True:\n # Find the significantly accelerating sources within the\n # central arcsecond.\n srcs = accel.highSigSrcs(0.5, 4, verbose=False, rootDir=root, poly=poly)\n else:\n # Use ALL stars in this list\n srcs = name\n\n _out = open(root + 'source_list/orbits_new.dat', 'w')\n _out.write('# Python gcwork.starTables.makeOrbitsDat()\\n')\n _out.write('%-10s %7s %7s %8s %7s %7s %7s %7s %7s\\n' % \\\n ('#Star', 'P', 'A', 't0', 'e', 'i', 'Omega', 'omega', 'search'))\n _out.write('%-10s %7s %7s %8s %7s %7s %7s %7s %7s\\n' % \\\n ('#Name', '(yrs)', '(mas)', '(yrs)', '()',\n '(deg)', '(deg)', '(deg)', '(pix)'))\n\n\n # Loop through every src and if it is named, output into a\n # new orbits_new.dat file.\n for ss in range(len(srcs)):\n try:\n idx = name.index(srcs[ss])\n except ValueError:\n #print 'Failed to find match for %s in %s' % (srcs[ss], efit)\n continue\n\n # Skip if this isn't a named source\n if (('star' in srcs[ss]) and (onlyHighAccel == True)):\n continue\n\n # Write output\n _out.write('%-10s ' % (srcs[ss]))\n _out.write('%7.2f %7.1f %8.3f ' % (p[idx], a[idx], t0[idx]))\n _out.write('%7.5f %7.3f %7.3f ' % (e[idx], i[idx], o[idx]))\n _out.write('%7.3f %7d\\n' % (w[idx], 2))\n\n _out.close()", "title": "" }, { "docid": "866ec9fbe5192bb335508e23be225c4a", "score": "0.49955928", "text": "def dmelgn_to_ortho(p2g_dict, best_hit_dict):\n\tdmelgn_to_ortho = {}\n\tfor k,v in best_hit_dict.iteritems():\n\t\t#print \"this is k\", k\n\t\tnew_key = p2g_dict[k]\n\t\t#print \"This is new_key\", new_key\n\t\tdmelgn_to_ortho[new_key]= [best_hit_dict[k][0],best_hit_dict[k][1], best_hit_dict[k][2]]\n\treturn dmelgn_to_ortho\n\t#'FBgn0051014': ['scaffold_4', '5150977', '5150084'], \n\t#'FBgn0051013': ['scaffold_4', '5297590', '5296811'], \n\t#'FBgn0051010': ['scaffold_4', '5350617', '5349826']", "title": "" }, { "docid": "bcfd77d855252aa3ad4755f33e3baede", "score": "0.49753296", "text": "def find_all_ORFs(dna):\n ORFs = []\n for i in range(0,3):\n ORFs.extend(find_all_ORFs_oneframe(dna[i:]))\n return ORFs", "title": "" }, { "docid": "d662e1060032efe86eb996d865e619e7", "score": "0.4973616", "text": "def _remap_src_to_ortho(self, ortho_profile, dem_array):\n\n # initialse tile grid here once off (save cpu) - to offset later (requires N-up geotransform)\n j_range = np.arange(0, self.tile_size[0], dtype='float32')\n i_range = np.arange(0, self.tile_size[1], dtype='float32')\n jgrid, igrid = np.meshgrid(j_range, i_range, indexing='xy')\n xgrid, ygrid = ortho_profile['transform'] * [jgrid, igrid]\n\n block_count = 0\n with rio.open(self._src_im_filename, 'r') as src_im:\n with rio.open(self._ortho_im_filename, 'w', **ortho_profile) as ortho_im:\n if self.per_band:\n bands = np.array([range(1, src_im.count + 1)]).T # RW one row of bands i.e. one band at a time\n else:\n bands = np.array([range(1, src_im.count + 1)]) # RW one row of bands i.e. 
all bands at once\n\n ttl_blocks = np.ceil(ortho_profile['width'] / ortho_profile['blockxsize']) * np.ceil(\n ortho_profile['height'] / ortho_profile['blockysize']) * bands.shape[0]\n\n for bi in bands.tolist():\n # read source image band(s)\n src_im_array = src_im.read(bi)\n\n for ji, ortho_win in ortho_im.block_windows(1):\n\n # offset tile grids to ortho_win\n ortho_win_transform = rio.windows.transform(ortho_win, ortho_im.transform)\n ortho_xgrid = xgrid[:ortho_win.height, :ortho_win.width] + (\n ortho_win_transform.xoff - ortho_im.transform.xoff)\n ortho_ygrid = ygrid[:ortho_win.height, :ortho_win.width] + (\n ortho_win_transform.yoff - ortho_im.transform.yoff)\n\n # extract ortho_win from dem_array\n ortho_zgrid = dem_array[ortho_win.row_off:(ortho_win.row_off + ortho_win.height),\n ortho_win.col_off:(ortho_win.col_off + ortho_win.width)]\n\n # find the 2D source image pixel co-ords corresponding to ortho image 3D co-ords\n src_ji = self._camera.unproject(np.array([ortho_xgrid.reshape(-1, ), ortho_ygrid.reshape(-1, ),\n ortho_zgrid.reshape(-1, )]))\n src_jj = src_ji[0, :].reshape(ortho_win.height, ortho_win.width)\n src_ii = src_ji[1, :].reshape(ortho_win.height, ortho_win.width)\n\n # Interpolate the ortho tile from the source image based on warped/unprojected grids\n ortho_im_win_array = np.zeros((src_im_array.shape[0], ortho_win.height, ortho_win.width),\n dtype=ortho_im.dtypes[0])\n for oi in range(0, src_im_array.shape[0]): # for per_band=True, this will loop once only\n ortho_im_win_array[oi, :, :] = cv2.remap(src_im_array[oi, :, :], src_jj, src_ii,\n self.interp, borderMode=cv2.BORDER_CONSTANT,\n borderValue=self.nodata)\n # below is the scipy equivalent to cv2.remap. it is ~3x slower but doesn't blur with nodata\n # ortho_im_win_array[oi, :, :] = map_coordinates(src_im_array[oi, :, :], (src_ii, src_jj),\n # order=2, mode='constant', cval=self.nodata,\n # prefilter=False)\n # remove blurring with nodata at the boundary where necessary\n nodata_mask = (ortho_im_win_array[0, :, :] == self.nodata)\n if (self.interp != 'nearest') and (np.sum(nodata_mask) > np.min(self.tile_size)):\n nodata_mask_d = cv2.dilate(nodata_mask.astype(np.uint8, copy=False),\n np.ones((3, 3), np.uint8))\n ortho_im_win_array[:, nodata_mask_d.astype(bool, copy=False)] = self.nodata\n else:\n nodata_mask_d = nodata_mask\n\n # write out the ortho tile to disk\n ortho_im.write(ortho_im_win_array, bi, window=ortho_win)\n\n if self.write_mask and np.all(bi == bands[0]): # write mask once for all bands\n with np.testing.suppress_warnings() as sup:\n sup.filter(DeprecationWarning, \"\") # suppress the np.bool warning as it is buggy\n ortho_im.write_mask(np.bitwise_not(255 * nodata_mask_d).astype(np.uint8, copy=False),\n window=ortho_win)\n\n # print progress\n block_count += 1\n progress = (block_count / ttl_blocks)\n sys.stdout.write('\\r')\n sys.stdout.write(\"[%-50s] %d%%\" % ('=' * int(50 * progress), 100 * progress))\n sys.stdout.flush()\n\n sys.stdout.write('\\n')", "title": "" }, { "docid": "1a1659412ed4ea8d112e292d4254422d", "score": "0.4965838", "text": "def getChromosomalOveralap(affy_chr_db,ensembl_chr_db,ensembl_transcript_clusters,no_match_list):\n ###exon_location[transcript_cluster_id,chr,strand] = [(start,stop,exon_type,probeset_id)]\n y = 0; l =0; multiple_ensembl_associations=[]\n ###(bp1,ep1) = (47211632,47869699); (bp2,ep2) = (47216942, 47240877)\n for chr in affy_chr_db:\n try:\n ensembl_db = ensembl_chr_db[chr]\n affy_db = affy_chr_db[chr]\n for (bp1,ep1) in affy_db:\n x = 0\n 
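# the two-clause condition below is the standard closed-interval overlap test;\n                # assuming every start <= its end, it is equivalent to the single expression\n                # max(bp1, bp2) <= min(ep1, ep2)\n                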
transcript_cluster_key = affy_db[(bp1,ep1)][0]\n                for (bp2,ep2) in ensembl_db:\n                    y += 1; ensembl = ensembl_db[(bp2,ep2)][0][0]\n                    ### if the two gene location ranges overlap\n                    ### affy_probeset_info = (start,stop,exon_type,probeset_id)\n                    if ((bp1 >= bp2) and (ep2 >= bp1)) or ((bp2 >= bp1) and (ep1 >= bp2)):\n                        x = 1; affy_probeset_info = affy_db[(bp1,ep1)][1]; ensembl_key = ensembl_db[(bp2,ep2)][0],(bp2,ep2),affy_probeset_info\n                        try: ensembl_transcript_clusters[transcript_cluster_key].append(ensembl_key)\n                        except KeyError: ensembl_transcript_clusters[transcript_cluster_key] = [ensembl_key]\n                        l += 1\n                if x == 0: no_match_list.append(transcript_cluster_key)\n        except KeyError: print(chr, 'data not found')\n    \n    print("Transcript Clusters overlapping with Ensembl:", len(ensembl_transcript_clusters))\n    print("With NO overlap", len(no_match_list))\n    return ensembl_transcript_clusters,no_match_list", "title": ""
},
{
"docid": "fd2e6158933af766cfd5d0540e833ed9", "score": "0.49627453", "text": "def ITRF_to_geoditic(ITRF_XYZ):\n    \n#    a = 6378137.0 #### semi-major axis, m\n#    e2 = 0.00669438002290 ## eccentricity squared\n#    me2 = 1-e2\n#    b2 = a*a*me2 ## semi-minor axis squared\n#    \n#    r2 = ITRF_XYZ[0]*ITRF_XYZ[0]\n#    r2 += ITRF_XYZ[1]*ITRF_XYZ[1]\n#    r = np.sqrt(r2)\n#    Z2 = ITRF_XYZ[2]*ITRF_XYZ[2]\n#    \n#    F = Z2*(54*b2)\n#    G = r2 + me2*Z2 - e2*(a*a - b2)\n#    c = e2*e2*F*r2/(G*G*G)\n#    s = np.cbrt( 1 + c + np.sqrt(c*c + 2*c) )\n#    P = G*(1+s+1/s)\n#    P = F/(3*P*P)\n#    Q = np.sqrt( 1 + 2*e2*e2*P )\n#    \n#    Ro = -P*e2*r/(1+Q) + np.sqrt( a*a*(1+1/Q)*0.5 - P*me2*Z2/(Q*(1+Q)) - P*r2*0.5 )\n#    N = r-e2*Ro\n#    U = np.sqrt( N*N + Z2 )\n#    V = np.sqrt( N*N + me2*Z2 )\n#    zo = b2*ITRF_XYZ[2]/(a*V)\n#    \n#    h = U*(1-b2/(a*V))\n#    lat = np.arctan( (ITRF_XYZ[2]+e2*zo)/r )*RTD\n#    lon = np.arctan2(ITRF_XYZ[1], ITRF_XYZ[0])*RTD\n#    return lat, lon, h\n    \n    class minimizer:\n        def __init__(self, X,Y,Z):\n            self.X = X\n            self.Y = Y\n            self.Z = Z\n        def __call__(self, lat_lon_h):\n            ret = geoditic_to_ITRF( lat_lon_h )\n            ret[0] -= self.X\n            ret[1] -= self.Y\n            ret[2] -= self.Z\n            return ret\n    \n    guess = np.append( latlonCS002, [0.0] )\n    ret = least_squares(minimizer(ITRF_XYZ[0], ITRF_XYZ[1], ITRF_XYZ[2]), guess, x_scale='jac' )\n    return ret.x", "title": ""
},
{
"docid": "c4af5ad0393a6c735bfd989cbde17295", "score": "0.49544472", "text": "def translate(orf):\r\n\tcodon_table = {'UUU':'F', 'UUC':'F', 'UUA':'L', 'UUG':'L',\r\n\t\t\t 'UCU':'S', 'UCC':'S', 'UCA':'S', 'UCG':'S',\r\n\t\t\t 'UAU':'Y', 'UAC':'Y', 'UAA':'*', 'UAG':'*',\r\n\t\t\t 'UGU':'C', 'UGC':'C', 'UGA':'*', 'UGG':'W',\r\n\t\t\t 'CUU':'L', 'CUC':'L', 'CUA':'L', 'CUG':'L',\r\n\t\t\t 'CCU':'P', 'CCC':'P', 'CCA':'P', 'CCG':'P',\r\n\t\t\t 'CAU':'H', 'CAC':'H', 'CAA':'Q', 'CAG':'Q',\r\n\t\t\t 'CGU':'R', 'CGC':'R', 'CGA':'R', 'CGG':'R',\r\n\t\t\t 'AUU':'I', 'AUC':'I', 'AUA':'I', 'AUG':'M',\r\n\t\t\t 'ACU':'T', 'ACC':'T', 'ACA':'T', 'ACG':'T',\r\n\t\t\t 'AAU':'N', 'AAC':'N', 'AAA':'K', 'AAG':'K',\r\n\t\t\t 'AGU':'S', 'AGC':'S', 'AGA':'R', 'AGG':'R',\r\n\t\t\t 'GUU':'V', 'GUC':'V', 'GUA':'V', 'GUG':'V',\r\n\t\t\t 'GCU':'A', 'GCC':'A', 'GCA':'A', 'GCG':'A',\r\n\t\t\t 'GAU':'D', 'GAC':'D', 'GAA':'E', 'GAG':'E',\r\n\t\t\t 'GGU':'G', 'GGC':'G', 'GGA':'G', 'GGG':'G'}\r\n\t\r\n\tcodon_count = len(orf)//3\r\n\t\r\n\tprot = ''\r\n\tfor codon_number in range(codon_count):\r\n\t\tcodon_start = codon_number * 3\r\n\t\tcodon = orf[codon_start:codon_start+3]\r\n\t\tprot = prot + codon_table.get(codon,'')\r\n\t\t\r\n\t# remove the stops at the 
end\r\n\tprot = prot.rstrip(\"*\")\r\n\treturn prot", "title": "" }, { "docid": "dcf8e2a20eec2f1580e5ad3cac315765", "score": "0.49462026", "text": "def build_yeast_aligment(sequences_dirs,MSA_dir,orthologs,opath,path_to_clustalo):\n\tprint (\"----------- Build yeast alignments -----------\")\n\t# Read all of the sequence data\n\tScer_ORFs = list(orthologs.keys())\n\n\tspecies = set([species for d in orthologs.values() for species in list(d.keys())])\n\tprint(\"Building MSA for Scer ORFs using related species: \\n\"+ \", \".join(species))\n\tprint(len({ key:value for (key,value) in orthologs.items() if len(value) == len(species)}),\"/\",len(orthologs),\"ORFs have mapping in all\",len(species),\"related species.\")\n\t\n\tsequences = [\"Scer.aa\",\"Scer.nt\"]+[spec+\".nt\" for spec in species]+[spec+\".aa\" for spec in species]\n\n\ttest_ORF = Scer_ORFs[0]\n\tfasta_example = os.path.join(MSA_dir,test_ORF,test_ORF+\".aa.fa\") # Only run read all sequence if needed (long)\n\tsequences_dict = read_all_sequence_files(sequences_dirs,sequences,fasta_example)\n\n\t# Write a FASTA file per yeast ORF with the sequences (nt and aa) from each strain\n\tfasta_path = write_fasta_MSA_files(MSA_dir,sequences_dict,Scer_ORFs,orthologs)\n\n\t# Run a MSA for each ORF (using the fasta file)\n\tmsa_run_clustalw(fasta_path,path_to_clustalo)\n\n\t# Combine everything\n\tbuild_combined_file(sequences_dirs,opath,fasta_path,[\"Scer\"]+list(species))\n\t\n\treturn", "title": "" }, { "docid": "e118fb34f6d1bf073af7ad29accdeec9", "score": "0.49453247", "text": "def __init__(self, reconstruction=\"\",\n odf_array = \"\", nifti_prefix=\"\",\n real_affine_image=\"\", mask_image=\"\",\n step_size=np.sqrt(3)/2. , angle_max=35, odf_resolution=\"odf8\",\n angle_weights=\"flat\", angle_weighting_power=1.,normalize_doubleODF=True):\n\n # From args\n self.step_size = step_size\n self.odf_resolution = odf_resolution\n self.angle_max = angle_max\n self.orientation = \"lps\"\n self.angle_weights = angle_weights\n self.normalize_doubleODF = normalize_doubleODF\n self.angle_weighting_power = angle_weighting_power\n logger.info(\"\\nUsing\\n------\\n Step Size:\\t\\t%.4f Voxels \\n ODF Resolution:\\t\"\n \"%s\\n Max Angle:\\t\\t%.2f Degrees\\n\"\n \" Angle Weights:\\t%s\\n Angle weight power:\\t%.1f\",\n self.step_size, self.odf_resolution, self.angle_max,\n self.angle_weights,self.angle_weighting_power)\n # Get matrices we'll need for analysis\n self.odf_vertices, self.prob_angles_weighted = \\\n get_transition_analysis_matrices(self.odf_resolution, self.angle_max,\n self.angle_weights, self.angle_weighting_power)\n self.n_unique_vertices = self.odf_vertices.shape[0]//2\n self.real_affine = np.eye(4)\n\n # Load input data and get spatial info\n self.label_lut = None\n self.atlas_labels = None\n self.mask_image = mask_image\n\n\n def set_ras_affine(voxel_size):\n aff = np.ones(4,dtype=np.float)\n aff[:3] = voxel_size\n self.ras_affine = np.diag(aff)\n\n if not nifti_prefix == \"\":\n self._load_niftis(nifti_prefix)\n else:\n if reconstruction.endswith(\".mif\"):\n self.flat_mask, self.volume_grid, self.odf_values, \\\n self.real_affine, self.voxel_size = \\\n load_mif(reconstruction, sphere=odf_resolution, mask=mask_image)\n set_ras_affine(self.voxel_size)\n\n elif reconstruction.endswith(\".fib\") or reconstruction.endswith(\".fib.gz\"):\n self.flat_mask, self.volume_grid, self.odf_values, \\\n self.real_affine, self.voxel_size = \\\n load_fib(reconstruction, self.odf_vertices,\n real_affine_image=real_affine_image)\n 
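# set_ras_affine (defined above) just builds a diagonal voxel-scaling matrix,\n                # e.g. voxel_size (2, 2, 2) gives np.diag([2., 2., 2., 1.]); the anatomical\n                # orientation is presumably carried separately by self.real_affine\n                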
set_ras_affine(self.voxel_size)\n else:\n logger.critical(\"No valid inputs detected\")\n\n # Adjust the ODF values\n norm_factor = self.odf_values.sum(1)\n norm_factor[norm_factor == 0] = 1.\n self.odf_values = self.odf_values / norm_factor[:,np.newaxis] * 0.5\n logger.info(\"Loaded ODF data: %s\",str(self.odf_values.shape))\n\n\n # Coordinate mapping information\n self.nvoxels = self.flat_mask.sum()\n self.voxel_coords = np.array(np.unravel_index(\n np.flatnonzero(self.flat_mask), self.volume_grid, order=\"F\")).T\n self.coordinate_lut = dict(\n [(tuple(coord), n) for n,coord in enumerate(self.voxel_coords)])\n\n self._initialize_nulls()", "title": "" }, { "docid": "92835486538304a404b8bff982bc1070", "score": "0.4944411", "text": "def get_orfs(seq):\n\n start_codons = find_start_codons(seq)\n\n stop_codons = find_stop_codons(seq)\n\n # After looking at the quiz, I think the instructors want us to assume\n # absolute termination at the nearest stop codon. This is not strictly\n # the case in reality, but ... fine.\n def nearest_stop_codon(start):\n\n # Convert to numpy array to facilitate filtering\n stop = np.array(stop_codons)\n\n # Only include stop codons that are past\n # the start codon. +3 here is a magic number\n # to advance past the triplet codon.\n stop = stop[stop > start+3]\n\n # We are only interested in stop locations\n # that are in frame. Apply mask so only\n # that subset is available.\n stop = stop[is_orf(start, stop)]\n\n # Possible that we will not find an in-frame\n # stop code (i.e., no ORF)\n # Conditional statement here protects against\n # this error and facilitates filtering return\n if len(stop) > 0:\n return stop.min()\n else:\n return None\n\n # Will be a list of lists of values\n orf_index = []\n\n # Iterate through all start codons and find the\n # nearest, in-frame stop codon\n for start in start_codons:\n\n stop = nearest_stop_codon(start)\n\n if stop is not None:\n\n if orf_length(start, stop) != -1:\n\n orf_index.append([start, stop])\n\n return orf_index", "title": "" }, { "docid": "bc0ed70fbdeb29573dc5dfeb98a3f61b", "score": "0.4940306", "text": "def test_map_abstractions(self):\n # H + CH4 <=> H2 + CH3\n r_1 = ARCSpecies(label='H', smiles='[H]', xyz={'coords': ((0, 0, 0),), 'isotopes': (1,), 'symbols': ('H',)})\n r_2 = ARCSpecies(label='CH4', smiles='C', xyz=self.ch4_xyz)\n p_1 = ARCSpecies(label='H2', smiles='[H][H]', xyz=self.h2_xyz)\n p_2 = ARCSpecies(label='CH3', smiles='[CH3]', xyz=self.ch3_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertIn(atom_map[0], [0, 1])\n self.assertEqual(atom_map[1], 2)\n for index in [2, 3, 4, 5]:\n self.assertIn(atom_map[index], [0, 1, 3, 4, 5])\n self.assertTrue(any(atom_map[r_index] in [0, 1] for r_index in [2, 3, 4, 5]))\n self.assertTrue(check_atom_map(rxn))\n\n # H + CH4 <=> CH3 + H2 (different order)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertIn(atom_map[0], [4, 5])\n self.assertEqual(atom_map[1], 0)\n for index in [2, 3, 4, 5]:\n self.assertIn(atom_map[index], [1, 2, 3, 4, 5])\n self.assertTrue(any(atom_map[r_index] in [4, 5] for r_index in [2, 3, 4, 5]))\n self.assertTrue(check_atom_map(rxn))\n\n # CH4 + H <=> H2 + CH3 (different order)\n rxn = ARCReaction(r_species=[r_2, r_1], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 2)\n for index in [1, 2, 3, 4]:\n 
self.assertIn(atom_map[index], [0, 1, 3, 4, 5])\n self.assertTrue(any(atom_map[r_index] in [0, 1] for r_index in [1, 2, 3, 4]))\n self.assertIn(atom_map[5], [0, 1])\n self.assertTrue(check_atom_map(rxn))\n\n # CH4 + H <=> CH3 + H2 (different order)\n rxn = ARCReaction(r_species=[r_2, r_1], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 0)\n for index in [1, 2, 3, 4]:\n self.assertIn(atom_map[index], [1, 2, 3, 4, 5])\n self.assertTrue(any(atom_map[r_index] in [4, 5] for r_index in [1, 2, 3, 4]))\n self.assertIn(atom_map[5], [4, 5])\n self.assertTrue(check_atom_map(rxn))\n\n \n # H + CH3NH2 <=> H2 + CH2NH2\n ch3nh2_xyz = {'coords': ((-0.5734111454228507, 0.0203516083213337, 0.03088703933770556),\n (0.8105595891860601, 0.00017446498908627427, -0.4077728757313545),\n (-1.1234549667791063, -0.8123899006368857, -0.41607711106038836),\n (-0.6332220120842996, -0.06381791823047896, 1.1196983583774054),\n (-1.053200912106195, 0.9539501896695028, -0.27567270246542575),\n (1.3186422395164141, 0.7623906284020254, 0.038976118645639976),\n (1.2540872076899663, -0.8606590725145833, -0.09003882710357966)),\n 'isotopes': (12, 14, 1, 1, 1, 1, 1),\n 'symbols': ('C', 'N', 'H', 'H', 'H', 'H', 'H')}\n ch2nh2_xyz = {'coords': ((0.6919493009211066, 0.054389375309083846, 0.02065422596281878),\n (1.3094508022837807, -0.830934909576592, 0.14456347719459348),\n (1.1649142139806816, 1.030396183273415, 0.08526955368597328),\n (-0.7278194451655412, -0.06628299353512612, -0.30657582460750543),\n (-1.2832757211903472, 0.7307667658607352, 0.00177732009031573),\n (-1.155219150829674, -0.9183344213315149, 0.05431124767380799)),\n 'isotopes': (12, 1, 1, 14, 1, 1),\n 'symbols': ('C', 'H', 'H', 'N', 'H', 'H')}\n r_1 = ARCSpecies(label='H', smiles='[H]', xyz={'coords': ((0, 0, 0),), 'isotopes': (1,), 'symbols': ('H',)})\n r_2 = ARCSpecies(label='CH3NH2', smiles='CN', xyz=ch3nh2_xyz)\n p_1 = ARCSpecies(label='H2', smiles='[H][H]', xyz=self.h2_xyz)\n p_2 = ARCSpecies(label='CH2NH2', smiles='[CH2]N', xyz=ch2nh2_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertIn(atom_map[0], [0,1])\n self.assertEqual(atom_map[1], 2)\n self.assertEqual(atom_map[2], 5)\n self.assertIn(atom_map[3], [0, 1, 3, 4])\n self.assertIn(atom_map[4], [0, 1, 3, 4])\n self.assertIn(atom_map[5], [0, 1, 3, 4])\n self.assertTrue(any(atom_map[r_index] in [0, 1] for r_index in [3, 4, 5]))\n self.assertIn(atom_map[6], [6, 7])\n self.assertIn(atom_map[7], [6, 7])\n self.assertTrue(check_atom_map(rxn))\n\n # CH4 + OH <=> CH3 + H2O\n r_1 = ARCSpecies(label='CH4', smiles='C', xyz=self.ch4_xyz)\n r_2 = ARCSpecies(label='OH', smiles='[OH]', xyz=self.oh_xyz)\n p_1 = ARCSpecies(label='CH3', smiles='[CH3]', xyz=self.ch3_xyz)\n p_2 = ARCSpecies(label='H2O', smiles='O', xyz=self.h2o_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 0)\n self.assertIn(atom_map[1], [1, 2, 3, 5, 6])\n self.assertIn(atom_map[2], [1, 2, 3, 5, 6])\n self.assertIn(atom_map[3], [1, 2, 3, 5, 6])\n self.assertIn(atom_map[4], [1, 2, 3, 5, 6])\n self.assertEqual(atom_map[5], 4)\n self.assertIn(atom_map[6], [5, 6])\n self.assertTrue(any(atom_map[r_index] in [5, 6] for r_index in [1, 2, 3, 4]))\n self.assertTrue(check_atom_map(rxn))\n\n # NH2 + N2H4 <=> NH3 + N2H3\n r_1 = ARCSpecies(label='NH2', smiles='[NH2]', xyz=self.nh2_xyz)\n r_2 = 
ARCSpecies(label='N2H4', smiles='NN', xyz=self.n2h4_xyz)\n p_1 = ARCSpecies(label='NH3', smiles='N', xyz=self.nh3_xyz)\n p_2 = ARCSpecies(label='N2H3', smiles='N[NH]', xyz=self.n2h3_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 0)\n self.assertIn(atom_map[1], [1, 2, 3])\n self.assertIn(atom_map[2], [1, 2, 3])\n self.assertIn(atom_map[3], [4, 5])\n self.assertIn(atom_map[4], [4, 5])\n self.assertTrue(any(atom_map[r_index] in [1, 2, 3] for r_index in [5, 6, 7, 8]))\n self.assertTrue(check_atom_map(rxn))\n\n # NH2 + N2H4 <=> N2H3 + NH3 (reversed product order compared to the above reaction)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 5)\n self.assertIn(atom_map[1], [6, 7, 8])\n self.assertIn(atom_map[2], [6, 7, 8])\n self.assertIn(atom_map[3], [0, 1])\n self.assertIn(atom_map[4], [0, 1])\n self.assertTrue(any(atom_map[r_index] in [6, 7, 8] for r_index in [5, 6, 7, 8]))\n self.assertTrue(check_atom_map(rxn))\n\n\n # CH3OO + CH3CH2OH <=> CH3OOH + CH3CH2O / peroxyl to alkoxyl, modified atom and product order\n r_1 = ARCSpecies(\n label=\"CH3OO\",\n smiles=\"CO[O]\", xyz=\"\"\"C -0.41690000 0.03757000 0.00590000\n O 0.83973000 0.69383000 -0.05239000\n O 1.79663000 -0.33527000 -0.02406000\n H -0.54204000 -0.62249000 -0.85805000\n H -1.20487000 0.79501000 -0.01439000\n H -0.50439000 -0.53527000 0.93431000\"\"\")\n r_2 = ARCSpecies(label='CH3CH2OH', smiles='CCO', xyz=\"\"\"C -0.97459464 0.29181710 0.10303882\n C 0.39565894 -0.35143697 0.10221676\n H -1.68942501 -0.32359616 0.65926091\n H -0.93861751 1.28685508 0.55523033\n H -1.35943743 0.38135479 -0.91822428\n H 0.76858330 -0.46187184 1.12485643\n H 1.10301149 0.25256708 -0.47388355\n O 0.30253309 -1.63748710 -0.49196889\n H 1.19485981 -2.02360458 -0.47786539\"\"\")\n p_1 = ARCSpecies(label='CH3OOH', smiles='COO', xyz=\"\"\"C -0.76039072 0.01483858 -0.00903344\n H -1.56632337 0.61401630 -0.44251282\n H -1.02943316 -0.30449156 1.00193709\n O 0.16024511 1.92327904 0.86381800\n H -0.60052507 -0.86954495 -0.63086438\n O 0.44475333 0.76952102 0.02291303\n H 0.30391344 2.59629139 0.17435159\"\"\")\n p_2 = ARCSpecies(label='CH3CH2O', smiles='CC[O]', xyz=\"\"\"C 0.79799272 -0.01511040 0.00517437\n H -1.13881231 -0.99286049 0.06963185\n O 1.17260343 -0.72227959 -1.04851579\n H -1.14162013 0.59700303 0.84092854\n H -1.13266865 0.46233725 -0.93283228\n C -0.74046271 0.02568566 -0.00568694\n H 1.11374677 1.03794239 0.06905096\n H 1.06944350 -0.38306117 1.00698657\"\"\")\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual([0,5,3],atom_map[0:3])\n self.assertIn(tuple(atom_map[3:6]), list(permutations([1, 2, 4])))\n self.assertEqual([12, 7], atom_map[6:8])\n self.assertIn(tuple(atom_map[8:11]),list(permutations([8, 10, 11])))\n self.assertIn(tuple(atom_map[11:13]),list(permutations([13, 14])))\n self.assertEqual([9,6], atom_map[13:]) \n self.assertTrue(check_atom_map(rxn))\n\n # C3H6O + OH <=> C3H5O + H2O\n r_1 = ARCSpecies(label='C3H6O', smiles='CCC=O', xyz=self.c3h6o_xyz)\n r_2 = ARCSpecies(label='OH', smiles='[OH]', xyz=self.oh_xyz)\n p_1 = ARCSpecies(label='C3H5O', smiles='C[CH]C=O', xyz=self.c3h5o_xyz)\n p_2 = ARCSpecies(label='H2O', smiles='O', xyz=self.h2o_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n 
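# atom_map is read here as reactant-atom index -> product-atom index, e.g.\n        # atom_map[0] == 0 below asserts that reactant atom 0 (the NH2 nitrogen)\n        # becomes product atom 0 (the NH3 nitrogen)\n        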
rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[:4], [0, 1, 3, 4])\n self.assertIn(atom_map[4], [5,6, 7])\n self.assertIn(atom_map[5], [5, 6, 7])\n self.assertIn(atom_map[6], [5, 6, 7])\n self.assertIn(atom_map[7], [2, 11])\n self.assertIn(atom_map[8], [2, 11])\n self.assertEqual(atom_map[9:], [8, 9, 10])\n\n # C4H10O + OH <=> C4H9O + H2O\n r_1 = ARCSpecies(label='C4H10O', smiles='CC(C)CO', xyz=self.c4h10o_xyz)\n r_2 = ARCSpecies(label='OH', smiles='[OH]', xyz=self.oh_xyz)\n p_1 = ARCSpecies(label='C4H9O', smiles='[CH2]C(C)CO', xyz=self.c4h9o_xyz)\n p_2 = ARCSpecies(label='H2O', smiles='O', xyz=self.h2o_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[:5], [0, 3, 4, 5, 6])\n for index in [5, 6, 7]:\n self.assertIn(atom_map[index], [1, 2, 15, 16])\n self.assertEqual(atom_map[8],7)\n for i in atom_map[9:12]:\n self.assertIn(i,[8,9,10])\n for i in atom_map[12:14]:\n self.assertIn(i,[11,12])\n self.assertEqual(atom_map[14],13)\n self.assertEqual(atom_map[15],14)\n self.assertIn(atom_map[16], [15, 16])\n self.assertTrue(check_atom_map(rxn))\n\n # C3H6O + C4H9O <=> C3H5O + C4H10O\n r_1 = ARCSpecies(label='C3H6O', smiles='CCC=O', xyz=self.c3h6o_xyz)\n r_2 = ARCSpecies(label='C4H9O', smiles='[CH2]C(C)CO', xyz=self.c4h9o_xyz)\n p_1 = ARCSpecies(label='C3H5O', smiles='C[CH]C=O', xyz=self.c3h5o_xyz)\n p_2 = ARCSpecies(label='C4H10O', smiles='CC(C)CO', xyz=self.c4h10o_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0:4], [0, 1, 3, 4])\n self.assertIn(atom_map[4], [5,6, 7])\n self.assertIn(atom_map[5], [5,6, 7])\n self.assertIn(atom_map[6], [5,6, 7])\n self.assertIn(atom_map[7], [2, 14, 15, 16, 18, 19, 20])\n self.assertIn(atom_map[8], [2, 14, 15, 16, 18, 19, 20])\n self.assertIn(2, atom_map[7:9])\n self.assertEqual(atom_map[9], 8)\n self.assertIn(atom_map[10], [9,11])\n self.assertIn(atom_map[11], [14, 15, 16,18,19,20])\n self.assertIn(atom_map[12], [14, 15, 16,18,19,20])\n self.assertEqual(atom_map[13],10)\n self.assertIn(atom_map[14], [9,11])\n self.assertEqual(atom_map[15:17], [12,13])\n self.assertEqual(atom_map[17],17)\n self.assertIn(atom_map[18], [14, 15, 16,18,19,20])\n self.assertIn(atom_map[19], [14, 15, 16,18,19,20])\n self.assertIn(atom_map[20], [14, 15, 16,18,19,20])\n self.assertIn(atom_map[21], [21,22])\n self.assertIn(atom_map[22], [21,22])\n self.assertEqual(atom_map[23],23)\n self.assertTrue(check_atom_map(rxn))\n\n\n # ClCH3 + H <=> CH3 + HCl\n r_1 = ARCSpecies(label=\"ClCH3\", smiles=\"CCl\", xyz=self.ch3cl_xyz)\n r_2 = ARCSpecies(label=\"H\", smiles=\"[H]\", xyz=self.h_rad_xyz)\n p_1 = ARCSpecies(label=\"CH3\", smiles=\"[CH3]\", xyz=self.ch3_xyz_2)\n p_2 = ARCSpecies(label=\"HCl\", smiles=\"[H][Cl]\", xyz=self.hcl_xyz)\n rxn = ARCReaction(r_species=[r_2, r_1], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(rxn.family.label.lower(),\"cl_abstraction\")\n self.assertEqual(atom_map[:3], [0, 1, 2])\n for i in atom_map[3:]:\n self.assertIn(i, [3, 4, 5])\n self.assertTrue(check_atom_map(rxn))\n # ClCH3 + H <=> CH3 + HCl different order\n rxn_2 = ARCReaction(r_species=[r_1, r_2], p_species=[p_2, p_1])\n rxn_2.determine_family(self.rmgdb)\n atom_map = rxn_2.atom_map\n self.assertEqual(atom_map[:2], [1, 2])\n for index in [2, 3, 4]:\n self.assertIn(atom_map[index], [3, 4, 5])\n 
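# symmetry-equivalent H atoms may legitimately map to any of several product\n        # positions, hence the assertIn (set-membership) checks above; only\n        # symmetry-distinct atoms get exact assertEqual checks\n        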
self.assertEqual(atom_map[-1], 0)\n self.assertTrue(check_atom_map(rxn))\n\n # [OH] + CC(Cl)C(Cl)Cl <=> OCl + C[CH]C(Cl)Cl\n smiles = []\n for i in '[OH] + CC(Cl)C(Cl)Cl <=> OCl + C[CH]C(Cl)Cl'.split():\n if i != \"<=>\" and i != '+':\n smiles.append(i)\n\n r_1_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1),\n 'coords': ((0.48890386738601, 0.0, 0.0), (-0.48890386738601, 0.0, 0.0))}\n\n r_2_xyz = {'symbols': ('C', 'C', 'Cl', 'C', 'Cl', 'Cl', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 35, 12, 35, 35, 1, 1, 1, 1, 1), 'coords': (\n (1.2438372893135106, 0.40661350465687324, -0.16279018264054892),\n (0.07827324125005171, -0.277154649803216, 0.5482887194488805),\n (-0.1538756923467617, 0.5009471321060629, 2.155037501334864),\n (-1.245183156820767, -0.303306879503286, -0.23533878891899096),\n (-1.1043944712471334, -1.3227416585177485, -1.7010412234762065),\n (-1.8186157680197266, 1.3177860639647956, -0.7221760707038685),\n (2.159163866798944, 0.32583527910226096, 0.4346504778666261),\n (1.056514815021544, 1.471768404816661, -0.33289291962920015),\n (1.4499964728678152, -0.05967057895051073, -1.131013164504492),\n (0.3717352549047681, -1.308596593192221, 0.7750989547682503),\n (-2.0374518517222544, -0.751480024679671, 0.37217669645466245))}\n\n p_1_xyz = {'symbols': ('O', 'Cl', 'H'), 'isotopes': (16, 35, 1), 'coords': (\n (-0.3223044372303026, 0.4343354356368888, 0.0), (1.2650242694442462, -0.12042710381137228, 0.0),\n (-0.9427198322139436, -0.3139083318255167, 0.0))}\n\n p_2_xyz = {'symbols': ('C', 'C', 'C', 'Cl', 'Cl', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 12, 35, 35, 1, 1, 1, 1, 1), 'coords': (\n (-1.3496376883278178, -0.020445981649800302, -0.1995184115269273),\n (-0.051149096449292386, -0.3885500107837139, 0.4222976979623008),\n (1.217696701041357, 0.15947991928242372, -0.1242718714010236),\n (1.7092794464102241, 1.570982412202936, 0.8295196720275746),\n (2.474584210365428, -1.0919019396606517, -0.06869614478411318),\n (-1.6045061896547035, 1.0179450876989615, 0.03024632893682861),\n (-1.3137314500783486, -0.14754777860704252, -1.2853589013330937),\n (-2.1459595425475264, -0.6625965540242661, 0.188478021031359),\n (-0.044412318929613885, -0.9093853981117669, 1.373599947353138),\n (1.1078359281702537, 0.47202024365290884, -1.1662963382659064))}\n\n r_1 = ARCSpecies(label='r1', smiles=smiles[0],xyz=r_1_xyz )\n r_2 = ARCSpecies(label='r2', smiles=smiles[1],xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles=smiles[2],xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles=smiles[3],xyz=p_2_xyz)\n\n rxn1 = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn1.determine_family(self.rmgdb)\n atom_map = rxn1.atom_map\n #expected: [0, 2, 3, 4, 1, 5, [6, 7], [6, 7], [8, 9, 10], [8, 9, 10], [8, 9, 10], 11, 12]\n self.assertEqual(atom_map[:6], [0,2,3,4,1,5])\n self.assertIn(atom_map[6],[6,7])\n self.assertIn(atom_map[7], [6, 7])\n self.assertIn(atom_map[8], [8,9,10])\n self.assertIn(atom_map[9], [8,9,10])\n self.assertIn(atom_map[10], [8,9,10])\n self.assertEqual(atom_map[11],11)\n self.assertEqual(atom_map[12], 12)\n self.assertTrue(check_atom_map(rxn))\n\n # Br abstraction\n\n # OH + CH3Br <=> HOBr + CH3\n r_1_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1),\n 'coords': ((0.48890386738601, 0.0, 0.0), (-0.48890386738601, 0.0, 0.0))}\n\n r_2_xyz = {'symbols': ('C', 'Br', 'H', 'H', 'H'), 'isotopes': (12, 79, 1, 1, 1), 'coords': (\n (-0.18386469024502916, -0.0018692264481234688, 0.0013619971891954718),\n (1.7508998155803106, 0.017800204658373744, -0.01296995950979447),\n 
(-0.5218757573028803, -0.6458197160504338, -0.8118262063895171),\n (-0.5338693855859405, 1.0212985296781085, -0.14294057406667127),\n (-0.5112899824464621, -0.3914097918379277, 0.9663747427767874))}\n\n p_1_xyz = {'symbols': ('O', 'Br', 'H'), 'isotopes': (16, 79, 1), 'coords': (\n (-0.3691040522383542, 0.44403140947953346, 0.0), (1.3490312999095744, -0.1319682267704319, 0.0),\n (-0.9799272476712202, -0.31206318270910166, 0.0))}\n\n p_2_xyz = {'symbols': ('C', 'H', 'H', 'H'), 'isotopes': (12, 1, 1, 1), 'coords': (\n (3.3746019998564553e-09, 5.828827384106545e-09, -4.859105107686622e-09),\n (1.0669051052331406, -0.17519582095514982, 0.05416492980439295),\n (-0.6853171627400634, -0.8375353626879753, -0.028085652887100996),\n (-0.3815879458676787, 1.0127311778142964, -0.026079272058187608))}\n\n r_1 = ARCSpecies(label='r1', smiles='[O][H]', xyz=r_1_xyz)\n r_2 = ARCSpecies(label='r2', smiles='[CH3]Br', xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles='OBr', xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles='[CH3]', xyz=p_2_xyz)\n\n rxn1 = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn1.determine_family(self.rmgdb)\n atom_map = rxn1.atom_map\n self.assertEqual(atom_map[:4],[0,2,3,1])\n self.assertIn(atom_map[4], [4,5,6])\n self.assertIn(atom_map[5], [4, 5, 6])\n self.assertIn(atom_map[6], [4, 5, 6])\n self.assertTrue(check_atom_map(rxn))\n\n # [H] + CC(=O)Br <=> [H][Br] + C[C](=O)\n r_1_xyz = {'symbols': ('H',), 'isotopes': (1,), 'coords': ((0.0, 0.0, 0.0),)}\n\n r_2_xyz = {'symbols': ('C', 'C', 'O', 'Br', 'H', 'H', 'H'), 'isotopes': (12, 12, 16, 79, 1, 1, 1), 'coords': (\n (-0.7087772076387326, -0.08697184565826255, 0.08295914062572969),\n (0.7238141593293749, 0.2762480677183181, -0.14965326856248656),\n (1.1113560248255752, 1.3624373452907719, -0.554840372311578),\n (2.0636725443687616, -1.041297021241265, 0.20693447296577364),\n (-0.9844931733249197, -0.9305935329026733, -0.5546432084044857),\n (-0.8586221633621384, -0.3455305862905263, 1.134123935245044),\n (-1.3469501841979155, 0.7657075730836449, -0.16488069955797996))}\n\n p_1_xyz = {'symbols': ('C', 'C', 'O', 'H', 'H', 'H'), 'isotopes': (12, 12, 16, 1, 1, 1), 'coords': (\n (-0.4758624005470258, 0.015865899777425058, -0.11215987340300927),\n (0.9456990856850401, -0.031530842469194666, 0.2228995599390481),\n (2.0897646616994816, -0.06967555524967288, 0.492553667108967),\n (-1.08983188764878, -0.06771143046366379, 0.7892594299969324),\n (-0.7261604551815313, 0.9578749227991876, -0.6086176800339509),\n (-0.7436090040071672, -0.8048229943940851, -0.7839351036079769))}\n\n p_2_xyz = {'symbols': ('Br', 'H'), 'isotopes': (79, 1),\n 'coords': ((0.7644788559644482, 0.0, 0.0), (-0.7644788559644482, 0.0, 0.0))}\n\n r_1 = ARCSpecies(label='r1', smiles='[H]', xyz=r_1_xyz)\n r_2 = ARCSpecies(label='r2', smiles='CC(=O)Br', xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles='C[C](=O)', xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles='[Br][H]', xyz=p_2_xyz)\n\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map=rxn.atom_map\n self.assertEqual(atom_map[:5], [1, 2, 3, 4, 0])\n self.assertIn(tuple(atom_map[5:]), permutations([5, 6, 7]))\n self.assertTrue(check_atom_map(rxn))\n\n #Change Order [H] + CC(=O)Br <=> C[C](=O) + [H][Br]\n r_1 = ARCSpecies(label='r1', smiles='[H]', xyz=r_1_xyz)\n r_2 = ARCSpecies(label='r2', smiles='CC(=O)Br', xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles='C[C](=O)', xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles='[H][Br]', xyz=p_2_xyz)\n\n rxn = 
ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map=rxn.atom_map\n self.assertEqual(atom_map[:5], [7, 0, 1, 2, 6])\n self.assertIn(tuple(atom_map[5:]), list(permutations([3, 4, 5])))\n self.assertTrue(check_atom_map(rxn))\n\n # [O] + CC(Cl)(Cl)C(Cl)(Cl)Cl <=> [O][Cl] + C[C](Cl)C(Cl)(Cl)Cl\n smiles = ['[O]', 'CC(Cl)(Cl)C(Cl)(Cl)Cl', '[O][Cl]', 'C[C](Cl)C(Cl)(Cl)Cl']\n r_1_xyz = {'symbols': ('O',), 'isotopes': (16,), 'coords': ((0.0, 0.0, 0.0),)}\n \n r_2_xyz = {'symbols': ('C', 'C', 'Cl', 'Cl', 'C', 'Cl', 'Cl', 'Cl', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 35, 35, 12, 35, 35, 35, 1, 1, 1), 'coords': (\n (-1.3340513332954889, 0.2811635614535751, -0.078045907046801),\n (-0.06460593375936133, -0.5810773314093911, -0.02962891425941322),\n (-0.2609310384494481, -1.7354943987581986, 1.3623405448734305),\n (-0.06523629769352735, -1.6097818007913829, -1.5298182298699716),\n (1.2568349080206898, 0.251354210359208, 0.09596787533379413),\n (2.7373740437547514, -0.7858820942054363, 0.1510602855327231),\n (1.4729373085674606, 1.396702908938121, -1.2920641361183987),\n (1.2776463867390788, 1.2712465700052025, 1.5941477468638563),\n (-1.3327512075949484, 0.9633461541030465, -0.9346702675682734),\n (-2.235286345856216, -0.338363905821591, -0.1659562352150731),\n (-1.45193049043298, 0.886786126126846, 0.8266672374741411))}\n \n p_1_xyz = {'symbols': ('O', 'Cl'), 'isotopes': (16, 35),\n 'coords': ((0.8407400963991551, 0.0, 0.0), (-0.8407400963991551, 0.0, 0.0))}\n \n p_2_xyz = {'symbols': ('C', 'C', 'Cl', 'C', 'Cl', 'Cl', 'Cl', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 35, 12, 35, 35, 35, 1, 1, 1), 'coords': (\n (-1.3826664358998055, -0.04852445131046896, -0.016935550260331302),\n (-0.01984344739858957, 0.5351447284412386, 0.14069644461529232),\n (0.06780252918727915, 2.0178457939896477, 1.0316373428560468),\n (1.240695333262242, -0.22627953918952265, -0.15010504208991474),\n (2.5003017492701316, 0.8385176202279041, -0.8511606324628386),\n (1.8619474142609682, -0.9616513146239644, 1.3591396432655138),\n (0.9630230000989414, -1.5484613928720057, -1.3347069863893728),\n (-1.4535219021739985, -1.0095075283181074, 0.502205010423143),\n (-2.1607091682952886, 0.6031752006499635, 0.39420249485619346),\n (-1.6170290723118037, -0.20025911699469934, -1.0749727248137075))}\n \n r_1 = ARCSpecies(label='r1', smiles=smiles[0], xyz=r_1_xyz)\n r_2 = ARCSpecies(label='r2', smiles=smiles[1], xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles=smiles[2], xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles=smiles[3], xyz=p_2_xyz)\n \n rxn1 = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn1.determine_family(self.rmgdb)\n atom_map = rxn1.atom_map\n self.assertEqual(atom_map[:3],[0,2,3])\n self.assertIn(atom_map[3:5],[[1,4],[4,1]])\n self.assertEqual(atom_map[5],5)\n self.assertIn(atom_map[6], [6,7,8])\n self.assertIn(atom_map[7], [6, 7, 8])\n self.assertIn(atom_map[8], [6, 7, 8])\n self.assertIn(atom_map[9], [9, 10, 11])\n self.assertIn(atom_map[10], [9, 10, 11])\n self.assertIn(atom_map[11], [9, 10, 11])\n self.assertTrue(check_atom_map(rxn1))", "title": "" }, { "docid": "4396e3b99e8e34fca5c6b7be278751a1", "score": "0.49343368", "text": "def geo_position(splice_junctions):\n u5='UCAUUUUCCGC'\n geo_position={}\n pair=''\n \n Watson_Crick=[0,0,0,0,0,0,0,0,0,0,0]\n isosteric=[0,0,0,0,0,0,0,0,0,0,0]\n different=[0,0,0,0,0,0,0,0,0,0,0]\n\n for j in splice_junctions:\n n=0\n while n<11:\n \n pair=str(j[n])+str(u5[n])\n\n if pair=='GC' or pair=='CG' or pair=='AU' or 
pair=='TA':\n Watson_Crick[n]+=1\n elif pair=='AA' or pair=='GA' or pair=='GG' or pair=='AG' or pair=='CC':\n different[n]+=1\n else:\n isosteric[n]+=1\n n+=1\n if n==11:\n break\n \n geo_position['Watson_Crick']=Watson_Crick\n geo_position['isosteric']=isosteric\n geo_position['different']=different\n\n print(geo_position)\n return(geo_position)", "title": "" }, { "docid": "330872bf27e699648fa8b3652edebb56", "score": "0.49271697", "text": "def run_ortholog_mapping(species1, species2, species1_path,species2_path,out_folder): \n\teValue = '1e-5'\n\tminimumCoverage = 0.5\n\n\t# Run BLAST\n\tblast_results_path= os.path.join(out_folder,species1+ '-' + species2 + '-blast_stats.best.txt')\n\tblast_coverage_cutoff_path= os.path.join(out_folder,species1+ '-' + species2 + '-blast_stats_coverageCutoff.best.txt')\n\tif not (os.path.exists(species1_path) and os.path.exists(species2_path)):\n\t\traise UserWarning('Input aa sequence file does not exist for '+species1+\" or \"+species2)\n\tif not os.path.exists(os.path.join(out_folder,species1+'-'+species2+'-blast_stats.best.txt')):\n\t\tprint (\"> Running BLAST for \"+species1+\" against \"+species2)\n\t\trun_blast(species1_path, species2_path, blast_results_path, eValue)\n\telse:\n\t\tprint (\"BLAST of \"+species1+\" against \"+species2+\" already done. Skipping.\")\n\n\t# Apply coverage cutoff\n\tif not os.path.exists(blast_coverage_cutoff_path):\n\t\toFormat = lambda a, b, e: ('\\t'.join([b]+a.split('_')+[str(e)])+'\\n')\t# Format wanted for the output: a the target name, b the query name, and e the evalue in the blast results file\n\t\tcoverage_cutoff_on_blast(blast_results_path,blast_coverage_cutoff_path, minimumCoverage,oFormat)\n\telse: \n\t\tprint (\"Curation of BLAST results of \"+species1+\" against \"+species2+\" already done. Skipping.\")\n\n\treturn", "title": "" }, { "docid": "47d35ed55791e3bd8318bb280efbaec7", "score": "0.49214986", "text": "def region_exposure(region,include_storms=True,event_set=False,sens_analysis_storms=[],save=True): \n country = region[:2]\n \n data_path = load_config()['paths']['data'] \n \n osm_path = os.path.join(data_path,'OSM','{}.osm.pbf'.format(country))\n \n area_poly = os.path.join(data_path,country,'NUTS3_POLY','{}.poly'.format(region))\n area_pbf = os.path.join(data_path,country,'NUTS3_OSM','{}.osm.pbf'.format(region))\n\n if (region == 'UKN01') | (region == 'UKN02') | (region == 'UKN03') | (region == 'UKN04') | (region == 'UKN05'):\n osm_path = os.path.join(data_path,'OSM','IE.osm.pbf') \n \n clip_osm(data_path,osm_path,area_poly,area_pbf) \n \n gdf_table = fetch_buildings(data_path,country,region,regional=True)\n \n print ('Fetched all buildings from osm data for {}'.format(region))\n\n # convert to european coordinate system for overlap\n gdf_table = gdf_table.to_crs(epsg=3035)\n \n print(len(gdf_table))\n\n # Specify Country\n gdf_table[\"COUNTRY\"] = country\n \n # give unique_id \n gdf_table['ID_'] = [str(x)+'_'+region for x in gdf_table.index]\n \n # Calculate area\n gdf_table[\"AREA_m2\"] = gdf_table.geometry.area\n\n # Determine centroid\n gdf_table[\"centroid\"] = gdf_table.geometry.centroid\n\n nuts_eu = gpd.read_file(os.path.join(data_path,'input_data','NUTS3_ETRS.shp'))\n\n nuts_eu.loc[nuts_eu['NUTS_ID']==region].to_file(os.path.join(data_path,\n country,'NUTS3_SHAPE','{}.shp'.format(region)))\n\n # create geometry envelope outline for rasterstats. 
Use a buffer to make sure all buildings are in there.\n geoms = [mapping(nuts_eu.loc[nuts_eu['NUTS_ID']==region].geometry.envelope.buffer(10000).values[0])]\n\n # Get land use values \n with rio.open(os.path.join(data_path,'input_data','g100_clc12_V18_5.tif')) as src:\n out_image, out_transform = mask(src, geoms, crop=True)\n out_image = out_image[0,:,:]\n tqdm.pandas(desc='CLC_2012_'+region)\n gdf_table['CLC_2012'] = gdf_table.centroid.progress_apply(lambda x: get_raster_value(x,out_image,out_transform))\n\n # Obtain storm values for sensitivity analysis storms \n if len(sens_analysis_storms) > 0:\n storm_list = load_sens_analysis_storms(sens_analysis_storms)\n for outrast_storm in storm_list:\n storm_name = str(int2date(get_num(outrast_storm[-23:].split('_')[0][:-2])))\n tqdm.pandas(desc=storm_name+'_'+region)\n with rio.open(outrast_storm) as src:\n out_image, out_transform = mask(src, geoms, crop=True)\n out_image = out_image[0,:,:]\n gdf_table[storm_name] = gdf_table.centroid.progress_apply(lambda x: get_raster_value(x,out_image,out_transform))\n\n # Obtain storm values for historical storms\n elif (include_storms == True) & (event_set == False):\n storm_list = get_storm_list(data_path)\n for outrast_storm in storm_list:\n storm_name = str(int2date(get_num(outrast_storm[-23:].split('_')[0][:-2])))\n tqdm.pandas(desc=storm_name+'_'+region)\n with rio.open(outrast_storm) as src:\n out_image, out_transform = mask(src, geoms, crop=True)\n out_image = out_image[0,:,:]\n gdf_table[storm_name] = gdf_table.centroid.progress_apply(lambda x: get_raster_value(x,out_image,out_transform))\n gdf_table[storm_name].loc[gdf_table[storm_name] < 0] = 0 \n gdf_table[storm_name].loc[gdf_table[storm_name] > 500] = 0 \n\n # Obtain storm values for event set storms\n elif (include_storms == True) & (event_set == True):\n #geoms = [mapping(nuts_eu.loc[nuts_eu['NUTS_ID']==region].to_crs({'init': 'epsg:4326'}).geometry.envelope.buffer(0.1).values[0])]\n storm_list = get_event_storm_list(data_path)[:10]\n for outrast_storm in tqdm(storm_list,total=len(storm_list),desc=region):\n storm_name = str(int2date(get_num(outrast_storm[-24:].split('_')[0][:-4])))\n with rio.open(outrast_storm) as src:\n out_image = src.read(1)\n out_transform = src.transform\n gdf_table[storm_name] = gdf_table.centroid.apply(lambda x: get_raster_value(x,out_image,out_transform))\n\n if save == True:\n df_exposure = pd.DataFrame(gdf_table)\n df_exposure.to_csv(os.path.join(data_path,'output_exposure',country,'{}_exposure.csv'.format(region))) \n\n print ('Obtained all storm information for {}'.format(region))\n\n return gdf_table", "title": "" }, { "docid": "614f88b9d78585e5c40cb77a177c353f", "score": "0.49162424", "text": "def output_nir_user_openwater(cfg, cells):\n ### Extract the user data from the reference shape file ###\n # Open the shapefile\n o_shapefile = shapefile.Reader(cfg.output_nir_reference_shapefile, 'r')\n\n # Get the attribute table\n sl_fields = o_shapefile.records()\n\n # Close the shapefile\n del o_shapefile\n\n ### Find the total acreage of each crop type in each ET zone ###\n # Get the unique crop types in the file\n sl_crop_types = list(set([sl_fields[x][1] for x in range(0, len(sl_fields), 1)]))\n\n # Get the unique et zones in the file\n ia_et_zones = np.unique(np.array(([sl_fields[x][4] for x in range(0, len(sl_fields), 1)])).astype(int))\n\n # Create a matrix of size et zones by crop types\n dm_crop_acreages = np.zeros((len(ia_et_zones), len(sl_crop_types)))\n\n # Loop and fill the matrix\n ia_column_indices = 
np.array([sl_crop_types.index(sl_fields[x][1]) for x in range(0, len(sl_fields), 1)])\n ia_row_indices = np.array([np.argwhere(ia_et_zones == int(sl_fields[x][4])).flatten()[0]\n for x in range(0, len(sl_fields), 1)])\n\n for x in range(0, len(sl_fields), 1):\n dm_crop_acreages[ia_row_indices[x], ia_column_indices[x]] += sl_fields[x][2]\n\n ### Get the crop type NIR contribution from the ET zones ###\n # Crack the dictionary into items and keys to make iteration easier\n sl_cell_keys = np.array(list(cells.et_cells_data.keys())).astype(int)\n sl_cell_items = list(cells.et_cells_data.values())\n\n # Determine the size of the data matrix\n i_number_of_timesteps = np.max([x.nir.shape[0] for x in sl_cell_items if hasattr(x, 'nir')])\n da_dates = np.array([x.nir.index for x in sl_cell_items if hasattr(x, 'nir') and len(x.nir.index) == i_number_of_timesteps][0])\n\n # Create a matrix to hold the nir for each et zone of shape et zones by crop types\n dm_zone_nir = np.zeros((len(cells.et_cells_data), i_number_of_timesteps))\n\n # Loop on the cells and fill the NIR requirement\n for i_entry_zone in range(0, len(cells.et_cells_data), 1):\n # Check if the crop nir has been set in cell\n if hasattr(sl_cell_items[i_entry_zone], 'crop2nir'):\n # The correct field exists. Process the data into the\n dm_zone_nir[i_entry_zone, :] = sl_cell_items[i_entry_zone].nir['et'].values - \\\n sl_cell_items[i_entry_zone].nir['ppt'].values\n\n ### Convert the total NIR to per acre amounts ###\n # Find the correct column for each of the shapefile crop types\n ia_shapefile_crop_columns = np.zeros(len(sl_crop_types)).astype(int)\n ia_crop_types = np.zeros(len(sl_crop_types))\n\n for i_entry_crop in range(0, len(sl_crop_types), 1):\n # Get the crop type number from the cross-mapping file\n i_crop_type = cfg.output_nir_crop_mapping[sl_crop_types[i_entry_crop]]\n\n # Set the crop type into the array\n ia_crop_types[i_entry_crop] = i_crop_type\n\n # Loop for the index in the crop numbers array and set into the column vect\n ia_shapefile_crop_columns[i_entry_crop] = np.argwhere(np.array(cfg.crop_type_numbers) == i_crop_type).flatten()[0]\n\n # Filter out the indices that are within the output target\n ba_indices2keep = np.in1d(ia_crop_types, cfg.output_nir_user_openwater)\n\n # Get the total areas\n da_crop_acreages = np.sum(np.atleast_2d(dm_crop_acreages[:, ba_indices2keep]), axis=1)\n\n for i_entry_zone in range(0, len(cells.et_cells_data), 1):\n # Check if the crop nir has been set in cell\n if hasattr(sl_cell_items[i_entry_zone], 'crop2nir'):\n # Check if acreage data exists for the zone\n if sl_cell_keys[i_entry_zone] in ia_et_zones:\n # Align the ET demands zone index to the shape file index\n i_shape_file_index = np.argwhere(ia_et_zones == sl_cell_keys[i_entry_zone]).flatten()[0]\n\n # Loop and divide by the acreage\n mid = 1 / da_crop_acreages[i_shape_file_index]\n if np.isinf(mid):\n mid = 0\n\n dm_zone_nir[i_entry_zone, :] *= mid\n\n else:\n # Zero out all values in the row to eliminate the contribution of the zone\n dm_zone_nir[i_entry_zone, :] = 0\n\n ### Sum across the users ###\n # Get the unique users\n sl_unique_users = list(set([sl_fields[x][3] for x in range(0, len(sl_fields), 1)]))\n\n # Create a vector to hold the aggregated information\n da_unique_users_nir = np.zeros((i_number_of_timesteps, len(sl_unique_users)))\n\n # Loop on the attribute table and fill volumes\n for i_entry_record in range(0, len(sl_fields), 1):\n\n # Unpack values from the row\n s_user = sl_fields[i_entry_record][3]\n i_crop_type = 
cfg.output_nir_crop_mapping[sl_fields[i_entry_record][1]]\n i_zone = sl_fields[i_entry_record][4]\n i_acres = sl_fields[i_entry_record][2]\n\n # Get the crop index within the ET demands tool data\n i_crop_index = np.argwhere(np.array(cfg.crop_type_numbers) == i_crop_type).flatten()[0]\n\n # Filter by the crop type\n if i_crop_type in cfg.output_nir_user_openwater:\n # Get the zone index within the ET demands data\n if i_zone in sl_cell_keys:\n # Match the zone index\n i_zone_index = np.argwhere(sl_cell_keys == i_zone).flatten()[0]\n\n # Find the user index\n i_user_index = sl_unique_users.index(s_user)\n\n # Add to the output time series, multiplying by the acres\n da_unique_users_nir[:, i_user_index] += dm_zone_nir[i_zone_index, :] * i_acres\n\n else:\n print('ET Zone ' + str(i_zone) + ' from shapefile is not present in the model run.')\n\n ### Output to the text file ###\n if not os.path.isdir(os.path.join(cfg.project_ws, 'accumulated_user_nir')):\n os.mkdir(os.path.join(cfg.project_ws, 'accumulated_user_nir'))\n\n for i_entry_user in range(0, len(sl_unique_users), 1):\n # Extract and filter the data\n da_data = da_unique_users_nir[:, i_entry_user]\n da_data[np.fabs(da_data) < 1e-6] = 0\n\n # Create a pandas series\n ds_user = pd.Series(data=da_data, index=da_dates)\n\n # Write the series to a file\n ds_user.to_csv(os.path.join(cfg.project_ws, 'accumulated_user_nir', sl_unique_users[i_entry_user].replace(':', '') + '_water.txt'),\n sep='\\t', header=False)", "title": "" }, { "docid": "5844220b4b6fd3d5b8bce9632547cdb5", "score": "0.49089292", "text": "def make_maps(ccdfile, nside, bands, catalog, outputdir):\n mode = 1 # 1: fully sequential, 2: parallel then sequential, 3: fully parallel\n\n\n # Each tuple has [(quantity to be projected, weighting scheme, operation),(etc..)] \n propertiesToKeep = [ 'filter', 'mjd_obs', 'airmass',\n 'ra', 'dec', 'ra0','ra1','ra2','ra3','dec0','dec1','dec2','dec3']\n \n propertiesandoperations = [ ('nobs', '', 'mean'),\n ('airmass', '', 'mean'),\n ('mjd_obs', '', 'min'),\n ('mjd_obs', '', 'mean'),\n ('mjd_obs', '', 'max')] \n \n tbdata = ft.read(ccdfile)\n columns = tbdata.dtype.names \n nobs = np.ones(tbdata.size) # nobs is missing\n \n # Obtain indices that satisfy filter / photometric cuts \n sample_names = []\n inds = []\n \n for band in bands: \n \n good = tbdata['filter'] == band\n if 'photometric' in columns:\n good &= tbdata['photometric'] == True\n if 'bitmask' in columns:\n good &= tbdata['bitmask'] == 0 \n \n if good.sum() > 0:\n inds.append(np.argwhere(good).flatten())\n sample_names.append('band_%s'%band)\n else:\n print(f'there is no {band} in the ccd file')\n \n # Create big table with all relevant properties. 
\n tbdata = np.core.records.fromarrays([tbdata[prop] for prop in propertiesToKeep] + [nobs],\n names = propertiesToKeep + [ 'nobs'])\n \n # Read the table, create Healtree, project it into healpix maps, and write these maps.\n project_and_write_maps_simp(mode, propertiesandoperations, tbdata, catalog, \n outputdir, sample_names, inds, nside)", "title": "" }, { "docid": "26567eed17617b504d5d0f569582ea23", "score": "0.49073857", "text": "def _from_latlon(latitude, longitude, force_zone_number):\n easting = []\n northing = []\n\n for i in range(longitude.shape[0]):\n lat_rad = math.radians(latitude[i])\n lat_sin = math.sin(lat_rad)\n lat_cos = math.cos(lat_rad)\n\n lat_tan = lat_sin / lat_cos\n lat_tan2 = lat_tan * lat_tan\n lat_tan4 = lat_tan2 * lat_tan2\n\n if(force_zone_number < 0):\n zone_number = latlon_to_zone_number(latitude[i], longitude[i])\n\n else:\n zone_number = force_zone_number\n\n lon_rad = math.radians(longitude[i])\n central_lon = (zone_number - 1) * 6 - 180 + 3\n central_lon_rad = math.radians(central_lon)\n\n n = R / math.sqrt(1 - E * lat_sin**2)\n c = E_P2 * lat_cos**2\n\n a = lat_cos * (lon_rad - central_lon_rad)\n a2 = a * a\n a3 = a2 * a\n a4 = a3 * a\n a5 = a4 * a\n a6 = a5 * a\n\n m = R * (M1 * lat_rad -\n M2 * math.sin(2 * lat_rad) +\n M3 * math.sin(4 * lat_rad) -\n M4 * math.sin(6 * lat_rad))\n\n easting.append(K0 * n * (a +\n a3 / 6 * (1 - lat_tan2 + c) +\n a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000)\n\n northing.append(K0 * (m + n * lat_tan * (a2 / 2 +\n a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) +\n a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2))))\n\n if latitude[i] < 0:\n northing[-1] += 10000000\n\n return easting, northing, zone_number", "title": "" }, { "docid": "3c9906bdd1da353b24c83d7d77b11f51", "score": "0.48919928", "text": "def change2geocentric(self):\n\n gdl = self.lat*Misc_Routines.CoFactors.d2r\n slat = numpy.sin(gdl)\n clat = numpy.cos(gdl)\n slat2 = slat**2.\n clat2 = (self.ab2*clat)**2.\n \n sbet = slat/numpy.sqrt(slat2 + clat2)\n sbet2 = (sbet**2.) # < 1\n noval = numpy.where(sbet2>1)\n if noval[0].size>0:sbet2[noval] = 1\n cbet = numpy.sqrt(1. - sbet2)\n \n rgeoid = self.a/numpy.sqrt(1. + self.ep2*sbet2)\n \n x = rgeoid*cbet + self.alt*clat\n y = rgeoid*sbet + self.alt*slat\n\n gcalt = numpy.sqrt(x**2. + y**2.)\n gclat = numpy.arctan2(y,x)/Misc_Routines.CoFactors.d2r\n \n return gclat, gcalt", "title": "" }, { "docid": "15baf6537a453c772e1e14c172d0fa94", "score": "0.48804936", "text": "def segmentations_to_2Djuice(by_chr_dct, outfile, resolution):\n\n with open(outfile, 'w') as outf:\n outf.write(\"\\t\".join(\"chr1 x1 x2 chr2 y1 y2 color comment\".split())+\"\\n\")\n for k in by_chr_dct.keys():\n for i in by_chr_dct[k]:\n line = [k, str(i[0]*resolution),str((i[1])*resolution), k, str(i[0]*resolution),str((i[1])*resolution), \"0,0,255\", \"TAD\"]\n outf.write(\"\\t\".join(line) + \"\\n\")", "title": "" }, { "docid": "fd56438c5880f19ac15c4828b38693c7", "score": "0.48776937", "text": "def _prepare_orb_dict(self):\n # adict: dictionary of {'Fe': ['dxy', 'dyz', ...], ...}\n adict = OrderedDict()\n # orb_dict: {ind_atom:[ind_orb,1,2], ...}\n self.orb_dict = {}\n # labels: {0:{dxy, ...}}\n self.labels = {}\n # magnetic atoms index\n self.ind_mag_atoms = []\n\n sdict = symbol_number(self.atoms)\n\n for i, symbol in enumerate(self.basis):\n if i not in self.exclude_orbs:\n # e.g. 
Fe2, dxy, _, _\n atom_sym, orb_sym = symbol.split('|')[:2]\n if atom_sym in adict:\n adict[atom_sym].append(orb_sym)\n else:\n adict[atom_sym] = [orb_sym]\n iatom = sdict[atom_sym]\n if iatom not in self.orb_dict:\n self.orb_dict[iatom] = [i]\n self.labels[iatom] = [orb_sym]\n else:\n self.orb_dict[iatom] += [i]\n self.labels[iatom] += [orb_sym]\n\n # index of magnetic atoms\n for i, sym in enumerate(self.atoms.get_chemical_symbols()):\n if sym in self.magnetic_elements:\n self.ind_mag_atoms.append(i)\n\n self._spin_dict = {}\n self._atom_dict = {}\n for ispin, iatom in enumerate(self.ind_mag_atoms):\n self._spin_dict[iatom] = ispin\n self._atom_dict[ispin] = iatom", "title": "" }, { "docid": "982f78a572dadf44c426d94bf813157b", "score": "0.48709306", "text": "def read_rmm_eofs(olrfile, u850file, u200file):\n\n # observed EOFs from BOM Australia are saved in individual text files for each variable\n # horizontal resolution of EOFs is 2.5 degree and longitudes go from 0 - 357.5, column 1 is eof1\n # column 2 is eof2 in each file\n EOF1 = xr.DataArray(np.empty([3,144]),dims=['var','lon'],\n coords={'var':['olr','u850','u200'], 'lon':np.arange(0,360,2.5)})\n EOF2 = xr.DataArray(np.empty([3,144]),dims=['var','lon'],\n coords={'var':['olr','u850','u200'], 'lon':np.arange(0,360,2.5)})\n nlon = len(EOF1['lon'])\n\n tmp = pd.read_csv(olrfile, header=None, delim_whitespace=True, names=['eof1','eof2'])\n EOF1[0,:] = tmp.eof1.values\n EOF2[0,:] = tmp.eof2.values\n tmp = pd.read_csv(u850file, header=None, delim_whitespace=True, names=['eof1','eof2'])\n EOF1[1,:] = tmp.eof1.values\n EOF2[1,:] = tmp.eof2.values\n tmp = pd.read_csv(u200file, header=None, delim_whitespace=True, names=['eof1','eof2'])\n EOF1[2,:] = tmp.eof1.values\n EOF2[2,:] = tmp.eof2.values\n\n return EOF1, EOF2", "title": "" }, { "docid": "bbe1640056bb0f6d2d1fb995fcce7347", "score": "0.48633295", "text": "def fov_map(self) -> Mapping[int, str]:\n return {\n 0: \"53\",\n 1: \"75\",\n 2: \"106\",\n }", "title": "" }, { "docid": "5b41d2ff1928b89df82ad3ffa1ba36b0", "score": "0.48604083", "text": "def overlap_feature(te_fdb, teg_fdb, rib_fdb, oth_fdb, chr_id):\n te_index=dict()\n for element in te_fdb[chr_id].items(): \n orientation=None\n for region, strand in oth_fdb[chr_id].items():\n if (element[0][1]<=region[0] and element[0][2]>=region[1]) or (element[0][1]>=region[0] and element[0][1]<= region[1]) or (element[0][2]>=region[0] and element[0][2]<=region[1]):\n orientation=strand\n break\n if orientation:\n te_index[element[0][0]]=orientation\n else:\n if chr_id in rib_fdb:\n for region, strand in rib_fdb[chr_id].items():\n if (element[0][1]<=region[0] and element[0][2]>=region[1]) or (element[0][1]>=region[0] and element[0][1]<= region[1]) or (element[0][2]>=region[0] and element[0][2]<=region[1]):\n orientation=strand\n break\n te_index[element[0][0]]=orientation\n return te_index", "title": "" }, { "docid": "f257bc17f947ac45012a2d2f02eeb296", "score": "0.48466218", "text": "def update_homology_map(pdbDir, homologsPath, curatedChains, curatedHomologs):\n\tprint (\"----------- Update homology map -----------\")\n\tif os.path.exists(curatedHomologs):\n\t\tprint ('Curated Yeast ORF - PDB homologs file already exists. 
Skipping.')\n\t\treturn\n\tokChains = homolog_chains(pdbDir, curatedChains)\n\t\n\t# Intersect curated chains with yeastlike\n\twith open(homologsPath) as fhIn:\n\t\twith open(curatedHomologs, 'w') as fhOut:\n\t\t\tfor line in fhIn:\n\t\t\t\tline = line.strip() # Remove \\n\n\t\t\t\torf, pdbid, chain, evalue = line.split()\n\t\t\t\tif pdbid in okChains and chain in okChains[pdbid]:\n\t\t\t\t\tfhOut.write(line + '\\n')\n\treturn", "title": "" }, { "docid": "2602290c2fd6eebd652607f6d7940ed2", "score": "0.4845031", "text": "def map_intergenic_regions(con, speciesid, intergenic_path): \n \n print \"\\n. Mapping intergenic regions for species\", get_species_name(speciesid, con), \" using intergenic file\", intergenic_path\n \n cur = con.cursor()\n \n fin = open(intergenic_path, \"r\")\n for l in fin.xreadlines():\n tokens = l.split()\n if tokens.__len__() < 5:\n continue\n chrom_name = tokens[0]\n chromid = get_chrom_id(con, chrom_name, speciesid, make_if_missing = False)\n \n start = int(tokens[1])\n stop = int(tokens[2])\n gene_name = tokens[3]\n strand = tokens[4]\n \n geneid = None\n sql = \"select id from Genes where name='\" + gene_name + \"' and chrom=\" + chromid.__str__()\n cur.execute(sql)\n x = cur.fetchone()\n if x == None or x.__len__() == 0:\n print \"\\n. I cannot find a Gene entry for the gene named\", gene_name, \"in your intergenic file.\"\n continue\n geneid = x[0]\n \n sql = \"insert or replace into Intergenics (chromid, start, stop, strand, geneid)\"\n sql += \" values(\" + chromid.__str__() + \",\" + start.__str__() + \",\"\n sql += stop.__str__() + \",'\" + strand.__str__() + \"',\" + geneid.__str__()\n sql += \")\"\n cur.execute(sql)\n con.commit()\n \n chromids = get_chrom_ids(con, speciesid)\n for chromid in chromids: \n sql = \"select count(*) from Intergenics where chromid=\" + chromid.__str__()\n cur.execute(sql)\n count_inserted = cur.fetchone()[0]\n print \". 
Found\", count_inserted, \"intergenic regions for chromosome\", get_chrom_name(con, chromid)", "title": "" }, { "docid": "241ad4e0408455e4b4c21805a7798f2f", "score": "0.48376364", "text": "def orthorectify(self):\n\n # init profiling\n if logger.level == logging.DEBUG:\n tracemalloc.start()\n proc_profile = cProfile.Profile()\n proc_profile.enable()\n\n with rio.Env(GDAL_NUM_THREADS='ALL_CPUs', GDAL_TIFF_INTERNAL_MASK=True):\n dem_min = self._get_dem_min() # get min of DEM over image area\n\n # set up ortho profile based on source profile and predicted bounds\n with rio.open(self._src_im_filename, 'r') as src_im:\n ortho_profile = src_im.profile\n\n ortho_bl, ortho_tr = self._get_ortho_bounds(dem_min=dem_min) # find extreme case (z=dem_min) image bounds\n ortho_wh = np.int32(np.ceil(np.abs((ortho_bl - ortho_tr).squeeze()[:2] / self.resolution))) # image size\n\n ortho_transform = rio.transform.from_origin(ortho_bl[0], ortho_tr[1], self.resolution[0],\n self.resolution[1])\n ortho_profile.update(nodata=self.nodata, tiled=True, blockxsize=self.tile_size[0],\n blockysize=self.tile_size[1], transform=ortho_transform, width=ortho_wh[0],\n height=ortho_wh[1], num_threads='all_cpus')\n\n # overwrite source attributes in ortho_profile where config is not None\n attrs_to_check = ['driver', 'dtype', 'compress', 'interleave', 'photometric']\n for attr in attrs_to_check:\n val = getattr(self, attr)\n if val is not None:\n ortho_profile[attr] = val\n\n # work around an apparent gdal issue with writing masks, building overviews and non-jpeg compression\n if self.write_mask and ortho_profile['compress'] != 'jpeg':\n self.write_mask = False\n logger.warning('Setting write_mask=False, write_mask=True should only be used with compress=jpeg')\n\n # reproject and resample DEM to ortho bounds, CRS and grid\n with rio.open(self._dem_filename, 'r') as dem_im:\n dem_array = np.zeros((ortho_wh[1], ortho_wh[0]), 'float32')\n reproject(rio.band(dem_im, self.dem_band), dem_array, dst_transform=ortho_transform,\n dst_crs=ortho_profile['crs'], resampling=self.dem_interp, src_transform=dem_im.transform,\n src_crs=dem_im.crs, num_threads=multiprocessing.cpu_count(), dst_nodata=self.nodata,\n init_dest_nodata=True)\n\n self._remap_src_to_ortho(ortho_profile, dem_array)\n\n if logger.level == logging.DEBUG: # print profiling info\n proc_profile.disable()\n # tottime is the total time spent in the function alone. 
cumtime is the total time spent in the function\n # plus all functions that this function called\n proc_stats = pstats.Stats(proc_profile).sort_stats('cumtime')\n logger.debug(f'Processing time:')\n proc_stats.print_stats(20)\n\n current, peak = tracemalloc.get_traced_memory()\n logger.debug(f\"Memory usage: current: {current / 10 ** 6:.1f} MB, peak: {peak / 10 ** 6:.1f} MB\")", "title": "" }, { "docid": "296e069364e2ee84f146b73323134daa", "score": "0.4835223", "text": "def map_outer_city(outer_city, nb_lands):\n for outer_area in outer_city:\n outer_area.category = generate_outer_category()", "title": "" }, { "docid": "da3506743e0637fe7dfa0644994391bb", "score": "0.48322153", "text": "def find_all_ORFs(dna):\n final=[]\n# while dna.find != -1:\n for x in range(10):\n final.append(rest_of_ORF(dna[dna.find('ATG'):len(dna)]))\n dna=dna[len(rest_of_ORF(dna)):len(dna)]\n if dna.find('ATG')!=-1:\n dna=dna[dna.find('ATG'):len(dna)]\n else:\n return final\n return final", "title": "" }, { "docid": "88a93d3e2762f1f10c9d42db8c55c9d9", "score": "0.48297298", "text": "def main(**kwargs):\n\n res_dir = 'res_90pc/'\n\n hi_file = _DATA_DIR + res_dir + 'hi_braun.fits'\n co_file = _DATA_DIR + res_dir + 'co_nieten.fits'\n #co_file = DATA_DIR + 'co_carma.fits'\n weights_file = _DATA_DIR + res_dir + 'weights_orig.fits'\n sfr_files = sorted(glob.glob(_DATA_DIR + res_dir+ 'sfr_evo*-*.fits'))#[:14]\n\n regfile = _TOP_DIR + 'ism/project/sf_regions_image.reg'\n regpaths = get_reg_coords(regfile)\n\n # get the gas: HI and CO\n hi_data, hi_hdr = get_data(hi_file)\n co_data, co_hdr = get_data(co_file)\n weights, w_hdr = get_data(weights_file)\n\n dshape = co_data.shape[0], co_data.shape[1]\n pixels = get_pixel_coords(co_data, co_hdr)\n ring_reg = regpaths[0].contains_points(pixels).reshape(dshape)\n inner_reg = regpaths[1].contains_points(pixels).reshape(dshape)\n outer_reg = regpaths[2].contains_points(pixels).reshape(dshape)\n\n # determine pixel area\n dthetax = np.radians(np.abs(hi_hdr['cdelt1']))\n dthetay = np.radians(np.abs(hi_hdr['cdelt2']))\n dx, dy = np.tan(dthetax) * D_M31, np.tan(dthetay) * D_M31\n pix_area = dx * dy / np.cos(INCL)\n\n # get galactocentric distances\n # only need to use one set of data because they're all on the same grid\n rads, theta = get_coords(hi_data, hi_hdr)\n\n # convert gas to surface density\n sigma_hi = convert_to_density(hi_data, 'hi') * weights\n sigma_co = convert_to_density(co_data, 'co') * weights\n\n n_times = len(sfr_files)\n n_regions = len(sigma_hi.flatten())\n\n # set up SFR array\n sfr_array = np.zeros((n_times, n_regions))\n time_bins = np.zeros((n_times, 2))\n for i in range(n_times):\n sfr_data, sfr_hdr = pyfits.getdata(sfr_files[i], header=True)\n ts, te = sfr_files[i].split('/')[-1].rstrip('.fits').split('_')[-1].split('-')\n #if te == '6.7':\n # sfr_data = sfr_data * (10**6.7 - 10**6.6) / 10**6.7\n sfr_array[i,:] = sfr_data.flatten()\n time_bins[i,:] = [float(ts), float(te)]\n\n # compute sfrs in different time bins\n sfr100, t100 = get_avg_sfr(sfr_array, time_bins, tstart=6.6, tstop=8.0)\n sfr10, t10 = get_avg_sfr(sfr_array, time_bins, tstart=6.6, tstop=7.0)\n sfr10_100, t10_100 = get_avg_sfr(sfr_array, time_bins, tstart=7.0,\n tstop=8.0)\n sfr316, t316 = get_avg_sfr(sfr_array, time_bins, tstart=6.6, tstop=8.5)\n sfr400, t400 = get_avg_sfr(sfr_array, time_bins, tstart=6.6, tstop=8.6)\n sfr300_400, t300_400 = get_avg_sfr(sfr_array, time_bins, tstart=8.5,\n tstop=8.6)\n sfr100_400, t100_400 = get_avg_sfr(sfr_array, time_bins, tstart=8.0,\n tstop=8.6)\n 
sfr30_40, t30_40 = get_avg_sfr(sfr_array, time_bins, tstart=7.5,\n tstop=7.6)\n sfr20_30, t20_30 = get_avg_sfr(sfr_array, time_bins, tstart=7.3,\n tstop=7.5)\n\n sfarray = [sfr10, sfr100, sfr10_100, sfr316, sfr400, sfr300_400,\n sfr100_400, sfr30_40, sfr20_30]\n tarray = [t10, t100, t10_100, t316, t400, t300_400, t100_400, t30_40,\n t20_30]\n\n # select desired sfr time\n for ind in range(len(sfarray)):\n #for ind in [1]:\n sigma_sfr = sfr_array[ind] / pix_area\n sfr_time, t_time = sfarray[ind], np.array(tarray[ind])/1e6\n#sfr10, np.array(t100)/1e6\n sigma_sfr_time = sfr_time / pix_area\n\n # choose only regions where values are finite\n sel = (np.isfinite(sigma_hi.flatten())) & (np.isfinite(sigma_co.flatten())) & (np.isfinite(sigma_sfr_time)) & ((inner_reg.flatten()) | (ring_reg.flatten()) | (outer_reg.flatten()))\n\n #sel = (np.isfinite(sigma_hi.flatten())) & (np.isfinite(sigma_sfr_time)) & ((outer_reg.flatten()))\n\n\n total_sigma_hi = np.copy(sigma_hi)\n total_sigma_hi[np.isnan(total_sigma_hi)] = 0.0\n total_sigma_co = np.copy(sigma_co)\n total_sigma_co[np.isnan(total_sigma_co)] = 0.0\n total_gas = total_sigma_hi + total_sigma_co\n\n\n if args.plot:\n #plot_data(sigma_sfr[:,sel], sigma_sfr_time[sel],\n # sigma_hi.flatten()[sel], sigma_co.flatten()[sel],\n # time=t_time, save=kwargs['save'])\n plot_data(sigma_sfr, sigma_sfr_time,\n total_sigma_hi, total_sigma_co,\n time=t_time, save=kwargs['save'])\n\n\n return sigma_sfr[:,sel], sigma_sfr_time[sel], sigma_hi.flatten()[sel], sigma_co.flatten()[sel]", "title": "" }, { "docid": "a4a758c0b591f1c66af941f076753842", "score": "0.48221475", "text": "def test_get_list_from_platemap(self, ef1: EnspireFile, ef2: EnspireFile) -> None:\n assert ef1._wells_platemap[2] == \"A03\"\n assert ef2._wells_platemap[1] == \"F02\"", "title": "" }, { "docid": "27355e38605b5cf150231a6e6eb40ea6", "score": "0.48050162", "text": "def write_maps(self):\n if np.allclose(self.xmap.origin, 0):\n ext = 'ccp4'\n else:\n ext = 'mrc'\n # Create maps\n # for q, coor in zip(self._occupancies, self._coor_set):\n # self.conformer.q = q\n # self.conformer.coor = coor\n # self._transformer.mask(self._rmask)\n # fname = os.path.join(self.options.directory, f'mask.{ext}')\n # self._transformer.xmap.tofile(fname)\n # mask = self._transformer.xmap.array > 0\n # self._transformer.reset(full=True)\n\n for q, coor, b in zip(self._occupancies, self._coor_set, self._bs):\n self.conformer.q = q\n self.conformer.coor = coor\n self.conformer.b = b\n self._transformer.density()\n fname = os.path.join(self.options.directory, f'model.{ext}')\n self._transformer.xmap.tofile(fname)\n self._transformer.xmap.array -= self.xmap.array\n fname = os.path.join(self.options.directory, f'diff.{ext}')\n self._transformer.xmap.tofile(fname)\n self._transformer.reset(full=True)\n # self._transformer.xmap.array *= -1\n # fname = os.path.join(self.options.directory, f'diff_negative.{ext}')\n # self._transformer.xmap.tofile(fname)\n\n # self._transformer.reset(full=True)\n # self._transformer.xmap.array[mask] = values\n # fname = os.path.join(self.options.directory, f'model_masked.{ext}')\n # self._transformer.xmap.tofile(fname)\n # values = self.xmap.array[mask]\n # self._transformer.xmap.array[mask] -= values\n # fname = os.path.join(self.options.directory, f'diff_masked.{ext}')\n # self._transformer.xmap.tofile(fname)", "title": "" }, { "docid": "d8272677c5e62acfb133faba1420ad5d", "score": "0.48046404", "text": "def _to_latlon(easting, northing, zone_number, northern):\n\n longitude = []\n latitude = []\n\n 
for i in range(easting.shape[0]):\n\n x = easting[i] - 500000\n y = northing[i]\n\n if not northern:\n y -= 10000000\n\n m = y / K0\n mu = m / (R * M1)\n\n p_rad = (mu +\n P2 * math.sin(2 * mu) +\n P3 * math.sin(4 * mu) +\n P4 * math.sin(6 * mu) +\n P5 * math.sin(8 * mu))\n\n p_sin = math.sin(p_rad)\n p_sin2 = p_sin * p_sin\n\n p_cos = math.cos(p_rad)\n\n p_tan = p_sin / p_cos\n p_tan2 = p_tan * p_tan\n p_tan4 = p_tan2 * p_tan2\n\n ep_sin = 1 - E * p_sin2\n ep_sin_sqrt = math.sqrt(1 - E * p_sin2)\n\n n = R / ep_sin_sqrt\n r = (1 - E) / ep_sin\n\n c = _E * p_cos**2\n c2 = c * c\n\n d = x / (n * K0)\n d2 = d * d\n d3 = d2 * d\n d4 = d3 * d\n d5 = d4 * d\n d6 = d5 * d\n\n latitude.append(math.degrees(p_rad - (p_tan / r) *\n (d2 / 2 -\n d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) +\n d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2)))\n\n longitude.append(math.degrees(d -\n d3 / 6 * (1 + 2 * p_tan2 + c) +\n d5 / 120 * (5 - 2 * c + 28 * p_tan2 - 3 * c2 + 8 * E_P2 + 24 * p_tan4) / p_cos ) + (zone_number - 1) * 6 - 180 + 3)\n\n return (latitude,longitude)", "title": "" }, { "docid": "d49781bb395006310bba733659044354", "score": "0.48003393", "text": "def compute_ortholog_types(data, genome_offs):\n typEnum = tablefmt.PairwiseRelationTable.columns.get('RelType').enum\n query_type = {val: 'm' if cnt > 1 else '1'\n for val, cnt in zip(*numpy.unique(data['EntryNr2'],\n return_counts=True))}\n\n def genome_idx(enr):\n return numpy.searchsorted(genome_offs, enr - 1, side='right')\n\n g0 = genome_idx(data[0]['EntryNr2'])\n it = numpy.nditer(data, flags=['c_index'], op_flags=['readwrite'])\n while not it.finished:\n row0 = it[0]\n i1 = it.index + 1\n # we move i1 forward to the row where the next genome starts, i.e. the\n # current query changes the species or the query itself changes\n while i1 < len(data):\n row1 = data[i1]\n g1 = genome_idx(row1['EntryNr2'])\n if g1 != g0 or row0['EntryNr1'] != row1['EntryNr1']:\n break\n i1 += 1\n subj_type = 'n' if i1 - it.index > 1 else '1'\n while not it.finished and it.index < i1:\n typ = '{}:{}'.format(query_type[int(it[0]['EntryNr2'])], subj_type)\n it[0]['RelType'] = typEnum[typ]\n it.iternext()\n g0 = g1", "title": "" }, { "docid": "823e2ee5ac8175dd199862e765b07793", "score": "0.47971913", "text": "def init_houses_coordinate(self):\n cell1 = {1: [630, 695], 2: [662, 695], 3: [630, 720], 4: [662, 720]}\n cell3 = {1: [504, 695], 2: [536, 695], 3: [504, 720], 4: [536, 720]}\n cell6 = {1: [315, 695], 2: [347, 695], 3: [315, 720], 4: [347, 720]}\n cell8 = {1: [190, 695], 2: [220, 695], 3: [190, 720], 4: [220, 720]}\n cell9 = {1: [130, 695], 2: [160, 695], 3: [130, 720], 4: [160, 720]}\n cell11 = {1: [85, 632], 2: [106, 632], 3: [85, 663], 4: [106, 663]}\n cell13 = {1: [85, 506], 2: [106, 506], 3: [85, 540], 4: [106, 540]}\n cell14 = {1: [85, 446], 2: [106, 446], 3: [85, 476], 4: [106, 476]}\n cell16 = {1: [85, 322], 2: [106, 322], 3: [85, 350], 4: [106, 350]}\n cell18 = {1: [85, 198], 2: [106, 198], 3: [85, 226], 4: [106, 226]}\n cell19 = {1: [85, 136], 2: [106, 136], 3: [85, 164], 4: [106, 164]}\n cell21 = {1: [127, 86], 2: [159, 86], 3: [127, 106], 4: [159, 106]}\n cell23 = {1: [254, 86], 2: [282, 86], 3: [254, 106], 4: [282, 106]}\n cell24 = {1: [316, 86], 2: [347, 86], 3: [316, 106], 4: [347, 106]}\n cell26 = {1: [443, 86], 2: [473, 86], 3: [443, 106], 4: [473, 106]}\n cell27 = {1: [506, 86], 2: [536, 86], 3: [506, 106], 4: [536, 106]}\n cell29 = {1: [631, 86], 2: [661, 86], 3: [631, 106], 4: [661, 106]}\n cell31 = {1: 
[693, 132], 2: [715, 132], 3: [693, 160], 4: [715, 160]}\n cell32 = {1: [693, 193], 2: [715, 193], 3: [693, 222], 4: [715, 222]}\n cell34 = {1: [693, 318], 2: [715, 318], 3: [693, 350], 4: [715, 350]}\n cell37 = {1: [693, 506], 2: [715, 506], 3: [693, 538], 4: [715, 538]}\n cell39 = {1: [693, 633], 2: [715, 633], 3: [693, 663], 4: [715, 663]}\n self.house_coordinate = {1: cell1, 3: cell3, 6: cell6, 8: cell8, 9: cell9,\n 11: cell11, 13: cell13, 14: cell14, 16: cell16, 18: cell18, 19: cell19,\n 21: cell21, 23: cell23, 24: cell24, 26: cell26, 27: cell27, 29: cell29,\n 31: cell31, 32: cell32, 34: cell34, 37: cell37, 39: cell39}", "title": "" }, { "docid": "d4990d830ef682328f77ec73747db181", "score": "0.4795703", "text": "def ObsOperator_SOSIE_SWOT(state_vectors_names,observation_name,tmp_DA_path,sosie_path,name_sosie_output,name_sosie_map,n_ens=1): \n \n \n state_projection_names=[]\n for i_ens in range(n_ens):\n \n # Erasing previous state_projection\n name_output=\"state_projections_\"+str(i_ens).zfill(2)+\".nc\"\n cmd1=\"rm \"+tmp_DA_path+name_output \n os.system(cmd1)\n \n # Seperating state_vectors by ensemble member\n stringiens=str(i_ens).zfill(2)\n state_vector_iens_name=state_vectors_names[:-4]+\"_\"+stringiens+\".nc\" \n \n cmd2=\"ncks -d member,\"+stringiens+\",\"+stringiens+\" \"+state_vectors_names+\" \"+state_vector_iens_name \n os.system(cmd2) \n \n file=state_vector_iens_name \n if os.path.isfile(file)==False:\n print('Error: No state_vector'+str(i_ens).zfill(2)+' file in '+tmp_DA_path) \n \n #\n # SOSIE reads source.nc and target.nc and writes output in TMP_DA/ directory (details in namelist)\n #\n \n # Copying state_vector_iens and observation as source and target for SOSIE\n cmd3=\"cp \"+state_vector_iens_name+\" \"+tmp_DA_path+\"source.nc\" \n os.system(cmd3) \n cmd4=\"cp \"+observation_name+\" \"+tmp_DA_path+\"target.nc\" \n os.system(cmd4) \n \n # Running SOSIE \n cmd5=sosie_path+\"sosie.x -f \"+sosie_path+\"namelist1\"\n os.system(cmd5) \n \n # Renaming SOSIE output as the i_ens state_projection \n if os.path.isfile(tmp_DA_path+name_sosie_output)==False:\n print('Error: SOSIE failed to work')\n cmd6=\"mv \"+tmp_DA_path+name_sosie_output+\" \"+tmp_DA_path+name_output \n os.system(cmd6) \n \n state_projection_names=np.append(state_projection_names,tmp_DA_path+name_output) \n \n # Erasing SOSIE map \n cmd7=\"rm \"+tmp_DA_path+name_sosie_map \n os.system(cmd7)\n \n return state_projection_names", "title": "" }, { "docid": "e6dcc6f6519a46dc7d9530d38dab4863", "score": "0.4795507", "text": "def no_allosteric(self):\n df = self.ensemble_data\n\n # Make a separate directory for the output:\n new_dir = join(self.root_dir, \"no_allosteric\")\n if not exists(new_dir):\n mkdir(new_dir)\n prot_paths = self.get_protein_paths(mode=\"complex\")\n df['ID'] = [dirname(p) for p in prot_paths]\n\n\n hot_paths = {}\n for path in df[\"ID\"]:\n tar_path = join(path, \"fullsize_hotspots_100000\", \"binding_site_maps\", \"out\")\n if exists(tar_path):\n hot_paths[path] = tar_path\n\n no_allo_hot_paths = {}\n for h in hot_paths.keys():\n row = df.loc[df['ID']==h].squeeze()\n print(row)\n if row['allosteric_PDB'] == \"-\":\n no_allo_hot_paths[h] = hot_paths[h]\n\n from GridEnsemble import GridEnsemble\n\n probes = [\"donor\", \"acceptor\", \"apolar\"]\n for probe in probes:\n probe_paths = [join(path, \"{}.ccp4\".format(probe)) for path in no_allo_hot_paths.values()]\n print(probe_paths)\n ge = GridEnsemble(probe_paths)\n ge.get_ensemble_array()\n save_path = join(new_dir, 
\"{}_{}.p\".format(self.ensemble_name, probe))\n ge.save_gridensemble(save_path)\n\n s_paths = [join(p, \"complex.mol2\") for p in no_allo_hot_paths.keys()]\n self.save_pdbs(s_paths)\n\n return", "title": "" }, { "docid": "1b6c8c386d0358e38f87cba78550aa2b", "score": "0.4793837", "text": "def test_map_flipped_reaction(self):\n c2h5o3_xyz = {'coords': ((-1.3476727508427788, -0.49923624257482285, -0.3366372557370102),\n (-0.11626816111736853, 0.3110915299407186, 0.018860985632263887),\n (0.7531175607750088, 0.3366822240291409, -1.1050387236863213),\n (0.5228736844989644, -0.3049881931104616, 1.1366016759286774),\n (1.8270658637404131, 0.34102014147584997, 1.2684162942337813),\n (-2.039181700362481, -0.5535509846570477, 0.5100031541057821),\n (-1.865025875161301, -0.06806929272376178, -1.1994046923960628),\n (-1.0711960095793496, -1.5264629385419055, -0.6002175107608478),\n (-0.40133538695862053, 1.3357900487643664, 0.28224155088545305),\n (1.3942569570346546, 1.035594500292526, -0.8890721851777293)),\n 'isotopes': (12, 12, 16, 16, 16, 1, 1, 1, 1, 1),\n 'symbols': ('C', 'C', 'O', 'O', 'O', 'H', 'H', 'H', 'H', 'H')}\n c2h4o_xyz = {'coords': ((-0.6485165220711699, -0.036287809639473964, -0.040072327958319325),\n (0.8441328059817381, 0.04088405476411104, 0.05352861712992162),\n (1.4799812732494606, 1.0748679945888888, -0.1224478071645769),\n (-1.0603388058764294, 0.9464876376852732, -0.28238370478893315),\n (-0.9213427138232859, -0.7478396768473443, -0.8228167900899559),\n (-1.0499663443190728, -0.37234114306362315, 0.9187474043028493),\n (1.3560503068587568, -0.9057710574878411, 0.29544460856901716)),\n 'isotopes': (12, 12, 16, 1, 1, 1, 1),\n 'symbols': ('C', 'C', 'O', 'H', 'H', 'H', 'H')}\n r_1 = ARCSpecies(label='C2H5O3', smiles='CC(O)O[O]', xyz=c2h5o3_xyz)\n p_1 = ARCSpecies(label='C2H4O', smiles='CC=O', xyz=c2h4o_xyz)\n p_2 = ARCSpecies(label='HO2', smiles='O[O]', xyz=self.ho2_xyz)\n # Reverse HO2 elimination:\n rxn_5 = ARCReaction(r_species=[p_1, p_2], p_species=[r_1])\n self.assertEqual(rxn_5.atom_map[:3], [0, 1, 2])\n self.assertIn(tuple(rxn_5.atom_map[3:6]), list(permutations([5, 6, 7])))\n self.assertEqual(rxn_5.atom_map[6], 8)\n self.assertIn(tuple(rxn_5.atom_map[7:9]), list(permutations([3, 4])))\n self.assertEqual(rxn_5.atom_map[9], 9)\n \n # Reverse HO2 elimination, reversed reactant order:\n rxn_6 = ARCReaction(r_species=[p_2, p_1], p_species=[r_1])\n self.assertIn(rxn_6.atom_map[:6], [[4, 3, 9, 0, 1, 2], [3, 4, 9, 0, 1, 2]])\n self.assertIn(tuple(rxn_6.atom_map[6:9]), list(permutations([5, 6, 7])))\n self.assertEqual(rxn_6.atom_map[9], 8)", "title": "" }, { "docid": "df5b4803b4b5ee635913f6c810bef7ff", "score": "0.4793405", "text": "def fetch_mappings():\n output = {'województwa': {}, 'okręgi': {}}\n\n wojewodztwo_prefix = get_nr_okr(FIRST_ROW)\n\n woj = 2\n\n for idx in range(FIRST_ROW, LAST_ROW):\n new_nr = get_nr_okr(idx)\n if new_nr == wojewodztwo_prefix:\n last_wojewodztwo = get_siedziba(idx)\n output['województwa']['%02d' % woj] = nameof_wojewodztwo(last_wojewodztwo)\n woj += 2\n else:\n output['okręgi'][new_nr] = nameof_okreg(idx)\n\n return output", "title": "" }, { "docid": "de96430ee35dd13db8af78124d846513", "score": "0.47933438", "text": "def get_sp_info():\n\n # Read csv file with information on species, return as dictionary\n splist = pd.read_csv(os.path.join(r'd:\\NW_src_data', 'soorten_lijst.txt'), sep=';', comment='#')\n\n # convert species names to lowercase just to be sure\n splist['sp_nm'] = splist['sp_nm'].str.lower()\n\n out = {}\n\n # all species names\n 
out['all'] = {'sp_nm': splist['sp_nm'].unique().tolist(),\n 'sp_nr': splist['sp_nr'].unique().tolist()}\n\n # Nulsoorten (null species)\n out['nulsoort'] = {True: {'sp_nm': splist.loc[splist['nulsoort'] == 1, 'sp_nm'].tolist(),\n 'sp_nr': splist.loc[splist['nulsoort'] == 1, 'sp_nr'].tolist()},\n False: {'sp_nm': splist.loc[splist['nulsoort'] != 1, 'sp_nm'].tolist(),\n 'sp_nr': splist.loc[splist['nulsoort'] != 1, 'sp_nr'].tolist()}}\n\n # species names and numbers for each taxonomic group\n tax_groups = splist['tax_groep'].unique().tolist()\n out['taxgroep'] = {}\n for group in tax_groups:\n out[group] = {'sp_nm': splist.loc[splist['tax_groep'] == group, 'sp_nm'].tolist(),\n 'sp_nr': splist.loc[splist['tax_groep'] == group, 'sp_nr'].tolist()}\n\n # taxgroepen (taxonomic groups)\n out['taxgroep'][group] = group\n out['taxgroep']['all'] = tax_groups\n\n # species names and numbers for each subhabitat\n habtypes = [x for x in list(splist) if 'SNL_' in x]\n\n out['habitattypes'] = {}\n for hab in habtypes:\n out[hab.lower()] = {'sp_nm': splist.loc[splist[hab] == 1, 'sp_nm'].tolist(),\n 'sp_nr': splist.loc[splist[hab] == 1, 'sp_nr'].tolist()}\n out['habitattypes'][hab] = hab\n out['habitattypes']['all'] = habtypes\n\n # species names and numbers for NW, LPI, SNL\n # TODO 12-02-2019: do this the same way as for Nulsoorten\n for ding in ['NW', 'LPI', 'SNL']:\n out[ding] = {'sp_nm': splist.loc[splist[ding] == 1, 'sp_nm'].tolist(),\n 'sp_nr': splist.loc[splist[ding] == 1, 'sp_nr'].tolist()}\n\n # all specs per species name and species number\n row_iterator = splist.iterrows()\n for i, row in row_iterator:\n # sp nr: sp name, tax_groep, NW, LPI, SNL membership\n out[splist.loc[i, 'sp_nr']] = {'sp_nm': splist.loc[i, 'sp_nm'],\n 'tax_groep': splist.loc[i, 'tax_groep'],\n 'NW': iftrue(splist.loc[i, 'NW']),\n 'LPI': iftrue(splist.loc[i, 'LPI']),\n 'SNL': iftrue(splist.loc[i, 'SNL'])}\n\n # sp name: sp nr, tax_groep, NW, LPI, SNL membership\n out[splist.loc[i, 'sp_nm']] = {'sp_nr': splist.loc[i, 'sp_nr'],\n 'tax_groep': splist.loc[i, 'tax_groep'],\n 'NW': iftrue(splist.loc[i, 'NW']),\n 'LPI': iftrue(splist.loc[i, 'LPI']),\n 'SNL': iftrue(splist.loc[i, 'SNL'])}\n\n return out", "title": "" }, { "docid": "b23e60594cae9b2428a3ef04bf8e7271", "score": "0.47897243", "text": "def test_treat_imaginary_modes(ph_srtio3: Phonopy):\n ph = ph_srtio3\n rd = RandomDisplacements(ph.supercell, ph.primitive, ph.force_constants)\n # for freqs in (rd.frequencies[0], rd.frequencies[-1]):\n # print(\", \".join([f\"{v:10.7f}\" for v in freqs]))\n ref0 = [\n -2.3769150,\n -2.3769150,\n -2.3769150,\n -0.0000003,\n -0.0000003,\n -0.0000001,\n 4.6902115,\n 4.6902115,\n 4.6902115,\n 6.7590219,\n 6.7590219,\n 6.7590219,\n 16.0075351,\n 16.0075351,\n 16.0075351,\n ]\n ref13 = [\n 3.2707508,\n 3.3132392,\n 3.4395550,\n 3.4395550,\n 3.6676862,\n 3.6676862,\n 10.7490284,\n 
10.7970960,\n 10.7970960,\n 12.0900533,\n 12.0900533,\n 13.8508135,\n 15.0638793,\n 15.0638793,\n 24.6446671,\n ]\n np.testing.assert_allclose(ref0, rd.frequencies[0], atol=1e-5)\n np.testing.assert_allclose(ref13, rd.frequencies[-1], atol=1e-5)\n\n # Test frequency shifts\n rd.treat_imaginary_modes(freq_to=3)\n # for freqs in (rd.frequencies[0], rd.frequencies[-1]):\n # print(\", \".join([f\"{v:10.7f}\" for v in freqs]))\n ref0 = [\n 3.3769150,\n 3.3769150,\n 3.3769150,\n 0.0000003,\n 0.0000003,\n 0.0000001,\n 4.6902115,\n 4.6902115,\n 4.6902115,\n 6.7590219,\n 6.7590219,\n 6.7590219,\n 16.0075351,\n 16.0075351,\n 16.0075351,\n ]\n ref13 = [\n 3.2707508,\n 3.3132392,\n 3.4395550,\n 3.4395550,\n 3.6676862,\n 3.6676862,\n 10.7490284,\n 10.7970960,\n 10.7970960,\n 12.0900533,\n 12.0900533,\n 13.8508135,\n 15.0638793,\n 15.0638793,\n 24.6446671,\n ]\n np.testing.assert_allclose(ref0, rd.frequencies[0], atol=1e-5)\n np.testing.assert_allclose(ref13, rd.frequencies[-1], atol=1e-5)", "title": "" }, { "docid": "0617228ce2904db9242b3d445213df7d", "score": "0.47895285", "text": "def geo_position_rand(splice_junctions):\n u5='UCAUUUUCCGC'\n geo_position={}\n pair=''\n \n Watson_Crick=[0,0,0,0,0,0,0,0,0,0,0]\n isosteric=[0,0,0,0,0,0,0,0,0,0,0]\n different=[0,0,0,0,0,0,0,0,0,0,0]\n\n for j in splice_junctions:\n n=0\n while n<11:\n \n pair=str(j[n])+str(u5[n])\n\n if pair=='GC' or pair=='CG' or pair=='AU' or pair=='TA':\n Watson_Crick[n]+=1\n elif pair=='AA' or pair=='GA' or pair=='GG' or pair=='AG' or pair=='CC':\n different[n]+=1\n else:\n isosteric[n]+=1\n n+=1\n if n==11:\n break\n \n geo_position['Watson_Crick']=Watson_Crick\n geo_position['isosteric']=isosteric\n geo_position['different']=different\n\n #print(geo_position)\n return(geo_position)", "title": "" }, { "docid": "5cf73a7ca01a380f423a96b485cfd580", "score": "0.4786056", "text": "def main():\n\n args = get_args()\n \n fasta_seqs = list(SeqIO.parse(args.card_db, \"fasta\"))\n\n family_allele_dict = {}\n shortname_family_dict = {}\n family_count_dict = {}\n\n with open(args.aro_index, \"r\") as infile1:\n for line in infile1:\n if not line.startswith(\"ARO Accession\"):\n line_elements = line.strip().split(\"\\t\")\n gene_family = \"_\".join(line_elements[8].split(\" \"))\n gene_shortname = line_elements[11]\n \n if gene_shortname not in shortname_family_dict:\n shortname_family_dict[gene_shortname] = gene_family\n \n elif gene_shortname in shortname_family_dict:\n print(\"Error: duplicate gene shortname.\")\n break\n \n if gene_family not in family_allele_dict:\n family_allele_dict[gene_family] = 0\n elif gene_family in family_allele_dict:\n continue\n \n fam_count = 0\n for family in family_allele_dict:\n fam_count +=1\n family_count_dict[family] = fam_count\n \n for seq in fasta_seqs:\n seq_header = seq.id.split(\"|\")\n seq_shortname = seq_header[5].split(\" \")[0]\n seq_family = shortname_family_dict[seq_shortname]\n seq_family_count = family_count_dict[seq_family]\n family_allele_dict[seq_family] += 1\n\n tr_table = str.maketrans(dict.fromkeys('!@#$%^&*();:,<>/?_=`~', '-'))\n tr_seq_family = seq_family.translate(tr_table)\n tr_seq_shortname = seq_shortname.translate(tr_table)\n\n new_seq_id = f\"{seq_family_count}__{tr_seq_family}__{tr_seq_shortname}__{family_allele_dict[seq_family]}\"\n seq.id = new_seq_id\n seq.description = seq.description.translate(tr_table)\n \n SeqIO.write(fasta_seqs, args.output_fasta, \"fasta\")\n\n\n\n\n\n # print(len(gene_shortname_list), len(set(gene_shortname_list)))\n # import collections\n # 
print([item for item, count in collections.Counter(gene_shortname_list).items() if count > 1])\n\n\n # for seq in fasta_seqs:\n # seq_header = seq.id.split(\"|\")\n # seq_family = seq_header[5].split(\" \")[0]\n # if \"-\" in seq_family:\n # seq_fam_elements = seq_family.split(\"-\")\n # # print(\"-\".join(seq_fam_elements[0:-1]), seq_fam_elements[-1])\n # seq_family = \"-\".join(seq_fam_elements[0:-1])\n # seq_allele = seq_fam_elements[-1]\n\n # if seq_family not in family_dict:\n # family_dict[seq_family] = [seq]\n # elif seq_family in family_dict:\n # family_dict[seq_family].append(seq)\n\n # elif \"-\" not in seq_family:\n # if seq_family not in family_dict:\n # family_dict[seq_family] = [seq]\n # elif seq_family in family_dict:\n # family_dict[seq_family].append(seq)", "title": "" }, { "docid": "8a8c34e93640ec0bde93499d9cf8ae49", "score": "0.47844437", "text": "def map_hsps(self, hsps):\r\n offset = min([int(x.id[1]) for x in self.residues])\r\n q = hsps.query\r\n sbjct = hsps.sbjct\r\n sbjct_counter = hsps.sbjct_start\t\r\n q_counter = hsps.query_start\r\n for s, q in zip(sbjct, q):\r\n if s == q:\r\n self.mapping[sbjct_counter] = offset - 1 + q_counter\r\n sbjct_counter += 1\r\n q_counter += 1\r\n elif s != '-' and q != '-':\r\n self.mapping[sbjct_counter] = offset - 1 + q_counter\r\n sbjct_counter += 1\r\n q_counter += 1\r\n elif s != '-' and q == '-':\r\n sbjct_counter += 1\r\n else:\r\n sbjct_counter += 1\r\n q_counter += 1", "title": "" }, { "docid": "3c7d992e7a2ee40a20c05a53947dcce2", "score": "0.47842917", "text": "def find_matches(self):\n os.chdir(self.hiresdir)\n hc = sextractor(self.hires_cat)\n self.objid = np.zeros(len(self.ra), 'int')\n self.match_dist = np.zeros(len(self.ra)) # in arcsec\n self.x_hires = np.zeros(len(self.ra))\n self.y_hires = np.zeros(len(self.ra))\n self.x_lores = np.zeros(len(self.ra))\n self.y_lores = np.zeros(len(self.ra))\n self.xmin_hr = np.zeros(len(self.ra))\n self.xmax_hr = np.zeros(len(self.ra))\n self.ymin_hr = np.zeros(len(self.ra))\n self.ymax_hr = np.zeros(len(self.ra))\n self.xmin_bkgd = np.zeros(len(self.ra))\n self.xmax_bkgd = np.zeros(len(self.ra))\n self.ymin_bkgd = np.zeros(len(self.ra))\n self.ymax_bkgd = np.zeros(len(self.ra))\n for i in range(len(self.ra)):\n angdist = angsep.angsep(self.ra[i], self.dec[i], hc.alpha_j2000, hc.delta_j2000)\n index_min = np.argsort(angdist)[0]\n self.objid[i] = hc.number[index_min]\n self.match_dist[i] = np.min(angdist) * 3600.\n self.x_hires[i] = hc.x_image[index_min]\n self.y_hires[i] = hc.y_image[index_min]\n # Now determine the pixel coordinates in the low-res image\n # The pixel coordinates here are 1-based\n lores_xy = self.lr_wcs.wcs_sky2pix([[self.ra[i], self.dec[i]]], 1)[0]\n self.x_lores[i] = lores_xy[0]\n self.y_lores[i] = lores_xy[1]", "title": "" }, { "docid": "c1464321f09fe7a63807878177dd3e80", "score": "0.4782266", "text": "def compute_entropies(self):\n # Genotypic entropy for each set of loci\n entropy_gt = {}\n for k in [\"s\",\"r\",\"n\",\"a\"]: # Survival, reproductive, neutral, all\n d = self[\"density\"][k].T\n entropy_gt[k] = np.apply_along_axis(st.entropy, 0, d)\n self[\"entropy_gt\"] = entropy_gt\n # Bit entropy\n n1_total = np.mean(self[\"n1\"], 1)\n bit_distr = np.vstack((n1_total,1-n1_total))\n entropy_bits = np.apply_along_axis(st.entropy, 0, bit_distr)\n self[\"entropy_bits\"] = entropy_bits", "title": "" }, { "docid": "9ebb6b4d8ca4ff2062c0d4021ee1fe63", "score": "0.47809425", "text": "def testORCA_ORCA4_2_947_out(logfile):\r\n assert len(logfile.data.atomcoords) == 
7\r\n assert len(logfile.data.grads) == 6", "title": "" }, { "docid": "6c3482865fc613474c923bddb8850a33", "score": "0.4773786", "text": "def _apply_map(self, mapping):\n # Allocate arrays\n _beads = []\n atomnames = []\n atomids = []\n resids = []\n resnames = []\n segids = []\n charges = []\n masses = []\n\n residues = self.atu.atoms.split(\"residue\")\n select_residues = enumerate(\n itertools.product(residues, viewitems(mapping)))\n for i, (res, (name, selection)) in select_residues:\n bead = res.select_atoms(selection)\n if bead:\n _beads.append(bead)\n atomnames.append(name)\n atomids.append(i)\n resids.append(bead.resids[0])\n resnames.append(bead.resnames[0])\n segids.append(bead.segids[0].split(\"_\")[-1])\n try:\n charges.append(bead.total_charge())\n except AttributeError:\n charges.append(0.)\n masses.append(bead.total_mass())\n\n _beads = np.array(_beads)\n n_atoms = len(_beads)\n\n # Atom\n # _beads = topattrs._Beads(_beads)\n vdwradii = np.zeros_like(atomids)\n vdwradii = topologyattrs.Radii(vdwradii)\n atomids = topologyattrs.Atomids(np.asarray(atomids))\n atomnames = topologyattrs.Atomnames(\n np.asarray(atomnames, dtype=np.object))\n atomtypes = topologyattrs.Atomtypes(\n np.asarray(np.arange(n_atoms) + 100))\n charges = topologyattrs.Charges(np.asarray(charges))\n masses = topologyattrs.Masses(np.asarray(masses))\n\n # Residue\n # resids, resnames\n segids = np.asarray(segids, dtype=np.object)\n resids = np.asarray(resids, dtype=np.int32)\n resnames = np.asarray(resnames, dtype=np.object)\n residx, (new_resids, new_resnames,\n perres_segids) = topbase.change_squash(\n (resids, resnames, segids), (resids, resnames, segids))\n\n # transform from atom:Rid to atom:Rix\n residueids = topologyattrs.Resids(new_resids)\n residuenums = topologyattrs.Resnums(new_resids.copy())\n residuenames = topologyattrs.Resnames(new_resnames)\n\n # Segment\n segidx, perseg_segids = topbase.squash_by(perres_segids)[:2]\n segids = topologyattrs.Segids(perseg_segids)\n\n # Setup topology\n top = topology.Topology(\n len(atomids),\n len(new_resids),\n len(segids),\n attrs=[\n atomids, atomnames, atomtypes, charges, masses, vdwradii,\n residueids, residuenums, residuenames, segids\n ],\n atom_resindex=residx,\n residue_segindex=segidx)\n return top", "title": "" }, { "docid": "2b8a807315c5fffae8c45b68cb0e9f7f", "score": "0.4772674", "text": "def test_dispersion_map(self):\n # light profile\n light_profile_list = ['HERNQUIST']\n r_eff = 1.5\n kwargs_light = [{'Rs': r_eff, 'amp': 1.}] # effective half light radius (2d projected) in arcsec\n # 0.551 *\n # mass profile\n mass_profile_list = ['SPP']\n theta_E = 1.2\n gamma = 2.\n kwargs_mass = [{'theta_E': theta_E, 'gamma': gamma}] # Einstein radius (arcsec) and power-law slope\n\n # anisotropy profile\n anisotropy_type = 'OM'\n r_ani = 2.\n kwargs_anisotropy = {'r_ani': r_ani} # anisotropy radius [arcsec]\n\n # aperture as shell\n #aperture_type = 'shell'\n #kwargs_aperture_inner = {'r_in': 0., 'r_out': 0.2, 'center_dec': 0, 'center_ra': 0}\n\n #kwargs_aperture_outer = {'r_in': 0., 'r_out': 1.5, 'center_dec': 0, 'center_ra': 0}\n\n # aperture as slit\n r_bins = np.linspace(0, 2, 3)\n kwargs_ifu = {'r_bins': r_bins, 'center_ra': 0, 'center_dec': 0, 'aperture_type': 'IFU_shells'}\n kwargs_aperture = {'aperture_type': 'shell', 'r_in': r_bins[0], 'r_out': r_bins[1], 'center_ra': 0,\n 'center_dec': 0}\n\n psf_fwhm = 1. 
# Gaussian FWHM psf\n kwargs_cosmo = {'d_d': 1000, 'd_s': 1500, 'd_ds': 800}\n kwargs_numerics = {'interpol_grid_num': 500, 'log_integration': True,\n 'max_integrate': 100}\n kwargs_model = {'mass_profile_list': mass_profile_list,\n 'light_profile_list': light_profile_list,\n 'anisotropy_model': anisotropy_type}\n kwargs_psf = {'psf_type': 'GAUSSIAN', 'fwhm': psf_fwhm}\n\n galkinIFU = Galkin(kwargs_aperture=kwargs_ifu, kwargs_psf=kwargs_psf, kwargs_cosmo=kwargs_cosmo,\n kwargs_model=kwargs_model, kwargs_numerics=kwargs_numerics, analytic_kinematics=True)\n sigma_v_ifu = galkinIFU.dispersion_map(kwargs_mass={'theta_E': theta_E, 'gamma': gamma}, kwargs_light={'r_eff': r_eff},\n kwargs_anisotropy=kwargs_anisotropy, num_kin_sampling=1000)\n galkin = Galkin(kwargs_model, kwargs_aperture, kwargs_psf, kwargs_cosmo, kwargs_numerics,\n analytic_kinematics=True)\n sigma_v = galkin.dispersion(kwargs_mass={'theta_E': theta_E, 'gamma': gamma}, kwargs_light={'r_eff': r_eff},\n kwargs_anisotropy=kwargs_anisotropy, sampling_number=1000)\n npt.assert_almost_equal(sigma_v, sigma_v_ifu[0], decimal=-1)", "title": "" }, { "docid": "0d6725ed25a77b26a63eac74dc1c0555", "score": "0.47633153", "text": "def display_mapping_evolvability(self, reso=100):\n xx, yy = np.meshgrid(np.linspace(-5, 5, reso), np.linspace(-5, 5, reso))\n genomes = np.hstack((xx.reshape(-1, 1), yy.reshape(-1, 1)))\n coverages, uniformities, _, _ = self.dummy_experiment.evaluate_coverage_and_uniformity(genomes)\n\n plt.figure(figsize=(13, 6))\n plt.subplot(121)\n plt.contourf(genomes[:, 0].reshape(reso, reso),\n genomes[:, 1].reshape(reso, reso),\n coverages.reshape(reso, reso),\n reso, cmap=\"rainbow\")\n plt.title(\"coverage\")\n plt.xlabel(\"gene 1\")\n plt.ylabel(\"gene 2\")\n plt.axis(\"equal\")\n\n plt.subplot(122)\n plt.contourf(genomes[:, 0].reshape(reso, reso),\n genomes[:, 1].reshape(reso, reso),\n uniformities.reshape(reso, reso),\n reso, cmap=\"rainbow\")\n plt.title(\"uniformity\")\n plt.xlabel(\"gene 1\")\n plt.ylabel(\"gene 2\")\n plt.axis(\"equal\")\n\n if self.save_figures:\n # save before plt.show() so the figure is not blank under non-interactive backends\n plt.savefig(\"figures/{}/evolvability mapping.svg\".format(self.experiment_name))\n plt.savefig(\"figures/{}/evolvability mapping.png\".format(self.experiment_name))\n plt.show()", "title": "" }, { "docid": "71a7d31f9379a2429410bc3f821f7435", "score": "0.47596875", "text": "def find_all_ORFs_both_strands(dna):\n half_codons = [] #sets empty list for strand 1\n half_codons2 = [] #sets empty list for strand 2 (reverse of strand 1)\n all_the_codons = [] #sets empty list for strand 1 + strand 2\n half_codons += find_all_ORFs(dna) #finds all codons in strand 1\n half_codons2 += find_all_ORFs(get_reverse_complement(dna)) #finds all codons in strand 2\n# print 'hc2', half_codons2\n all_the_codons = half_codons + half_codons2 #creates a list of all the codons in strands 1 and 2\n return all_the_codons", "title": "" }, { "docid": "49d58d6a0a06e2d210ebb55070a596ce", "score": "0.4759174", "text": "def find_all_ORFs(dna):\n\n ORFlist = []\n new_list1 = find_all_ORFs_oneframe(dna) #find ORF by reading in multiples of 3\n ORFlist += new_list1\n new_list2 = find_all_ORFs_oneframe(dna[1:]) #find ORF by reading 1 position after multiples of 3\n ORFlist += new_list2\n new_list3 = find_all_ORFs_oneframe(dna[2:]) #find ORF by reading 2 positions after multiples of 3\n ORFlist += new_list3\n return ORFlist", "title": "" }, { "docid": "8432063d5006953287aa9b756cebb77d", "score": "0.47585806", "text": "def 
map_genes_to_sc_chr(annotation_data):\n\n\troman_numerals_as_ints = {\n\t'I': '1', \n\t'II': '2', \n\t'III': '3', \n\t'IV': '4',\n\t'V': '5',\n\t'VI': '6',\n\t'VII': '7',\n\t'VIII': '8',\n\t'IX': '9',\n\t'X': '10',\n\t'XI': '11',\n\t'XII': '12',\n\t'XIII': '13',\n\t'XIV': '14',\n\t'XV': '15',\n\t'XVI': '16'\n\t}\n\n\tgene_chr_dict = {}\n\n\tfor each_gene in annotation_data:\n\t\tspecies = each_gene[0][0:2]\n\t\tif species == 'sp': \n\t\t\tif each_gene[0][2:6] == 'Scer': #account for weird sp genes that are labeled this way\n\t\t\t\tgene = each_gene[0][2:]\n\t\t\t\tif each_gene[0][7:9].isdigit() == True:\n\t\t\t\t\tgene_chr_dict[gene] = each_gene[0][7:9]\n\t\t\t\telse:\n\t\t\t\t\tgene_chr_dict[gene] = each_gene[0][7:8]\n\t\t\telse:\n\t\t\t\tcontinue # don't process paradoxus genes\n\t\tif species == 'sc':\n\t\t\tif each_gene[1] == 'sc2-micron' or each_gene[1] == 'scchrMito': # don't process genes in mito DNA or 2-micro plasmid\n\t\t\t\tcontinue\n\t\t\tif each_gene[0] != 'sc':\t# gets rid of \"empty\" genes that just say sc ...\n\t\t\t\tgene = each_gene[0][2:]\n\t\t\t\tscaffold_roman = each_gene[1][5:]\n\t\t\t\tscaffold_int = roman_numerals_as_ints[scaffold_roman] # convert roman to int\n\t\t\t\tgene_chr_dict[gene] = scaffold_int\n\n\treturn gene_chr_dict", "title": "" }, { "docid": "62abc9172a6ac69f9898296e5b899459", "score": "0.47572812", "text": "def _makeimap(self):\n self.map_['source'] = 'nasa'\n self.map_['instrument'] = 'goes'\n self.map_['physobs'] = 'irradiance'\n self.map_['provider'] = 'sdac'", "title": "" }, { "docid": "36c7db3aa63a5c5354ebe9c2963480d7", "score": "0.47552645", "text": "def write_clade2otus_map(filename, map_clade2otu):\n try:\n fh = open(filename, 'w')\n fh.write('#MetaPhlAn clade\\tmatching GreenGenes OTUs\\n')\n for clade in sorted(map_clade2otu):\n fh.write('\\t'.join([clade] + sorted(map(str,\n map_clade2otu[clade]))))\n fh.write(\"\\n\")\n fh.close()\n except IOError:\n raise IOError('Cannot write to file \"%s\"' % filename)", "title": "" }, { "docid": "822c50723e9019456c8fb639e8cabd72", "score": "0.47403425", "text": "def calc_zern_sens(mp):\n indsZnoll = mp.eval.indsZnoll\n Rsens = mp.eval.Rsens # Radii ranges. 
Can overlap.\n Nannuli = Rsens.shape[0]\n Nzern = indsZnoll.size\n\n # Make scoring masks\n maskCube = np.zeros((mp.Fend.Neta, mp.Fend.Nxi, Nannuli))\n for ni in range(Nannuli):\n # Make scoring masks for the annular regions\n # Set Inputs:\n maskDict = {}\n maskDict[\"pixresFP\"] = mp.Fend.res\n maskDict[\"rhoInner\"] = Rsens[ni, 0] # [lambda0/D]\n maskDict[\"rhoOuter\"] = Rsens[ni, 1] # [lambda0/D]\n maskDict[\"angDeg\"] = mp.Fend.corr.ang # [degrees]\n maskDict[\"centering\"] = mp.centering\n maskDict[\"FOV\"] = mp.Fend.FOV\n maskDict[\"whichSide\"] = np.atleast_1d(mp.Fend.sides)[0]\n if hasattr(mp.Fend, 'shape'):\n maskDict[\"shape\"] = np.atleast_1d(mp.Fend.shape)[0]\n maskCube[:, :, ni], xisDL, etasDL = falco.mask.falco_gen_SW_mask(maskDict)\n\n if not mp.full.flagPROPER: # When using full models made with PROPER\n # Generate cube of normalized (RMS = 1) Zernike modes.\n ZmapCube = gen_norm_zern_maps(mp.P1.full.Nbeam, mp.centering,\n indsZnoll)\n # Make sure ZmapCube is padded or cropped to the right array size\n if not ZmapCube.shape[0] == mp.P1.full.Narr:\n ZmapCubeTemp = np.zeros((mp.P1.full.Narr, mp.P1.full.Narr, Nzern))\n for zi in range(Nzern):\n ZmapCubeTemp[:, :, zi] = falco.util.pad_crop(\n np.squeeze(ZmapCube[:, :, zi]), mp.P1.full.Narr)\n ZmapCube = ZmapCubeTemp\n del ZmapCubeTemp\n\n # Number of polarization states used\n if not hasattr(mp, 'full'):\n mp.full = falco.config.Object() # Initialize if this doesn't exist\n if hasattr(mp.full, 'pol_conds'):\n Npol = len(mp.full.pol_conds)\n else:\n Npol = 1\n\n # Get unaberrated E-fields\n # Loop over all wavelengths and polarizations\n inds_list = [(x, y) for x in np.arange(mp.full.NlamUnique)\n for y in np.arange(Npol)]\n Nvals = mp.full.NlamUnique*Npol\n\n # Get nominal, unaberrated E-field at each wavelength and polarization\n E0array = np.zeros((mp.Fend.Neta, mp.Fend.Nxi, mp.full.NlamUnique, Npol),\n dtype=complex)\n Eunab = np.zeros((mp.Fend.Neta, mp.Fend.Nxi, Nvals), dtype=complex)\n\n print('Computing unaberrated E-fields for Zernike sensitivities...\\t',\n end='')\n if mp.flagParallel:\n # pool = multiprocessing.Pool(processes=mp.Nthreads)\n # resultsRaw = [pool.apply_async(falco_get_single_sim_Efield_LamPol,\n # args=(iv, inds_list, mp)) for iv in range(Nvals)]\n # results = [p.get() for p in resultsRaw] # All the E-fields in a list\n # pool.close()\n # pool.join()\n # for iv in range(Nvals):\n # Eunab[:, :, iv] = results[iv]\n\n pool = multiprocessing.Pool(processes=mp.Nthreads)\n results = pool.starmap(falco_get_single_sim_Efield_LamPol,\n [(iv, inds_list, mp) for iv in range(Nvals)])\n pool.close()\n pool.join()\n for iv in range(Nvals):\n Eunab[:, :, iv] = results[iv]\n else:\n for iv in range(Nvals):\n Eunab[:, :, iv] = falco_get_single_sim_Efield_LamPol(iv, inds_list,\n mp)\n print('done.')\n\n # Reorganize the output\n for iv in range(Nvals):\n ilam = inds_list[iv][0]\n ipol = inds_list[iv][1]\n E0array[:, :, ilam, ipol] = Eunab[:, :, iv]\n del Eunab\n\n # Get E-fields with Zernike aberrations\n # Loop over all wavelengths, polarizations, and Zernike modes\n inds_list_zern = [(x, y, z) for x in np.arange(mp.full.NlamUnique)\n for y in np.arange(Npol) for z in np.arange(Nzern)]\n NvalsZern = mp.full.NlamUnique*Npol*Nzern\n\n # Get nominal, unaberrated final E-field at each wavelength and polarization\n dEZarray = np.zeros((mp.Fend.Neta, mp.Fend.Nxi, mp.full.NlamUnique, Npol,\n Nzern),\n dtype=complex)\n Eab = np.zeros((mp.Fend.Neta, mp.Fend.Nxi, NvalsZern), dtype=complex)\n\n print('Computing aberrated E-fields 
for Zernike sensitivities...\\t', end='')\n if mp.flagParallel:\n # pool = multiprocessing.Pool(processes=mp.Nthreads)\n # resultsRaw = [pool.apply_async(falco_get_single_sim_Efield_LamPolZern,\n # args=(iv, inds_list_zern, mp)) for iv in range(NvalsZern)]\n # results = [p.get() for p in resultsRaw] # All the E-fields in a list\n # pool.close()\n # pool.join()\n\n pool = multiprocessing.Pool(processes=mp.Nthreads)\n results = pool.starmap(falco_get_single_sim_Efield_LamPolZern,\n [(iv, inds_list_zern, mp)\n for iv in range(NvalsZern)])\n pool.close()\n pool.join()\n for iv in range(NvalsZern):\n Eab[:, :, iv] = results[iv]\n pass\n else:\n for iv in range(NvalsZern):\n Eab[:, :, iv] = falco_get_single_sim_Efield_LamPolZern(iv, inds_list_zern, mp)\n print('done.')\n\n # Reorganize the output and compute delta E-field\n for ni in range(NvalsZern):\n ilam = inds_list_zern[ni][0]\n ipol = inds_list_zern[ni][1]\n izern = inds_list_zern[ni][2]\n dEZarray[:, :, ilam, ipol, izern] = Eab[:, :, ni] - E0array[:, :, ilam, ipol]\n del Eab\n\n # Compute Zernike sensitivity values averaged across each annulus in dark hole\n # |dE|^2 averaged over wavelength and polarization state\n dE2cube = np.squeeze(np.mean(np.mean(np.abs(dEZarray)**2, axis=3), axis=2))\n dE2mat = np.zeros((Nzern, Nannuli))\n for iz in range(Nzern):\n dEtemp = np.squeeze(dE2cube[:, :, iz])\n for ia in range(Nannuli):\n dE2mat[iz, ia] = np.mean(dEtemp[(np.squeeze(maskCube[:, :, ia]) == 1)])\n\n # Print Zernike sensitivity results to command line\n for iz in range(Nzern):\n print('|dE|^2 at %dnm with %dnm RMS of Z%d =' %\n (np.round(mp.lambda0*1e9), np.round(1e9*mp.full.ZrmsVal), indsZnoll[iz]), end='')\n for ia in range(Nannuli):\n print('\\t%.2e (%.1f-%.1f l/D)' % (dE2mat[iz, ia], Rsens[ia, 0], Rsens[ia, 1]), end='')\n print('\\n', end='')\n\n return dE2mat", "title": "" }, { "docid": "ea087e48cf74ea3e9157c923bda6efc8", "score": "0.47357464", "text": "def testORCA_ORCA4_1_orca_from_issue_736_out(logfile):\r\n assert len(logfile.data.scfvalues) == 23\r\n # The first iteration in the problematic block:\r\n # ITER Energy Delta-E Max-DP RMS-DP [F,P] Damp\r\n # *** Starting incremental Fock matrix formation ***\r\n # 0 -257.0554667435 0.000000000000537.42184135 4.76025534 0.4401076 0.8500\r\n assert abs(logfile.data.scfvalues[14][0][1] - 537) < 1.0, logfile.data.scfvalues[14][0]", "title": "" }, { "docid": "24922b90288653e2f502c2d3e561b276", "score": "0.47338605", "text": "def restruct(self, mapping, ztr):", "title": "" }, { "docid": "9756eb9669a9ac6d6562f44d6355b88d", "score": "0.47300354", "text": "def find_all_ORFs_oneframe(dna):\n\n restOfOrf = ''\n allOrfs = []\n x = 0\n y = 0\n\n while x <= len(dna)/3:\n # #Look for a start codon\n # for x in range(0, len(dna), 3)\n if dna[3*x:3*x+3] == 'ATG':\n restOfOrf += dna[3*x:3*x+3]\n x += 1\n y = 0\n while x <= len(dna)/3 and y == 0:\n #Continue ORF until a stop codon\n if dna[3*x:3*x+3] not in ['TAG', 'TAA','TGA']:\n restOfOrf += dna[3*x:3*x+3]\n x += 1\n else:\n allOrfs.append(restOfOrf)\n restOfOrf = ''\n x += 1\n y += 1\n if restOfOrf != '':\n allOrfs.append(restOfOrf)\n else:\n x += 1\n return allOrfs", "title": "" }, { "docid": "f37e6bf548dc41a9af7a222c7297b469", "score": "0.47275504", "text": "def fringes2pistons(fringephases, nholes):\n Anrm = makeA(nholes)\n Apinv = np.linalg.pinv(Anrm)\n\n return np.dot(Apinv, fringephases)", "title": "" }, { "docid": "07a7d5f7708d81a6a7c86eeef5d99fc9", "score": "0.47257614", "text": "def find_all_ORFs(dna):\n result = []\n for i in range(3): # 
frame changes from 0 to 2\n result.extend(find_all_ORFs_oneframe(dna[i:]))\n return result", "title": "" }, { "docid": "8ddf8b86ed3a55c398170a673f36f8e8", "score": "0.47208253", "text": "def save_ocn_vars_as_SHstereo():\n SST = np.zeros((1,1))\n varlist = {'SST':SST}\n ncfile = '/glade/p/work/aordonez/cesm_mapping/map_gx1v6SH_TO_SHstereo25km_blin.161213.nc'\n for varname in varlist:\n print(\"reading data from file\")\n var = read_ocn_data(varname,'001')\n print(\"regridding to stereo\")\n tmp= grid1togrid2(var,ncfile)\n print(\"reshaping\")\n #tmp = np.transpose(tmp,(2,0,1))\n varlist[varname] = tmp\n np.save('/glade/scratch/aordonez/'+varname+'SHproj.npy',tmp)\n return varlist", "title": "" }, { "docid": "9c3b35aed7696952ef3d8019909b80f1", "score": "0.47172502", "text": "def compute_image_from_velodyne_matrices(calibration_dir: str) -> dict:\n # Based on code from monodepth2 repo.\n\n # Load cam_to_cam calib file.\n cam2cam = read_calibration_file(os.path.join(calibration_dir, 'calib_cam_to_cam.txt'))\n # Load velo_to_cam file.\n velo2cam = read_calibration_file(os.path.join(calibration_dir, 'calib_velo_to_cam.txt'))\n\n velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'].reshape(3, 1)))\n velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))\n camera_image_from_velodyne_dict = {}\n\n for camera_name in KITTICameraNames:\n # Get camera number by slicing last 2 characters off of camera_name string.\n camera_path = CAMERA_NAME_TO_PATH_MAPPING[camera_name]\n\n cam_num = camera_path[-2:]\n R_cam2rect = np.eye(4)\n R_cam2rect[:3, :3] = cam2cam[f\"R_rect_{cam_num}\"].reshape(3, 3)\n P_rect = cam2cam[f\"P_rect_{cam_num}\"].reshape(3, 4)\n camera_image_from_velodyne = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)\n camera_image_from_velodyne = np.vstack((camera_image_from_velodyne, np.array([[0, 0, 0, 1.0]])))\n camera_image_from_velodyne_dict.update({KITTICameraNames(camera_name).name: camera_image_from_velodyne})\n\n return camera_image_from_velodyne_dict", "title": "" }, { "docid": "35f4ba7488805e4e225f043882d5a39d", "score": "0.47160769", "text": "def find_all_solutions(dimacsfile, mapfile):", "title": "" }, { "docid": "7b164c0999de5215de0e59be926d5197", "score": "0.47095913", "text": "def main(): # pylint: disable=too-many-locals,too-many-statements,too-many-branches,line-too-long\n # 1- Get gene name and species\n # 2- Match with ensembl gene ID\n # 3- Get the set of orthologous genes as ensembl geneID\n # (option for filtering the set of species)\n # 4a- Get all transcript information for the orthologous genes\n # and store the information on the species next to it\n # 4b- Get the exons annotation and sequence\n # 5- Get the gene tree for the selected species\n\n # 1-\n args = parse_command_line().parse_args()\n\n # 2-\n orthokeep = utils.species.get_species_list(args.specieslist)\n\n cdirectory = args.genename\n\n if is_esemble_id(args.genename):\n curgene = args.genename\n else:\n print(f\"Searching ID for gene with name {args.genename} in species {args.species} ...\")\n geneids = get_geneids_from_symbol(args.species, args.genename)\n _print_if(\n args.verbose,\n f\"Found the following list of ids: {json.dumps(geneids)}\")\n if not geneids:\n raise Exception(f\"No results for {args.genename}\")\n curgene = geneids[0]\n print(f\"... 
using gene id {curgene} from now on.\")\n\n _print_if(args.verbose,\n f\"Results will be saved in directory {cdirectory}\")\n query_result_subdir = os.path.join(cdirectory, \"Ensembl\")\n if not os.path.exists(cdirectory):\n os.makedirs(query_result_subdir)\n\n save_ensembl_version(query_result_subdir)\n # 3-\n # print \"Searching for orthologous sequences (ignoring paralogues for now)\"\n print(\"Writing the gene tree\")\n tree_text = get_genetree(curgene)\n if tree_text is not None:\n with open(os.path.join(query_result_subdir, \"tree.nh\"),\n \"w\", encoding=\"utf-8\") as treeout:\n treeout.write(tree_text)\n\n print(\"Looking for orthologs\")\n orthologs = get_orthologs(curgene)\n nparalogs = len(\n [x for x in orthologs if x['type'] == \"within_species_paralog\"])\n _print_if(\n args.verbose, f\"Found a total of {len(orthologs)} orthologs, of which {nparalogs} paralogs\")\n # ['taxonomy_level']\n _print_if(args.verbose, \"Orthologous species:\")\n number = 0\n corthologs = Counter(\n [ortholog['target']['species'] for ortholog in orthologs])\n for i, j in corthologs.most_common():\n _print_if(args.verbose, \" %-23s: %4d\" % (i, j)) # pylint: disable=consider-using-f-string,line-too-long\n # if nt > 5: break\n number += 1\n ##\n orthologs_filtered = filter_ortho(orthologs,\n orthokeep,\n relationship=args.orthology)\n # TO DO print : orthokeep can be None\n # _print_if(args.verbose,\n # \"Filtering on %d species, %d matches\" % (len(orthokeep),\n # len(orthologs_filtered)))\n\n print(\"Getting all the transcripts for TSL file\")\n tsl_cur, tsl_ortho = get_transcripts_orthologs(curgene, orthologs_filtered)\n write_tsl_file(query_result_subdir, [tsl_cur] + tsl_ortho)\n\n _print_if(args.verbose, f\"**** Query species : {args.species}\")\n _print_if(args.verbose,\n f\"Got a total of {len(tsl_cur)} transcripts with biotypes\")\n for i, j in Counter([dic['biotype'] for dic in tsl_cur]).most_common():\n _print_if(args.verbose, \" %-23s: %4d\" % (i, j)) # pylint: disable=consider-using-f-string,line-too-long\n _print_if(args.verbose, \"**** Orthologues\")\n for tr_o in tsl_ortho:\n _print_if(args.verbose,\n \"%-22s: %4d transcripts \" % (tr_o[0]['species'], len(tr_o))) # pylint: disable=consider-using-f-string,line-too-long\n\n print(\"Getting exons sequences\")\n # TO DO revert to multiple files if it is easier\n ffasta = os.path.join(query_result_subdir, \"sequences.fasta\")\n fexonstable = os.path.join(query_result_subdir, \"exonstable.tsv\")\n with open(ffasta, \"w\", encoding=\"utf-8\") as fastaout:\n with open(fexonstable, \"w\", encoding=\"utf-8\") as exonstableout:\n dex = get_listofexons(curgene)\n lexid = list({x['exon_id'] for x in dex})\n _print_if(args.verbose,\n f\"Getting the sequences files for {curgene}\")\n exfasta = get_exons_sequences(lexid)\n extable = get_biomart_exons_annot(args.species, curgene)\n if extable is None:\n _store_errors(query_result_subdir, args.species, curgene)\n sys.exit(1)\n extable = _rename(extable)\n exonstableout.write(extable)\n exons_name = r\"{args.species}:{args.genename}\"\n for dseq in exfasta:\n dictseq2fasta(dseq, exons_name, fastaout)\n for ortholog in orthologs_filtered:\n orthoid = ortholog['target']['id']\n orthospecies = ortholog['target']['species']\n # orthotaxon = ortholog['target']['taxon_id']\n ortho_name = f\"{orthospecies}:{orthoid}\"\n _print_if(args.verbose,\n f\"Getting exons information for {ortho_name}\")\n dexortho = get_listofexons(orthoid)\n lexidortho = list({x['exon_id'] for x in dexortho})\n _print_if(args.verbose, f\" 
- {len(lexidortho)} exons\")\n exorthofasta = get_exons_sequences(lexidortho)\n _print_if(args.verbose,\n f\" - {len(exorthofasta)} fasta sequences\")\n ortho_exontable = get_biomart_exons_annot(orthospecies,\n orthoid,\n header=False)\n if ortho_exontable is None:\n warnings.warn(f'Download failed for {orthoid} in {orthospecies}! ')\n _store_errors(query_result_subdir, orthospecies, orthoid)\n continue\n\n _print_if(\n args.verbose, \" - %d lines in the exon table\" %\n (ortho_exontable.count(\"\\n\") + 1))\n exonstableout.write(ortho_exontable)\n for dseq in exorthofasta:\n dictseq2fasta(dseq, ortho_name, fastaout)\n print(\"------------------- transcript_query finished -------------------\")", "title": "" }, { "docid": "617b6d8e1b3bf6f45fba23316fde1a13", "score": "0.4705475", "text": "def coralmapTXT(self,path,reefmap,coralmaps,sc_ben):\r\n # Looping through every year\r\n for run in range(len(coralmaps)):\r\n for year in range(len(coralmaps[run])):\r\n outpath = path+'_run'+str(run)+'_year'+str(year)+'.txt'\r\n outs = open(outpath,'w')\r\n coralmapscaled = [i * sc_ben for i in coralmaps[run][year]]\r\n print >>outs,'ncols 326'\r\n print >>outs,'nrows 631'\r\n print >>outs,'xllcorner 307316.369'\r\n print >>outs,'yllcorner 1751552.92'\r\n print >>outs,'cellsize 1000'\r\n print >>outs,'NODATA_value -9999'\r\n for ii in range(reefmap.length):\r\n \t\tfor jj in range(reefmap.width):\r\n if reefmap.reefmap[ii,jj] != 0.0:\r\n print >>outs,coralmapscaled.pop(0),\r\n else:\r\n print >>outs,-9999,\r\n if jj == reefmap.width - 1:\r\n print >>outs,'\\n',\r\n outs.close()", "title": "" }, { "docid": "b7cebb9e00fe738ea2c0bc9b2e108e4c", "score": "0.4699609", "text": "def create_orf_data(project_directory, correct_size_contigs_path):\n\n print(\"Creating ORF data...\")\n\n prodigal_output = project_directory + '/intermediate_outputs/prodigal/prodigal_orf_output'\n prodigal_prtn_seq_output = project_directory + '/intermediate_outputs/prodigal/prodigal_prtn_seq_output.faa'\n run_prodigal(correct_size_contigs_path, prodigal_output, prodigal_prtn_seq_output)\n\n prodigal_forward_and_reverse_df = parse_prodigal_sco_output(prodigal_output)\n\n prodigal_forward_and_reverse_df[0].to_csv(project_directory + '/circos_diagram_input_data/ORF.txt', sep=' ', index=False, header=False)\n prodigal_forward_and_reverse_df[1].to_csv(project_directory + '/circos_diagram_input_data/ORF_reverse.txt', sep=' ', index=False, header=False)\n\n return prodigal_prtn_seq_output", "title": "" }, { "docid": "a2adb23377a65f3cb13461571d0ad71e", "score": "0.46993995", "text": "def walk_map( direct_orbiters ):\n for orbiter in direct_orbiters:\n yield path_to_root( direct_orbiters, orbiter )", "title": "" }, { "docid": "499ebfb0b8ad7975b30a8cb4fe3315db", "score": "0.46946135", "text": "def makePhotoDat_lp():\n # ==========\n # H-band\n # ==========\n lists = [workDir + 'starlists/mag09jun26_w51a_f1_lp_rms_rel.maglis',\n workDir + 'starlists/mag09jun26_w51a_f2_lp_rms_rel.maglis',\n workDir + 'starlists/mag09jun26_w51a_f3_lp_rms_rel.maglis',\n workDir + 'starlists/mag09jun26_w51a_f4_lp_rms_rel.maglis']\n\n # Coo Star Coordinates relative to f1_psf1\n # Pull coo stars from /u/jlu/data/w51/09jun26/reduce/analysis.py\n # f1_psf1 f2_psf0 f3_psf2 f4_psf1\n cooStarX = np.array([ 0.000, 8.004, -6.399, -9.215])\n cooStarY = np.array([ 0.000, 2.748, 4.607, 0.448])\n\n fields = ['f1', 'f2', 'f3', 'f4']\n\n # Output file\n _out = open('results_photo_calib_lp.dat', 'w')\n\n \n for ii in range(len(lists)):\n list = asciidata.open(lists[ii])\n \n mag 
= list[1].tonumpy()\n x = list[3].tonumpy()\n y = list[4].tonumpy()\n snr = list[7].tonumpy()\n \n magErr = 2.5 * math.log10(math.e) / snr\n \n xref = x[0]\n yref = y[0]\n xarc = ((x - xref) * 0.01 * -1.0) + cooStarX[ii]\n yarc = ((y - yref) * 0.01) + cooStarY[ii]\n rarc = np.hypot(xarc, yarc)\n\n # Read in the old photometric calibration file and try\n # to get names of the sources using a crude coordinate matching.\n names = []\n oldCalib = asciidata.open('/u/jlu/data/w51/source_list/w51a_photo.dat')\n oldNames = oldCalib[0].tonumpy()\n oldX = oldCalib[1].tonumpy()\n oldY = oldCalib[2].tonumpy()\n\n for jj in range(len(xarc)):\n diff = np.hypot(xarc[jj] - oldX, yarc[jj] - oldY)\n minIdx = diff.argmin()\n \n if diff[minIdx] < 0.2:\n names.append(oldNames[minIdx])\n else:\n names.append('')\n names = np.array(names)\n\n\n# # Loop through stars and reject close pairs\n# keepStar = np.ones(len(rarc), dtype=bool)\n# pairCutRad = 0.2\n# pairCutMag = 2\n\n# for ss in range(len(rarc)):\n# diffx = xarc - xarc[ss]\n# diffy = yarc - yarc[ss]\n# diffr = np.hypot(diffx, diffy)\n# diffm = abs(mag - mag[ss])\n \n# idx = np.where((diffr != 0) & \n# (diffr < pairCutRad) & \n# (diffm < pairCutMag))[0]\n\n# keepStar[idx] = False\n \n# mag = mag[keepStar == True]\n# x = x[keepStar == True]\n# y = y[keepStar == True]\n# snr = snr[keepStar == True]\n# magErr = magErr[keepStar == True]\n# xarc = xarc[keepStar == True]\n# yarc = yarc[keepStar == True]\n# rarc = rarc[keepStar == True]\n# names = names[keepStar == True]\n \n # Figure out which stars we should use as calibrators.\n idx1 = np.where(mag < 14.5)[0] # Get brightest stars\n magErrFloor = np.median(magErr[idx1]) # Find median\n \n magErrCut = 1.3\n magCut = 14.5\n rarcCut = 12\n\n # Print out the brightest stars as they might be coo/psf stars\n # and need to be in w51a_photo.dat even if they aren't calibrators.\n idx = np.where((mag < 20) & (rarc < rarcCut) &\n (magErr >= (magErrCut * magErrFloor)))[0]\n\n print ''\n print '*** Lp-band Bright Sources *** (not necessarily calibrators)'\n for jj in idx:\n print '%8.3f %8.3f %5.2f +/- %5.2f' % \\\n (xarc[jj], yarc[jj], mag[jj], magErr[jj])\n\n _out.write('*** Lp-band Bright Sources *** (not necessarily calibrators)\\n')\n for jj in idx:\n _out.write('%-13s %8.3f %8.3f %5.2f +/- %5.2f\\n' % \n (names[ii], xarc[ii], yarc[ii], mag[ii], magErr[ii]))\n\n idx2 = np.where((magErr < (magErrCut * magErrFloor)) &\n (mag < magCut) & \n (rarc < rarcCut))[0]\n\n mag = mag[idx2]\n x = x[idx2]\n y = y[idx2]\n snr = snr[idx2]\n magErr = magErr[idx2]\n xarc = xarc[idx2]\n yarc = yarc[idx2]\n rarc = rarc[idx2]\n names = names[idx2]\n\n print ''\n print '*** Lp-band Photometric Calibrators for %s ***' % (fields[ii])\n# print 'Pairwise Cut: dr < %.1f and dm < %.1f' % (pairCutRad, pairCutMag)\n print 'Magnitude Error Cut: %.1f * %.2f = %.2f' % \\\n (magErrCut, magErrFloor, magErrCut*magErrFloor)\n print 'Magnitude Cut: %.2f' % (magCut)\n print 'Radius Cut: %.1f' % (rarcCut)\n print 'Number of calibrators: %d' % len(mag)\n print 'Magnitude Range: %.2f - %.2f' % (mag.min(), mag.max())\n\n _out.write('\\n')\n _out.write('*** Lp-band Photometric Calibrators for %s***\\n' % \n (fields[ii]))\n# _out.write('Pairwise Cut: dr < %.1f and dm < %.1f\\n' % \n# (pairCutRad, pairCutMag))\n _out.write('Magnitude Error Cut: %.1f * %.2f = %.2f\\n' %\n (magErrCut, magErrFloor, magErrCut*magErrFloor))\n _out.write('Magnitude Cut: %.2f\\n' % (magCut))\n _out.write('Radius Cut: %.1f\\n' % (rarcCut))\n _out.write('Number of calibrators: %d\\n' % 
len(mag))\n _out.write('Magnitude Range: %.2f - %.2f\\n' % (mag.min(), mag.max()))\n \n # Over plot the calibrators on an image\n dataDir = '/u/jlu/data/w51/09jun26/combo/'\n img = pyfits.getdata(dataDir + 'mag09jun26_w51a_'+fields[ii]+'_lp.fits')\n \n xaxis = ((np.arange(img.shape[1]) - xref) * 0.01 * -1.0) + cooStarX[ii]\n yaxis = ((np.arange(img.shape[0]) - yref) * 0.01) + cooStarY[ii]\n \n py.figure(2, figsize=(10,10))\n py.clf()\n py.imshow(np.sqrt(img), cmap=py.cm.gist_heat,\n extent=[xaxis.max(), xaxis.min(), yaxis.min(), yaxis.max()],\n interpolation=None, vmin=math.sqrt(800), vmax=math.sqrt(1500))\n \n py.plot(xarc, yarc, 'go', mfc='none', mec='green', ms=10, mew=1)\n py.xlabel('R.A. Offset from f1_psf1 (arcsec)')\n py.ylabel('Dec. Offset from f1_psf1 (arcsec)')\n py.title('Lp-band Photometric Calibrators for ' + fields[ii])\n \n for jj in range(len(xarc)):\n py.text(xarc[jj], yarc[jj], '%.2f' % mag[jj], \n fontsize=6, color='yellow')\n \n py.savefig(workDir + \n 'starlists/plots/photo_calib_map_lp_' + fields[ii] + '.png')\n \n # Print out calibrators\n for jj in range(len(xarc)):\n print '%8.3f %8.3f %5.2f +/- %5.2f' % \\\n (xarc[jj], yarc[jj], mag[jj], magErr[jj])\n\n # Print out calibrators\n for ii in range(len(xarc)):\n print '%-13s %8.3f %8.3f %5.2f +/- %5.2f' % \\\n (names[ii], xarc[ii], yarc[ii], mag[ii], magErr[ii])\n _out.write('%-13s %8.3f %8.3f %5.2f +/- %5.2f\\n' % \n (names[ii], xarc[ii], yarc[ii], mag[ii], magErr[ii]))", "title": "" }, { "docid": "8acf82f5d6bb39c38e82ae04a3592881", "score": "0.46908996", "text": "def format_isochrones(input_iso_dir):\n # Store current directory for later\n start_dir = os.getcwd()\n\n # Move into metallicity direcotry, read iso.fits file\n os.chdir(input_iso_dir)\n \n print( 'Read Input: this is slow')\n iso = Table.read('iso.fits')\n print( 'Done' )\n \n ages_all = iso['col1']\n \n # Extract the unique ages\n age_arr = np.unique(ages_all)\n\n # For each unique age, extract the proper rows and make corresponding\n # table. Be sure to separate rotating from non-rotating, and put in\n # separate subdirectories\n\n # First make the rot and norot directories, if they don't exit\n if os.path.exists('rot'):\n pass\n else:\n os.mkdir('rot')\n os.mkdir('norot')\n \n print( 'Making individual isochrone files')\n for age in age_arr:\n good = np.where(ages_all == age)\n\n # Identify rot vs. 
non-rot\n idx_r = np.where(iso[good]['col2'] == 'r')\n idx_n = np.where(iso[good]['col2'] == 'n')\n\n tmp_r = iso[good][idx_r]\n tmp_n = iso[good][idx_n]\n \n # Write tables\n tmp_r.write('rot/iso_{0:4.2f}.fits'.format(age))\n tmp_n.write('norot/iso_{0:4.2f}.fits'.format(age))\n\n # Return to starting directory\n os.chdir(start_dir)\n\n return", "title": "" }, { "docid": "0d89a24fca19dab37c859fc16691e1ec", "score": "0.46904254", "text": "def CenterGen2(rij,E):\n n = len(rij)\n rflat = rij.reshape(n*n,3)\n rSflat = map(SymSector,rflat)\n rUnique = set(map(tuple,rSflat))\n dic = {k:ListGF(E,k) for k in rUnique}\n gflat = np.array([dic[tuple(r)] for r in rSflat])\n \n g_mx = gflat.reshape(n,n) \n return g_mx", "title": "" }, { "docid": "44cd97990cad229c42b101c97c58b2a4", "score": "0.4689914", "text": "def write_matched_catalogs(x,y,ra,dec,flags,slnames):\n for ctr in range(0,2):\n sl_name = slnames[ctr]\n for file_ending in [\"daophot.txt\", \"daophot_corrected.txt\", \"point-cat.ecsv\", \"sexphot.txt\",\n \"sexphot_corrected.txt\", \"segment-cat.ecsv\"]:\n if sl_name.endswith(file_ending):\n output_filename = sl_name.replace(file_ending,\"matched_sources_only_{}\".format(file_ending))\n flagscolname = \"Flags\"\n if (sl_name.endswith(\"daophot.txt\") or sl_name.endswith(\"daophot_corrected.txt\") or sl_name.endswith(\"point-cat.ecsv\")):\n xcolname = \"X-Center\"\n ycolname = \"Y-Center\"\n elif sl_name.endswith(\"segment-cat.ecsv\"):\n xcolname = \"X-Centroid\"\n ycolname = \"Y-Centroid\"\n else:\n xcolname = \"X_IMAGE\"\n ycolname = \"Y_IMAGE\"\n flagscolname = \"FLAGS\"\n output_table = Table([x[ctr,:],y[ctr,:],ra[ctr,:],dec[ctr,:],flags[ctr,:]],names=(xcolname,ycolname,\"RA\",\"DEC\",flagscolname))\n if output_filename.endswith(\".ecsv\"):\n output_format = \"ascii.ecsv\"\n if output_filename.endswith(\".txt\"):\n output_format = \"ascii.csv\"\n output_table.write(output_filename, format = output_format)\n log.info(\"Wrote matched sources only catalog {}\".format(output_filename))", "title": "" }, { "docid": "3298ba7fb622f41570c1e2bd5771552e", "score": "0.46861726", "text": "def gen_xrf_map_dict(nx=10, ny=5, elines=[\"S_K\", \"Au_M\", \"Fe_K\"]):\n img = {}\n for e in elines:\n map = np.random.rand(ny, nx) * np.random.rand() * 10\n img[e] = map\n\n # Scaler\n map_sclr = np.ones(shape=(ny, nx), dtype=float) * np.random.rand() * 2\n img[\"sclr\"] = map_sclr\n\n return img", "title": "" } ]
098f625be7728afddfd2b2c1387e0124
Test the system removes files. First create a temporary empty file in the temporary path, then remove it.
[ { "docid": "6627f05b2bc7c4b188734c79718fa220", "score": "0.7850327", "text": "def test_remove(self):\n test_file = os.path.join(self._system.get_temporary_path(), \"nusoft.test\")\n with open(test_file, 'a'):\n os.utime(test_file, None)\n self.assertTrue(os.path.exists(test_file))\n self._system.remove(test_file)\n self.assertFalse(os.path.exists(test_file))", "title": "" } ]
[ { "docid": "2d968d9edc44d54ee6dad643d82abd51", "score": "0.81269306", "text": "def test_removed(self):\n path = None\n with TemporaryDirectory() as tmp:\n path = tmp\n self.assertTrue(os.path.isdir(tmp))\n tmpfile = os.path.join(tmp, \"a_temp_file\")\n open(tmpfile, \"w\").write(\"data\")\n self.assertTrue(os.path.isfile(tmpfile))\n self.assertFalse(os.path.isdir(path))\n self.assertFalse(os.path.exists(path))", "title": "" }, { "docid": "54faffdd9df0f5c5818c6c4d00992fdc", "score": "0.78863084", "text": "def remove_temporary_files():\n try:\n xml_file_path, bin_file_path = get_ida_exported_files()\n if os.path.isfile(xml_file_path):\n os.remove(xml_file_path)\n\n if os.path.isfile(bin_file_path):\n os.remove(bin_file_path)\n\n except Exception:\n print(\"GhIDA:: [!] Unexpected error while removing temporary files.\")", "title": "" }, { "docid": "f0d40f19f2bb8ff51e5621fe337e2c38", "score": "0.7584222", "text": "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "title": "" }, { "docid": "71ca5b78012da9bd384480c731c4093d", "score": "0.7533892", "text": "def del_tmp() -> None:\n for elem in os.listdir('./tmp'):\n path = f\"./tmp/{elem}\"\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)", "title": "" }, { "docid": "21ade44d89f37ed82c613cc450427b80", "score": "0.7507764", "text": "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "title": "" }, { "docid": "f264ee141ba6730703a3fc01a6e1842f", "score": "0.74953747", "text": "def _remove_tmpfiles():\n for f in tmpfiles:\n try:\n os.remove(f)\n except OSError:\n pass", "title": "" }, { "docid": "502bd6236692818f30275b53447a08ec", "score": "0.73938257", "text": "def tearDown(self):\n for d in os.listdir(tmp_dir_path):\n d_path = os.path.join(tmp_dir_path,d)\n try:\n os.remove(d_path)\n except:\n for f in os.listdir(d_path):\n f_path = os.path.join(d_path,f)\n os.remove(f_path)\n os.rmdir(d_path)\n assert os.listdir(tmp_dir_path) == []", "title": "" }, { "docid": "5a3b7d65ac91ced805d70df2affcaa51", "score": "0.73225266", "text": "def test_remove(self):\n reposDir = self.makeRepository(self.tmpDir)\n testFile = reposDir.child(\"some-file\")\n testFile.setContent(b\"something\")\n self.commitRepository(reposDir)\n self.assertTrue(testFile.exists())\n\n self.createCommand.remove(testFile)\n testFile.restat(False) # Refresh the file information\n self.assertFalse(testFile.exists(), \"File still exists\")", "title": "" }, { "docid": "3dbd545eb2891de7a0bdb59e8e4e5d06", "score": "0.73133916", "text": "def test_copy_delete_file(tmp_path: Path) -> None:\n sample_file = Path(__file__).parent.joinpath(\"sample.txt\")\n\n sample_file_tmp = tmp_path.joinpath(\"sample.txt\")\n assert not os.path.exists(sample_file_tmp)\n\n shutil.copyfile(sample_file, sample_file_tmp)\n assert os.path.isfile(sample_file_tmp)\n # pathlib.Path equivalent\n assert sample_file_tmp.is_file()\n\n os.remove(sample_file_tmp)\n assert not os.path.exists(sample_file_tmp)\n # pathlib.Path equivalent\n assert not sample_file_tmp.exists()", "title": "" }, { "docid": "f5b332afce3456ee48497b3e03b93843", "score": "0.73034614", "text": "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and 
exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "title": "" }, { "docid": "50ddb98a1afe33437d4a01eafa88b26c", "score": "0.73018605", "text": "def test_09_cleanup(self, mock_remove, mock_config):\n udocker.Config = mock_config\n udocker.Config.tmpdir = \"/tmp\"\n udocker.FileUtil.tmptrash = {'file1.txt': None, 'file2.txt': None}\n udocker.FileUtil(\"\").cleanup()\n self.assertEqual(mock_remove.call_count, 2)", "title": "" }, { "docid": "f05f9755b7b577012a5e1825645b61ac", "score": "0.7301515", "text": "def _remove_temp_path():\n if os.path.exists(_temp_path):\n if os.path.isdir(_temp_path):\n def onerror(function, path, excinfo):\n persist.printf(\"{}: Unable to delete '{}' while cleaning up temporary directory\"\n .format(p_name, path))\n import traceback\n traceback.print_exc(*excinfo)\n import shutil\n shutil.rmtree(_temp_path, onerror=onerror)\n else:\n persist.printf(\"{}: For some reason, '{}' is a file. Removing...\"\n .format(p_name, _temp_path))\n os.remove(_temp_path)", "title": "" }, { "docid": "13d0df5c6a533d35f769084907bd01d4", "score": "0.72893023", "text": "def CleanUp(self, path):\n try:\n if os.path.exists(path):\n os.remove(path)\n except (OSError, IOError) as e:\n logging.info(\"Failed to remove temporary file %s. Err: %s\", path, e)", "title": "" }, { "docid": "b70db26e05b17cd1004f7e07787b3ab9", "score": "0.72319627", "text": "def clean_up_temp_dir():\n files = glob.glob(f'{CONFIG_DIR}/tmp/*')\n for f in files:\n try:\n os.remove(f)\n except Exception:\n pass", "title": "" }, { "docid": "865403c8de3527a1ad7d837f37f8a380", "score": "0.72295904", "text": "def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)", "title": "" }, { "docid": "d19bc315934dea33a91606504c6f8e7d", "score": "0.7197689", "text": "def delete_temporary_files(request, tmp_path_factory):\r\n _tmp_path_factory = tmp_path_factory\r\n\r\n def cleanup():\r\n tmp_path = _tmp_path_factory.getbasetemp()\r\n if pathlib.Path(tmp_path).exists() and pathlib.Path(tmp_path).is_dir():\r\n shutil.rmtree(tmp_path)\r\n\r\n request.addfinalizer(cleanup)", "title": "" }, { "docid": "e24acd896e1a65bce6fc7689386f721b", "score": "0.71972746", "text": "def test_999_remove_testfiles(self):\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __test_filename = consts.TEST_FILENAME\n __dir_game_testfile = os.path.join(__dir_game_saves, __test_filename)\n __test_filename_append1 = __test_filename + \"__1\"\n __dir_game_testfile_append1 = os.path.join(__dir_game_saves, __test_filename_append1)\n __test_filename_append2 = __test_filename + \"__2\"\n __dir_game_testfile_append2 = os.path.join(__dir_game_saves, __test_filename_append2)\n __test_filename_append3 = __test_filename + \"__3\"\n __dir_game_testfile_append3 = os.path.join(__dir_game_saves, __test_filename_append3)\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = __test_filename + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n os.remove(__dir_game_logfile)\n self.assertFalse(os.path.isfile(__dir_game_logfile))\n __list_files = os.listdir(__dir_game_log)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_log)\n os.remove(__dir_game_testfile)\n self.assertFalse(os.path.isfile(__dir_game_testfile))\n os.remove(__dir_game_testfile_append1)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append1))\n os.remove(__dir_game_testfile_append2)\n 
self.assertFalse(os.path.isfile(__dir_game_testfile_append2))\n os.remove(__dir_game_testfile_append3)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append3))\n __list_files = os.listdir(__dir_game_saves)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_saves)", "title": "" }, { "docid": "8cd5686f1e7e2e5d97de98fb4be4f092", "score": "0.7147713", "text": "def tearDown(self):\n for root, dirs, files in os.walk(TEMPDIR, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n os.rmdir(root)", "title": "" }, { "docid": "501e869d9729a722a4de914fc0eb8167", "score": "0.7095067", "text": "def test_rmtree(tmp_path):\n file_path = Path(tmp_path, \"bar\")\n file_path.write_text(\"Test data\")\n make_readonly(file_path)\n\n utils.rmtree(tmp_path)\n\n assert not Path(tmp_path).exists()", "title": "" }, { "docid": "32272efaeb7a1448dbc64cb5839cf52b", "score": "0.7040762", "text": "def tearDown(self):\n print(\n \"\\nDeleting temporary files...\\n\")\n try:\n shutil.rmtree(TEST_DIR)\n except OSError:\n pass", "title": "" }, { "docid": "9af330a7288fb5de6f645f53fa6f6b5a", "score": "0.70179486", "text": "def test_04_remove_file(self, mock_uid, mock_isfile,\n mock_islink, mock_exists, mock_remove,\n mock_msg):\n mock_uid.return_value = os.getuid()\n mock_isfile.return_value = True\n # file does not exist (regression of #50)\n mock_exists = False\n futil = udocker.FileUtil(\"/tmp/filename4.txt\")\n status = futil.remove()\n self.assertTrue(status)\n # under /\n mock_exists = True\n futil = udocker.FileUtil(\"/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n status = futil.remove()\n self.assertFalse(status)\n # wrong uid\n mock_uid.return_value = os.getuid() + 1\n futil = udocker.FileUtil(\"/tmp/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n status = futil.remove()\n self.assertFalse(status)\n # under /tmp\n mock_uid.return_value = os.getuid()\n futil = udocker.FileUtil(\"/tmp/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n status = futil.remove()\n self.assertTrue(status)\n # under user home\n futil = udocker.FileUtil(\"/home/user/.udocker/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n futil.safe_prefixes.append(futil.topdir)\n status = futil.remove()\n self.assertTrue(status)\n # outside of scope 1\n futil = udocker.FileUtil(\"/etc/filename4.txt\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n futil.safe_prefixes = []\n status = futil.remove()\n self.assertFalse(status)", "title": "" }, { "docid": "2742e38673219d86566e5bac92dabc55", "score": "0.70162207", "text": "def clear_tempfiles(self, remove=True):\n while self._tempfiles:\n self.pop(remove)\n self.push()", "title": "" }, { "docid": "92b090484e2d5c256bad9f2103d663b9", "score": "0.69968367", "text": "def clear_tmp_folder(self):\r\n for file in os.listdir(self.temp_dir):\r\n if file.endswith('.png') or file.endswith('.jpg'):\r\n path = os.path.join(self.temp_dir, file)\r\n print ('Cleaned up {}'.format(path))\r\n os.remove(path)", "title": "" }, { "docid": "a09f64731080aed938074f27b81e746b", "score": "0.698895", "text": "def clean_temp_storage_dir(self, filenames):\n for fn in filenames:\n try:\n pathlib.Path(pathlib.PurePath(self.temp_storage_dir, fn)).unlink()\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "83068fc4a06cc97d05e802339d612835", "score": "0.69876045", "text": 
"def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None", "title": "" }, { "docid": "4533987e0626a237abb6f0ebae2698bd", "score": "0.68943", "text": "def test_cannot_remove_file(self):\n self.api.remove_file('/some-fake/path/to-delete-file.json')", "title": "" }, { "docid": "71122dc885ac0b4f3354b5ceffae8397", "score": "0.68895096", "text": "def __del__(self) -> None:\n try:\n shutil.rmtree(self.temp_path)\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "1ea6e00aee9bee4d475a3dea9244773d", "score": "0.68800896", "text": "def test_deleting_local_file_using_file_io() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n output_file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(output_file_location, \"wb\") as f:\n f.write(b\"foo\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Confirm that the file initially exists\n assert os.path.exists(output_file_location)\n\n # Delete the file using the file-io implementations delete method\n file_io.delete(output_file_location)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(output_file_location)", "title": "" }, { "docid": "a1f830cfd9b9a2ecf34c5a81e5edb29d", "score": "0.6867051", "text": "def tearDown(self):\n os.remove(self.testfilename)", "title": "" }, { "docid": "24289f9fbb172c03871cf64763d1bfff", "score": "0.68652517", "text": "def _remove_temporary_files(filename=None):\n if filename is not None:\n if filename in _temporary_files:\n # If this condition is not met then probably\n # _remove_temporary_files() has already been run at\n # exit\n dirname, _lock_file, _other_lock_files = _temporary_files[filename]\n try:\n remove(_lock_file)\n except OSError:\n pass\n\n # Only remove the temporary file if it is not being\n # used by any other ranks\n if not _lock_files_present(_other_lock_files):\n # Remove the given temporary file\n try:\n remove(filename)\n rmdir(dirname)\n except OSError:\n pass\n del _temporary_files[filename]\n # --- End: if\n\n return\n\n # Still here? 
Then remove all temporary files and lock files\n for filename in _temporary_files:\n try:\n remove(filename)\n except OSError:\n pass\n dirname, _lock_file, _other_lock_files = _temporary_files[filename]\n try:\n remove(_lock_file)\n except OSError:\n pass\n for lock_file in _other_lock_files:\n try:\n remove(lock_file)\n except OSError:\n pass\n # --- End: for\n\n try:\n rmdir(dirname)\n except OSError:\n pass\n # --- End: for\n\n _temporary_files.clear()", "title": "" }, { "docid": "7f9ad304d343401396e0d85fbaa1bf30", "score": "0.6862017", "text": "def clean_up(user, fname, tango_output):\n time.sleep(1)\n run(['rm', fname])\n time.sleep(1)\n path = tango_output + user + '.out'\n run(['rm', path])", "title": "" }, { "docid": "56da28aebdcfd14d45f976a131abab56", "score": "0.6858987", "text": "def tearDown(self):\n try:\n os.remove(self.junk_file)\n except OSError as doh:\n if doh.errno == 2:\n # No such File, ignore\n pass\n else:\n raise", "title": "" }, { "docid": "4e8a32b47408dd72a2458d69942151e3", "score": "0.68466514", "text": "def remove(self,path):\n path = os.path.join(self.testpath,path)\n if os.path.isfile(path):\n os.remove(path)\n if os.path.isdir(path):\n shutil.rmtree(path)", "title": "" }, { "docid": "2ba2416204fd32a8978dd1ccb9462a95", "score": "0.6836615", "text": "def tearDown(self):\n if os.path.exists(self.temp):\n shutil.rmtree(self.temp)", "title": "" }, { "docid": "10386d35b088dd27569397611510ad75", "score": "0.6824965", "text": "def tearDown(self):\n for fn in self.tempImages:\n os.remove(os.path.join(self.root, fn))\n os.rmdir(self.root)", "title": "" }, { "docid": "00e3caa09a8fe657140b0a71b8f37f7b", "score": "0.68062884", "text": "def tearDown(self):\r\n shutil.rmtree(self.temp_dir_path)", "title": "" }, { "docid": "00f6ce3fdbce91450a259e82bf353d63", "score": "0.6798098", "text": "def remove_file(self, path):\n pass", "title": "" }, { "docid": "aecd5e28dfd2381e4585d089ed958248", "score": "0.6793995", "text": "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "title": "" }, { "docid": "e34ff0eb74c29fca1d12a9094ebc7438", "score": "0.6789692", "text": "def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)", "title": "" }, { "docid": "3ba2734be53d40e780f96bcb7f17732d", "score": "0.67805916", "text": "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()", "title": "" }, { "docid": "70dd60ab0e507f35d673fff7dd7afa02", "score": "0.6775496", "text": "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "title": "" }, { "docid": "ffae3cb3ab6efd875dc9ac22bf7e6e9b", "score": "0.6770154", "text": "def test_tmp_file_content(self, mocker):\n payload = dict(id=\"B\", data={\"some\": \"data\"}, ai_service='A')\n headers = {'x-rh-identity': 'ABC'}\n\n with mocker.mock_module.patch.object(os, 'remove') as mock:\n self.client.post(self.url, json=payload, headers=headers)\n filename = mock.call_args[0][0]\n\n with tarfile.open(filename, 'r:gz') as tar:\n content = tar.extractfile('A_B.json').read()\n assert content == b'{\"some\": \"data\"}'\n\n os.remove(filename)", "title": "" }, { "docid": "460b0a0f5f60de0a364b4116898b6cff", "score": "0.6761863", "text": "def remove_tmp_sources(source_filename):\n logging.info('Removing temporary files ...')\n source_dir = os.path.dirname(source_filename)\n if os.path.exists(source_filename):\n 
os.remove(source_filename)\n for f in os.listdir(source_dir):\n if f.startswith('tmp_'):\n os.remove(os.path.join(source_dir, f))", "title": "" }, { "docid": "dda912cae82790388ffa267b869ebf8f", "score": "0.675963", "text": "def delete_tempfile(path):\n try:\n unlink(path)\n except:\n pass", "title": "" }, { "docid": "48662a23925cf0e54166382f375761c6", "score": "0.67329085", "text": "def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)", "title": "" }, { "docid": "7ae1e70dd914c6bc7b101944e450ce6c", "score": "0.67169297", "text": "def test_force_delete(mocker, tmp_path):\n ro_file = Path(tmp_path, 'bar')\n ro_file.write_text(\"Test data\")\n make_readonly(ro_file)\n\n rmtree = mocker.Mock()\n utils.force_delete(rmtree, ro_file, sys.exc_info())\n\n assert (ro_file.stat().st_mode & stat.S_IWRITE) == stat.S_IWRITE\n rmtree.assert_called_once_with(ro_file)\n\n utils.rmtree(tmp_path)", "title": "" }, { "docid": "290868928cc38ca0ec27a9565d910720", "score": "0.6710131", "text": "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "title": "" }, { "docid": "88057bc51bc7e06a1caa716d394d9736", "score": "0.6705171", "text": "def test_rm(self, client, remote_mock_dir):\n\n file_path = posixpath.join(remote_mock_dir, \"test.txt\")\n assert client.exists(file_path)\n\n with HdfsHook() as hook:\n hook.rm(file_path)\n\n assert not client.exists(file_path)", "title": "" }, { "docid": "1fda8cb37d0fe1a7d0c3bc637c0cc755", "score": "0.66856575", "text": "def delete_temp_file(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e", "title": "" }, { "docid": "6612d0365ee82b4f75eff32ef343c99e", "score": "0.6665749", "text": "def tearDown(self):\n try:\n os.remove(self.filename)\n except:\n pass", "title": "" }, { "docid": "064292ba582992439fe62a98cbe7b0ed", "score": "0.6656492", "text": "def tearDown(self):\n try:\n remove(\"file.json\")\n except:\n pass", "title": "" }, { "docid": "9e3bb50b845630a7ee441f60a708710b", "score": "0.6647406", "text": "def tearDown(self):\n if 'turbinia-test-tmp' in self.tmp_dir:\n shutil.rmtree(self.tmp_dir)", "title": "" }, { "docid": "9b5a838a5921708b454dc9e8563ef989", "score": "0.66435385", "text": "def tearDown(self):\r\n\r\n # turn off the alarm\r\n signal.alarm(0)\r\n\r\n remove_files(self.files_to_remove, False)\r\n if self.server_socket:\r\n self.server_socket.close()\r\n # give clients time to clean up\r\n sleep(1)\r\n if exists(self.tmp_dir):\r\n try:\r\n rmdir(self.tmp_dir)\r\n except OSError:\r\n # give clients some more time, fail if still error\r\n sleep(5)\r\n rmdir(self.tmp_dir)", "title": "" }, { "docid": "bbb6079ce2255514e7d4534a80081d98", "score": "0.6638979", "text": 
"def test_remove_empty_paths_basic_tests():\n from research_pyutils import mkdir_p, remove_empty_paths\n p1 = mkdir_p(join(test_p, 'tmp', ''))\n\n # test that it actually removes the sub-folders but not the root.\n remove_empty_paths(test_p, removeRoot=False, verbose=False)\n assert not isdir(p1)\n assert isdir(test_p)\n\n # test that it removes the path including the root.\n p1 = mkdir_p(p1)\n remove_empty_paths(test_p, removeRoot=True, verbose=False)\n assert not isdir(test_p)\n\n # test that it does not remove in case of non-empty folder.\n p1 = mkdir_p(p1)\n open(p1 + 'temp_files.txt', 'a').close()\n remove_empty_paths(test_p, removeRoot=True, verbose=False)\n assert isdir(p1)\n # remove the temp path and files\n rmtree(test_p_parent)", "title": "" }, { "docid": "5f7798a16a56c09b11404ed5cbfdf984", "score": "0.6635801", "text": "def temporary_file(request):\n file_handle, path = tempfile.mkstemp()\n os.close(file_handle)\n\n def cleanup():\n \"\"\"Remove temporary file.\"\"\"\n try:\n os.remove(path)\n except OSError:\n pass\n\n request.addfinalizer(cleanup)\n return path", "title": "" }, { "docid": "35c22fbd84d95726652584a7ca832bdf", "score": "0.6633704", "text": "def tearDown(self):\n\n for fname in self.fnames:\n os.remove(fname)", "title": "" }, { "docid": "a6fc0582dfe00102a1c9323f9403b0eb", "score": "0.66313124", "text": "def vtest_ut_cleanup(self):\n shutil.rmtree(self.get_test_file_path(), ignore_errors=True)", "title": "" }, { "docid": "7218a5e79da3c561ed9b6e7fcdc3846e", "score": "0.662901", "text": "def test_final_cleanup():\n cleanup_file(\"tfsaves\")", "title": "" }, { "docid": "11b19633323752d82ddd38a463343cbc", "score": "0.66096085", "text": "def cleanup(fname):\n if os.path.isfile(fname):\n try:\n os.remove(fname)\n print \"Cleaned up\", fname\n except OSError:\n print \"Failed to clean up\", fname", "title": "" }, { "docid": "cdf087b8562d74a0298f80cf0f3adc7c", "score": "0.6604262", "text": "def test_rmtree():\n\n tmpdir = os.path.join(tempfile.gettempdir(), \"jade-test-tmp87alkj8ew\")\n os.makedirs(tmpdir, exist_ok=True)\n\n assert os.path.exists(tmpdir)\n rmtree(tmpdir)", "title": "" }, { "docid": "2628facb96d496e524a4dd7636de3b72", "score": "0.6602219", "text": "def _clean_up_temporary_files(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n tf.gfile.Remove(filepath)\n\n tmp_dir = os.path.join(dataset_dir, 'cifar-100-python')\n tf.gfile.DeleteRecursively(tmp_dir)", "title": "" }, { "docid": "bcc0e19c58bcf362d5570a9257eae613", "score": "0.65986174", "text": "def tearDown(self):\r\n remove_files(self.files_to_remove)\r\n\r\n # Remove directories last, so we don't get errors trying to remove\r\n # files which may be in the directories.\r\n for d in self.dirs_to_remove:\r\n if exists(d):\r\n rmtree(d)", "title": "" }, { "docid": "580aa112cc76d4ed9f028371a06b0cf7", "score": "0.6598071", "text": "def tearDown(self):\n\n for fname in self.fnames:\n FileSystem.unlink(fname)", "title": "" }, { "docid": "1971bf085a09442062b53b97a99f0076", "score": "0.6587847", "text": "def tearDown(self):\n utils.rm_rf(TMP_DIR_PATH)", "title": "" }, { "docid": "6aafe4b3000e119be7ea6bd959e1d35d", "score": "0.6587443", "text": "def cleanup_file(path_to_file):\n print \"Removing generated file: %s\" % path_to_file\n os.remove(path_to_file)", "title": "" }, { "docid": "e026690fe7aba8d6f8f0444d4afcfa7a", "score": "0.6584706", "text": "def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space", "title": "" }, { 
"docid": "83b380cf20e82d3b08418f07f190c439", "score": "0.65834713", "text": "def remove(path):", "title": "" }, { "docid": "4db4acc955fca2b7f7fbb0f7930d7553", "score": "0.6582238", "text": "def _delete_temp():\n global _TEMP_NAME\n\n try:\n database.delete_temp(_TEMP_NAME)\n outputtools.delete_temp(_TEMP_NAME)\n except:\n raise", "title": "" }, { "docid": "d5e1e054a7e2474e98ff651e46034276", "score": "0.6578313", "text": "def test_05_remove_dir(self, mock_uid, mock_isfile, mock_islink,\n mock_isdir, mock_exists, mock_call,\n mock_msg):\n mock_uid.return_value = os.getuid()\n mock_isfile.return_value = False\n mock_islink.return_value = False\n mock_isdir.return_value = True\n mock_exists = True\n mock_call.return_value = 0\n # remove directory under /tmp OK\n futil = udocker.FileUtil(\"/tmp/directory\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n status = futil.remove()\n self.assertTrue(status)\n # remove directory under /tmp NOT OK\n mock_call.return_value = 1\n futil = udocker.FileUtil(\"/tmp/directory\")\n futil.topdir = \"/home/user/.udocker\"\n futil.tmpdir = \"/tmp\"\n status = futil.remove()\n self.assertFalse(status)", "title": "" }, { "docid": "d575455d3455dd0193210c728b208cd9", "score": "0.6576539", "text": "def _clean_up_temporary_files(dataset_dir):\n return", "title": "" }, { "docid": "e1695cb7c3022b6c8ca4c7f01c5a1154", "score": "0.6572027", "text": "def clearTemp():\n Installer.tempDir.rmtree(safety='Temp')", "title": "" }, { "docid": "40bb94efaf75088ffcceef00ec802504", "score": "0.6565856", "text": "def delete_temp_folder():\n\n tempFolder = os.path.join(os.getenv(\"APPDATA\"), \"GARI\\Temp\")\n\n if os.path.exists(tempFolder):\n for file in os.listdir(tempFolder):\n arcpy.Delete_management(os.path.join(tempFolder, file))", "title": "" }, { "docid": "b34a866fd40f9c6aeaeff26ed456b6a4", "score": "0.6559842", "text": "def remove_file(path: str) -> None:\n\tremove(path)", "title": "" }, { "docid": "276f089264e67c1a9e684388c80113c9", "score": "0.6553879", "text": "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "title": "" }, { "docid": "123b284516311d00d7e35c3ecee37855", "score": "0.6547168", "text": "def tearDown(self):\n os.remove(self._file)", "title": "" }, { "docid": "19922cdb0bfc7df0ae9d7c732278a65f", "score": "0.65463954", "text": "def tearDown(self):\n try:\n os.remove(self.fixtureFile)\n except OSError:\n pass", "title": "" }, { "docid": "85f5ee8ec6e7d61a8c172c175185b163", "score": "0.6543503", "text": "def tearDown(self):\n with contextlib.suppress(FileNotFoundError):\n Path(\"test.xlsx\").absolute().unlink()", "title": "" }, { "docid": "49a5ef4a318fa6dc3fa2c40db942ce17", "score": "0.6533122", "text": "def tearDown(self):\n rmtree(getcwd(), ignore_errors=True)", "title": "" }, { "docid": "49a5ef4a318fa6dc3fa2c40db942ce17", "score": "0.6533122", "text": "def tearDown(self):\n rmtree(getcwd(), ignore_errors=True)", "title": "" }, { "docid": "a78453ddaed231652973e46436c1cf8b", "score": "0.6526341", "text": "def _remove_unique_file(self):\n if self._uniquefile_created:\n self._unlink(self.uniquefile)\n self._uniquefile_created = False\n self._p(\"Unique file deleted: %s\" % self.uniquefile)", "title": "" }, { "docid": "af8b1b23933e272ca8b9d50549fb7f50", "score": "0.6523309", "text": "def remove_local():\n\n try:\n # if str(Settings.SKIP_DELETE) == \"True\":\n # Settings.maybe_print(\"skipping local remove\")\n # 
return\n # Settings.print('Deleting Local File(s)')\n # delete /tmp\n tmp = File.get_tmp()\n if os.path.exists(tmp):\n shutil.rmtree(tmp)\n Settings.print('Local File(s) Removed')\n else:\n Settings.print('Local Files Not Found')\n except Exception as e:\n Settings.dev_print(e)", "title": "" }, { "docid": "a76408bfef4a85cc2fb3fd550710e57c", "score": "0.652248", "text": "def delete_tempfolder(path):\n try:\n rmtree(path)\n except:\n pass", "title": "" }, { "docid": "5523141587bd710fe667334f413b7bcc", "score": "0.65189725", "text": "def cleanup(self):\r\n if self.tempDirectory != None:\r\n shutil.rmtree(self.tempDirectory, True)\r\n self.tempDirectory = None", "title": "" }, { "docid": "912e84dfd5ec0c8b552efef4a9d8c5f0", "score": "0.6501509", "text": "def test_deleting_local_file_using_file_io_output_file() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(file_location, \"wb\") as f:\n f.write(b\"foo\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Confirm that the file initially exists\n assert os.path.exists(file_location)\n\n # Instantiate the custom OutputFile\n output_file = PyArrowFileIO().new_output(location=f\"{file_location}\")\n\n # Delete the file using the file-io implementations delete method\n file_io.delete(output_file)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(file_location)", "title": "" }, { "docid": "73ceeaec03cbd87e13eaa82fa5a024dd", "score": "0.6500316", "text": "def teardown(self):\n super(TestCisAsciiFileInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "title": "" }, { "docid": "d894f8e35f5c2f324738bd9dc38bc2e6", "score": "0.6499741", "text": "def tearDown(self) -> None:\n filtered = [f for f in glob.glob('steps/tests/test_output/*') if not re.match(r'\\.keep', f)]\n for file in filtered:\n try:\n if Path(file).is_dir():\n shutil.rmtree(file)\n else:\n os.remove(file)\n except PermissionError as pe:\n # We don't necessarily care that much\n continue", "title": "" }, { "docid": "4344cf78e94dc0ac12cfe0a8352a12b8", "score": "0.649922", "text": "def tearDown(self):\n \tshutil.rmtree(self.tempdir)", "title": "" }, { "docid": "2b5c5965d2a742e5ad9091782c3c9161", "score": "0.6497349", "text": "def tearDown(self):\n self.remove_test_files()", "title": "" }, { "docid": "efdd95b9f7f201b8975b40c4d2e35707", "score": "0.6494502", "text": "def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()", "title": "" }, { "docid": "287b5130ccb548712b4c4f424c9a88b2", "score": "0.64933854", "text": "def test_create1(self):\n fname = TempfileManager.create_tempfile()\n OUTPUT = open(fname, 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertTrue(fname.startswith('tmp'))", "title": "" }, { "docid": "18d6f9cb18ea8096a7e09f788491b048", "score": "0.64919686", "text": "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "title": "" }, { "docid": "27a61a1c0bea7d7ffbc176402a0e446b", "score": "0.6490362", "text": "def test_provider_system_hook_file_remove(change_dir, fix_file_perms):\n o = tackle(context_file='remove.yaml', no_input=True)\n assert o['if_file']\n assert not o['not_file']\n assert o['if_files']\n assert not o['not_files']", "title": "" }, { "docid": "094f0fb5214eaf0dd40c6d95cd504011", "score": "0.648024", "text": "def 
tearDown(self):\n\t\ttry:\n\t\t\tos.remove(self.filename)\n\t\texcept:\n\t\t\tpass", "title": "" }, { "docid": "52d54b2b90a90c76946d88fafd560768", "score": "0.64775956", "text": "def test_cleanup_file_deleted(fs: FakeFilesystem, requests_mock: Mocker) -> None:\n requests_mock.get(\"https://example.com/test.csv\", text=CONTENTS)\n\n adapter = CSVFile(\"https://example.com/test.csv\")\n assert adapter.path.exists()\n adapter.path.unlink()\n adapter.close()", "title": "" }, { "docid": "212460e63b3eb5a4ed5cb2ce537728db", "score": "0.6476713", "text": "def cleanup_temp_dir(context):\n\n try:\n os.chdir(context.cwd)\n except:\n print(\"Current working file record does not exist\")\n\n try:\n context.tempdir.cleanup()\n except:\n print(\"Temporary directory cannot be cleaned up - does it exist?\")", "title": "" }, { "docid": "2ecda5c2fdda2d03603ff8a907679da3", "score": "0.64764506", "text": "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "title": "" }, { "docid": "2d1d02d18b9b37000c420d7a441da7df", "score": "0.64755917", "text": "def tearDown(self):\n try:\n os.remove(self.fixture_file)\n except OSError:\n pass", "title": "" }, { "docid": "6688e08a4887ac39807ab70576d11b43", "score": "0.6472516", "text": "def a_temp_file():\n filename = None\n try:\n tmpfile = tempfile.NamedTemporaryFile(delete=False)\n filename = tmpfile.name\n yield tmpfile\n finally:\n if filename and os.path.exists(filename):\n os.remove(filename)", "title": "" } ]
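The negative passages above cluster around deleting files and cleaning up temporary directories. As a point of reference, a minimal self-contained sketch of that pattern using only the Python standard library (the function name remove_path and its structure are illustrative assumptions, not taken from any passage):

    import os
    import shutil

    def remove_path(path):
        # Files and symlinks can be unlinked directly; missing paths are skipped.
        if os.path.islink(path) or os.path.isfile(path):
            os.remove(path)
        # Directories are deleted recursively, tolerating partial failures.
        elif os.path.isdir(path):
            shutil.rmtree(path, ignore_errors=True)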
92ae2600eb34202e158228d1cda285c0
Send an IObjectCopiedEvent for object. `original` is the object the copy was created from.
[ { "docid": "0d5bd68503e07959a932e521c8752d40", "score": "0.6960044", "text": "def copied(object, original):", "title": "" } ]
[ { "docid": "d97ab3e40ca5419cf0fc9f77c17777cc", "score": "0.56083983", "text": "def copy_view(self, request, object_id, extra_context=None):\n opts = self.model._meta\n app_label = opts.app_label\n\n if not self.draft_copy_allowed:\n return HttpResponseBadRequest(\"Draft copy not allowed for %s.\" % \n force_unicode(opts.verbose_name_plural)\n )\n\n obj = self.get_object(request, unquote(object_id))\n object_refs = None\n\n # For our purposes, permission to copy is equivalent to \n # has_add_permisison.\n if not self.has_add_permission(request):\n raise PermissionDenied\n\n if obj is None:\n raise Http404(_(\n '%(name)s object with primary key %(key)r does not exist.') % \n {\n 'name': force_unicode(opts.verbose_name), \n 'key': escape(object_id)\n }\n )\n \n if request.POST: # The user has already confirmed the copy.\n if obj.is_draft_copy():\n self.message_user(\n request, \n _('You cannot copy a draft copy.')\n ) \n return HttpResponseRedirect(request.path)\n if obj.get_draft_copy():\n self.message_user(\n request, \n _('A draft copy already exists.')\n )\n return HttpResponseRedirect(request.path)\n copy = self._copy_item(obj)\n original_message = 'Created a draft copy for %s \"%s\".' % (\n force_unicode(obj._meta.verbose_name),\n force_unicode(obj)\n )\n copy_message = 'Copied from %s \"%s\".' % (\n force_unicode(obj._meta.verbose_name),\n force_unicode(obj)\n )\n self.log_change(request, obj, original_message)\n self.log_change(request, copy, copy_message) \n self.message_user(\n request, \n _('The %(name)s \"%(obj)s\" was copied successfully.') % {\n 'name': force_unicode(opts.verbose_name), \n 'obj': force_unicode(obj)\n }\n )\n \n url = reverse(\n \"admin:%s_%s_change\" % (\n app_label, \n self.model._meta.module_name\n ),\n args=(copy.id,)\n )\n return HttpResponseRedirect(url)\n\n if self.model.objects.filter(copy_of=obj).exists():\n draft_already_exists = True\n title = _(\"Draft Copy Exists\")\n edit_copy_url = reverse(\n \"admin:%s_%s_change\" % (\n app_label, \n self.model._meta.module_name\n ),\n args=(self.model.objects.filter(copy_of=obj)[0].id,)\n )\n \n else:\n draft_already_exists = False\n title = _(\"Are you sure?\")\n edit_copy_url = None\n generic_refs = get_generic_referencing_objects(obj)\n direct_refs = get_referencing_objects(obj)\n object_refs = [(unicode(o), o._meta.verbose_name) for o in \\\n chain(direct_refs, generic_refs)\n ]\n \n context = {\n \"title\": title,\n \"object_name\": force_unicode(opts.verbose_name),\n \"object\": obj,\n \"referencing_objects\": object_refs,\n \"opts\": opts,\n \"root_path\": self.admin_site.root_path,\n \"app_label\": app_label,\n 'draft_already_exists': draft_already_exists,\n 'edit_copy_url': edit_copy_url\n }\n context.update(extra_context or {})\n context_instance = template.RequestContext(\n request, \n current_app=self.admin_site.name\n )\n return render_to_response(self.copy_form_template, context, \n context_instance=context_instance\n )", "title": "" }, { "docid": "3314cbec6a8de7473ab90eaef7c7c58f", "score": "0.5304531", "text": "def weave_object(self, obj, aspect_class, *args):\n pointcut = PointCut.create_from_object(obj)\n self.weave_pointcut(pointcut, aspect_class, *args)", "title": "" }, { "docid": "f4241950d41a6ffaab1615ebf6fb8542", "score": "0.52134216", "text": "def copy_instance(sender, instance, **kwargs):\n instance._orig_state = instance.__dict__.copy()", "title": "" }, { "docid": "c7ff99238848d998b1b2ec79d317f719", "score": "0.52018887", "text": "def test_copy_object(self):\n data, size = 
str_buffer(self.object_data)\n self.storage.put_object(self.bucket_name, self.object_name, data, size)\n new_object_name = random_str()\n self.storage.copy_object(\n self.bucket_name,\n self.object_name,\n self.bucket_name,\n new_object_name,\n )\n self.assertTrue(\n self.storage.object_exists(self.bucket_name, new_object_name)\n )\n new_data = self.storage.get_object(self.bucket_name, new_object_name)\n data.seek(0)\n self.assertEqual(new_data.read(), data.read())\n self.storage.delete_object(self.bucket_name, new_object_name)", "title": "" }, { "docid": "60afa478cb806d8397dc15ccd00e6057", "score": "0.517509", "text": "def orig_obj(self):\n return self._orig_obj", "title": "" }, { "docid": "7502b1bf9de54c597d36f7bb0ab67238", "score": "0.5163784", "text": "def _merge_item(self, original, draft_copy):\n # Handle FK and M2M references.\n refs = filter(\n lambda obj: obj != draft_copy,\n get_referencing_objects(original)\n )\n for ref in refs:\n field_names = lookup_referencing_object_relationships(original, ref)\n for field_name in field_names:\n fld_class = ref._meta.get_field(field_name).__class__\n if issubclass(fld_class, models.fields.related.ManyToManyField):\n getattr(ref, field_name).remove(original)\n getattr(ref, field_name).add(draft_copy)\n else:\n setattr(ref, field_name, draft_copy)\n ref.save()\n # Handle generic references.\n for ref in get_generic_referencing_objects(original):\n generic_fk_field = [f for f in ref._meta.virtual_fields \\\n if isinstance(f, generic.GenericForeignKey)\n ][0].name\n setattr(ref, generic_fk_field, draft_copy)\n ref.save()\n # Overwrite the old object.\n if self.slug:\n setattr(original, self.slug_field, original.slug + \"-merge\")\n original.save()\n if self.slug:\n import re\n slug = re.sub(\n \"-draft-copy$\", \"\", getattr(draft_copy, self.slug_field)\n )\n setattr(draft_copy, self.slug_field, slug)\n draft_copy.copy_of = None\n draft_copy.save()\n original.delete()\n draft_copy.publish()\n return draft_copy", "title": "" }, { "docid": "b7feee52341a53c983f3608aa562e34d", "score": "0.5161342", "text": "def object_copy():\n # SELECT SOURCE BUCKET\n if not (source_bucket := select_bucket('Which bucket would you like to copy the file from: ')):\n input('Invalid bucket. Press enter to go back to the main menu.')\n return\n\n # SELECT SOURCE FILE\n if not (obj := select_object(source_bucket, 'Which object would you like to copy from the bucket: ')):\n input('Invalid object. Press enter to go back to the main menu.')\n return\n\n # SELECT DESTINATION BUCKET\n if not (destination_bucket := select_bucket('Which bucket would you like to copy the file to: ')):\n input('Invalid bucket. Press enter to go back to the main menu.')\n return\n\n # COPY FILE\n copy_key = {\n 'Bucket': source_bucket,\n 'Key': obj\n }\n s3.meta.client.copy(copy_key, destination_bucket, obj)\n\n # CONFIRMATION\n if obj in get_objects(destination_bucket):\n print(f'{obj} has been copied from {source_bucket} to {destination_bucket}.')\n else:\n print('Uh oh. 
Something went wrong...\\n')\n\n input('Press enter to continue.')", "title": "" }, { "docid": "bf0b73e483710b4ae51b6839701b69ed", "score": "0.51513755", "text": "def testCopy(self):\n impl = self.impl\n ws_name = self.ws_name\n conf = self.conf\n ws_meta = self.ws_meta\n\n test_object5 = {\n \"id\": \"test_object_id5\",\n \"type\": \"Genome\",\n \"data\": {\"name\":\"testgenome5\", \"string\":\"ACACGATTACA\"},\n \"workspace\": ws_name,\n \"command\": \"something\",\n \"metadata\": {\"origin\":\"shreyas\"},\n \"auth\": self.__class__.token\n }\n obj_meta5 = impl.save_object(test_object5)\n\n\n ws_name2 = \"testWS_%s\" % datetime.utcnow().strftime('%s')\n conf2 = {\"workspace\": ws_name2,\"default_permission\": \"a\", \"auth\": self.__class__.token }\n ws_meta2 = self.impl.create_workspace(conf2)\n\n impl.copy_object({\n \"new_id\": \"new_object_id5\",\n \"new_workspace\": ws_name2,\n \"source_id\": \"test_object_id5\",\n \"source_workspace\": ws_name,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n\n has_object = impl.has_object({\n \"id\": \"new_object_id5\",\n \"workspace\": ws_name2,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n self.assertTrue(has_object)", "title": "" }, { "docid": "312e3f8975a33e3cd734f36f2306a52f", "score": "0.5137043", "text": "def copy(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "312e3f8975a33e3cd734f36f2306a52f", "score": "0.5137043", "text": "def copy(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "615b74b42d0c5e69163577921957b81f", "score": "0.5130287", "text": "def copyObject(oldObject, newInstance=None):\n if newInstance is None:\n newInstance = oldObject.__class__()\n for attr in oldObject.__dict__:\n if len(attr) >= 2:\n if attr[0:2] == \"__\":\n continue\n value = oldObject.__getattribute__(attr)\n if \"copy\" in dir(value):\n newValue = value.copy()\n else:\n newValue = copy.deepcopy(value)\n try:\n newInstance.__setattr__(attr, newValue)\n except:\n continue\n return newInstance", "title": "" }, { "docid": "fe041403fb4df0306fe8ebc2019bbfcd", "score": "0.51031137", "text": "def copySpecial(self, copyType: ghidra.app.util.ClipboardType, monitor: ghidra.util.task.TaskMonitor) -> java.awt.datatransfer.Transferable:\n ...", "title": "" }, { "docid": "f8a27ada2d5c0e626329d26adb8037c7", "score": "0.50804317", "text": "def Copy(AnyObject):\n return copy.deepcopy(AnyObject)", "title": "" }, { "docid": "6191b2ee29d215184524d3084d405476", "score": "0.5063158", "text": "def __copy__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b5bca529f01a659d20efea398df988a0", "score": "0.50517815", "text": "def make_copy(self,obj):\n try:\n newobj = obj.__class__(obj) # bug?: doesn't have unique name\n except:\n newobj = obj.Clone()\n return newobj", "title": "" }, { "docid": "4f6b50542bdb19b438b7e82d5bd3fb7b", "score": "0.50496453", "text": "def dup_object(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "93993e449bd0c0943cd236ac55af3f6e", "score": "0.5047597", "text": "def copy(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "e66733eca9e3638248133008a71debfa", "score": "0.5023282", "text": "def monitor_copy_global(sock, orig_name, new_name):\r\n return communicate(sock, '__copy_global__(\"%s\", \"%s\")' \\\r\n % (orig_name, new_name))", "title": "" }, { "docid": "4b8c34de5e901bb9a7cb7bde40140534", "score": "0.49908575", "text": "def CopyTo(self, 
*args, **kwargs):\n pass", "title": "" }, { "docid": "146261ca53474c472ca2daa1be8f1a04", "score": "0.4970167", "text": "def test_deepcopy(self):\n class MockEntity(Entity):\n foo = fields.TypedField(\"foo\")\n bar = fields.TypedField(\"bar\")\n\n eorig = MockEntity()\n eorig.foo = \"FOO\"\n eorig.bar = \"BAR\"\n\n ecopy = copy.deepcopy(eorig)\n\n # Test that the values copied and that value retrieval works.\n self.assertEqual(ecopy.foo, eorig.foo)\n self.assertEqual(ecopy.bar, eorig.bar)", "title": "" }, { "docid": "8cbac719668faa9e4c5fc66bb10476b6", "score": "0.4953749", "text": "def copyTo(self, dst=None): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "836f585a6577f3d0393ce87a269f25e4", "score": "0.4947685", "text": "def test_copy_object(self):\n query_factory = mock_query_factory(None)\n def check_query_args(passthrough):\n self.assertEqual(query_factory.credentials.access_key, \"foo\")\n self.assertEqual(query_factory.credentials.secret_key, \"bar\")\n self.assertEqual(\n RequestDetails(\n service=b\"s3\",\n region=REGION_US_EAST_1,\n method=b\"PUT\",\n url_context=client.s3_url_context(self.endpoint, \"newbucket\", \"newobjectname\"),\n metadata={\"key\": \"some meta data\"},\n amz_headers={\n \"copy-source\": \"/mybucket/objectname\",\n },\n content_sha256=EMPTY_CONTENT_SHA256,\n ),\n query_factory.details,\n )\n return passthrough\n\n creds = AWSCredentials(\"foo\", \"bar\")\n s3 = client.S3Client(creds, query_factory=query_factory)\n d = s3.copy_object(\n \"mybucket\", \"objectname\", \"newbucket\",\n \"newobjectname\",\n metadata={\"key\": \"some meta data\"},\n )\n d.addCallback(check_query_args)\n return d", "title": "" }, { "docid": "57939b842d4866854d448e43e8a731ee", "score": "0.49434778", "text": "def copy():", "title": "" }, { "docid": "e3260ebbfaaecdc549cd7b4045394580", "score": "0.49293163", "text": "def __copy__(self):\n pass", "title": "" }, { "docid": "a052cb0403e5a94dfd35ab21c91f3747", "score": "0.48962474", "text": "def op_insert_obj(self, object, dest):\n self._objects.insert_object(dest, object)", "title": "" }, { "docid": "edc4a3897dd9bf17d89364246bc9e21c", "score": "0.4882384", "text": "def copy(self, obj):\n\n obj.internalDict = self.internalDict.copy()\n obj.descriptions = self.descriptions.copy()", "title": "" }, { "docid": "87e7f2c8869a971160f2d0ccdf9b30a7", "score": "0.48543411", "text": "def duplicate(scene, ob):\n copy = ob.copy()\n\n # some ops will fail (like triangle mesh) if the object we're operating on\n # is hidden. 
i think its safe to unhide it\n copy.hide = False\n\n copy.data = ob.data.copy()\n scene.objects.link(copy)\n return copy", "title": "" }, { "docid": "ce6150607009d69415548a6153d1568e", "score": "0.48464495", "text": "def copy(self):", "title": "" }, { "docid": "ce6150607009d69415548a6153d1568e", "score": "0.48464495", "text": "def copy(self):", "title": "" }, { "docid": "ce6150607009d69415548a6153d1568e", "score": "0.48464495", "text": "def copy(self):", "title": "" }, { "docid": "ce6150607009d69415548a6153d1568e", "score": "0.48464495", "text": "def copy(self):", "title": "" }, { "docid": "c8da00207a5ddbd8fcefbb950de8c18b", "score": "0.48203573", "text": "def copy(self, *args, **kwargs):\n return self._apply(\"copy\", *args, **kwargs)", "title": "" }, { "docid": "11b54b7520cd878b2ac1f080a28f78dd", "score": "0.48108944", "text": "def copy(self,):\n pass", "title": "" }, { "docid": "bc8f948ac7987d0f64ad3c9dbabad120", "score": "0.4810738", "text": "def MakeCopy(self) -> 'QFunction':\n pass", "title": "" }, { "docid": "281a13e1d6371cef1d600a6ec487ffc0", "score": "0.47977462", "text": "def copy(self):\n pass", "title": "" }, { "docid": "281a13e1d6371cef1d600a6ec487ffc0", "score": "0.47977462", "text": "def copy(self):\n pass", "title": "" }, { "docid": "bf5fe60b04c0be3b0047fca9ffc6aab3", "score": "0.47969186", "text": "def copy(self, src, dst):\n console(f'copy {src} --> {dst}')\n copy_if_needed(src, dst)", "title": "" }, { "docid": "72ef09f86699c67508c1e6d16f20d689", "score": "0.47843722", "text": "def copyCallback(self, num, total, orig, dupe):\n print \"copying %d of %d: %s\" % (num, total, dupe)", "title": "" }, { "docid": "360cd1a043dcaa25e0dc5944431e6193", "score": "0.47763658", "text": "def copy(self):\n raise NotImplemented()", "title": "" }, { "docid": "6f1a57dd353a860571b0bb15e3254112", "score": "0.47682858", "text": "def copyTo(target=None, new_id=None):", "title": "" }, { "docid": "cc2ae386240fe00b6413075becc74c2d", "score": "0.47631657", "text": "def addCopy(*args):\n return _coin.SoFieldContainer_addCopy(*args)", "title": "" }, { "docid": "6f5dfe4d18ce51be18779104701eeff7", "score": "0.47588658", "text": "def _copy_item(self, item):\n if not item.is_published:\n return None\n new_item = deepcopy(item)\n new_item.id = None\n new_item.status = UNPUBLISHED_STATES[0][0]\n new_item.copy_of = item\n if self.slug:\n slug = getattr(new_item, self.slug_field)\n slug += \"-draft-copy\"\n setattr(new_item, self.slug_field, slug)\n new_item.save()\n fk_rels = [f.name for f in self.model._meta.fields \\\n if issubclass(f.__class__, RelatedField) and f.name != 'copy_of'\n ]\n for field in fk_rels:\n setattr(new_item, field, getattr(item, field))\n m2m_rels = [f.name for f, _ in self.model._meta.get_m2m_with_model()]\n for field in m2m_rels:\n # If there is a custom \"through\" model, punt on trying to copy \n # things over.\n model_field = new_item._meta.get_field_by_name(field)[0]\n if model_field.rel.through._meta.auto_created:\n setattr(new_item, field, getattr(item, field).all())\n new_item.save()\n return new_item", "title": "" }, { "docid": "a084ce611e7f7646003c3acf3a48ea87", "score": "0.47271806", "text": "def handle_checkout_event(event):\n original = event.object\n wc = event.working_copy\n # {\n # 'admin': ['Owner'],\n # 'tibi_countryrep': [u'Contributor', u'Reader'],\n # 'tibi_eea_rep': [u'Reviewer', u'Reader'],\n # 'tibi_etc_rep': [u'Editor', u'Reader']\n # }\n # copy all local roles, but filter out local roles\n\n logger.info(\"Copying local roles from original to working 
copy\")\n\n for user, roles in original.__ac_local_roles__.items():\n roles = [r for r in roles if r != 'Owner']\n if roles:\n ex = wc.__ac_local_roles__.get(user, [])\n roles = list(set(roles + ex))\n wc.__ac_local_roles__[user] = roles\n wc._p_changed = True\n\n # We grant \"Delete objects\" permission on the wc, to Contributor, to allow\n # canceling checkouts\n # perm = 'Delete objects'\n # from Products.DCWorkflow.utils import modifyRolesForPermission\n # from AccessControl.PermissionMapping import getPermissionMapping\n # pm = set(getPermissionMapping(perm, wc, st=tuple))\n # pm.add('Contributor')\n # pm.add('Owner')\n # modifyRolesForPermission(wc, perm, tuple(pm))", "title": "" }, { "docid": "df86bf356d68995baa130b1069ada20e", "score": "0.47251245", "text": "def copy(*args):", "title": "" }, { "docid": "0cbe71c367c2a4411a1f860737199e30", "score": "0.47186273", "text": "def __copy__(self):\n raise NotImplementedError", "title": "" }, { "docid": "8e5d2ec4640c7f32cb179d48e6b53027", "score": "0.47173148", "text": "def a_copy(ctx, scene, ob):\n copy = duplicate(scene, ob)\n try:\n yield copy\n finally:\n scene.objects.unlink(copy)", "title": "" }, { "docid": "fbef12df3882add36af3e14482bd639f", "score": "0.47157142", "text": "def can_copy_related(self, obj):\n return True", "title": "" }, { "docid": "fed31812d286ef842daf95c5c64ce7b2", "score": "0.47004545", "text": "def copyFlexor(objects):\n pass", "title": "" }, { "docid": "339324b13a4cda73e5d5f419be26acd7", "score": "0.4694394", "text": "def __init__(self, object, *descriptions):\n super(ObjectModifiedEvent, self).__init__(object)\n self.descriptions = descriptions", "title": "" }, { "docid": "6e7aa72a72c03616bb9537e2edcfecac", "score": "0.46872863", "text": "def copy(self, cut=False):\n files = [QtCore.QUrl.fromLocalFile(\n os.path.join(self.location, self.proxy.itemData(index).get(0)))\n for index in self.view.selectionModel().selectedIndexes()]\n mime_data = self.proxy.mimeData(self.view.selectionModel().\n selectedIndexes())\n if cut:\n data = b'1' # same as QtCore.QByteArray(0, '1')\n mime_data.setData(\"application/x-kde-cutselection\", data)\n data = b'cut'\n mime_data.setData(\"x-special/gnome-copied-files\", data)\n mime_data.setUrls(files)\n clipboard = QtWidgets.QApplication.clipboard()\n clipboard.setMimeData(mime_data)", "title": "" }, { "docid": "040abbd3c46c555f31184b096ce6da13", "score": "0.46782258", "text": "def clone(obj):\n with tempfile.TemporaryFile() as tmp:\n persistent = CopyPersistent(obj)\n\n # Pickle the object to a temporary file\n pickler = Pickler(tmp, protocol=-1)\n pickler.persistent_id = persistent.id\n pickler.dump(obj)\n\n # Now load it back\n tmp.seek(0)\n unpickler = Unpickler(tmp)\n unpickler.persistent_load = persistent.load\n\n res = unpickler.load()\n\n # run the registered cleanups\n def convert(obj):\n pid = _get_pid(pickler, id(obj))\n try:\n return _get_obj(unpickler, pid)\n except KeyError: # pragma: no cover (PyPy)\n return _get_obj(unpickler, str(pid))\n for call in persistent.registered:\n call(convert)\n return res", "title": "" }, { "docid": "b1410bf4019c2663bba04834cf2d4f38", "score": "0.46600595", "text": "def will_copy(self):\r\n return True", "title": "" }, { "docid": "b1410bf4019c2663bba04834cf2d4f38", "score": "0.46600595", "text": "def will_copy(self):\r\n return True", "title": "" }, { "docid": "0b7161bc2cde247ea4c5e7b7a0ee2f7d", "score": "0.46571496", "text": "def test_copy_information(self):\n with Transaction():\n self.layer.login('author')\n with 
IContainerManager(self.root).copier() as copier:\n copy = copier(self.root.document)\n\n self.assertTrue(verifyObject(IVersionedContent, copy))\n\n creator = copy.get_creator_info()\n self.assertTrue(verifyObject(IMember, creator))\n self.assertEqual(creator.userid(), 'author')\n author = copy.get_last_author_info()\n self.assertTrue(verifyObject(IMember, author))\n self.assertEqual(author.userid(), 'author')\n self.assertNotEqual(copy.get_creation_datetime(), None)\n self.assertNotEqual(copy.get_modification_datetime(), None)", "title": "" }, { "docid": "43c0d003f1bb7f3e9a5992acb255d86c", "score": "0.46445003", "text": "def test_copy_instrel(client):\n name1 = \"Dataset X\"\n name2 = \"Dataset Y\"\n invname1 = \"Investigation A\"\n invname2 = \"Investigation B\"\n inv = client.new(\"Investigation\", id=82, name=invname1)\n ds = client.new(\"Dataset\", id=541, investigation=inv, name=name1)\n cds = ds.copy()\n assert cds.investigation == ds.investigation\n assert cds.investigation.id == ds.investigation.id\n assert cds.investigation.name == invname1\n # The copy and the original refer to the same related objects.\n # Changing attributes of a related object of the copy does affect\n # the original.\n cds.investigation.name = invname2\n assert ds.investigation.name == invname2", "title": "" }, { "docid": "2047332dea10859db29eb98d43285ac8", "score": "0.4635211", "text": "def clone(self, new_object):\n return self.__class__(new_object, self.method, self.name)", "title": "" }, { "docid": "be89b1a5c1a69e400c070536dc389722", "score": "0.46313363", "text": "def get_mutable_copy(self, save=True):\n # call super\n copy_obj = super(Proxy, self).get_mutable_copy(save=False)\n # fix date_created\n copy_obj.date_created = timezone.now()\n # fix ManyToMany problems\n if save:\n copy_obj.save()\n copy_obj.delegates.add(*self.delegates.all())\n copy_obj.tags.add(*self.tags.all())\n else:\n raise NotImplementedError(\"get_mutable_copy can't work without saving because there are ManyToMany fields\")\n return copy_obj", "title": "" }, { "docid": "edb3bf9ce9015a365ebb31cdaecc9dcf", "score": "0.46251333", "text": "def copy(self, monitor: ghidra.util.task.TaskMonitor) -> java.awt.datatransfer.Transferable:\n ...", "title": "" }, { "docid": "7c5dfe9d0ab2e6f13fcea535c62497cd", "score": "0.46196842", "text": "def copy(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "7c5dfe9d0ab2e6f13fcea535c62497cd", "score": "0.46196842", "text": "def copy(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "4d7c02838c0b1d644fcfa0dde8453c06", "score": "0.46141922", "text": "def copyidfobject(self, idfobject):\n return addthisbunch(self.idfobjects, self.model, self.idd_info, idfobject, self)", "title": "" }, { "docid": "750a46ee89a8de92f51d36c6f6d06583", "score": "0.46123505", "text": "def copy(destination):", "title": "" }, { "docid": "1f476d9832c75e629df42a83a6ce009d", "score": "0.46085688", "text": "def postCopyTracks(self):\n pass", "title": "" }, { "docid": "b0c0390c87a225cb903594b87a181d9d", "score": "0.46053106", "text": "def _copy_related(request: HttpRequest, obj: _models.Plakat) -> None:\n if 'copy_related' in request.POST:\n copy_related_set(\n request, obj, 'veranstaltung__band', 'veranstaltung__musiker'\n )", "title": "" }, { "docid": "a322d79867485f3ba8b816564cda6503", "score": "0.45871952", "text": "def copy(obj):\n res = clone(obj)\n if getattr(res, '__parent__', None) is not None:\n try:\n res.__parent__ = None\n except AttributeError:\n pass\n if getattr(res, '__name__', None) is not 
None:\n try:\n res.__name__ = None\n except AttributeError:\n pass\n return res", "title": "" }, { "docid": "f11f10ba560f8020db719fe11c5f2d8c", "score": "0.45768002", "text": "def UpdateCopy(self, target: 'QFunction') -> None:\n pass", "title": "" }, { "docid": "5c8f5f1152850bb4017877cc3f469d85", "score": "0.4559176", "text": "def CvvImage_CopyOf(CvvImage_self, img): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "a51a55a5267b79521de320928ee4c9db", "score": "0.45580953", "text": "def print_copying_statement(self, label, src, dest):\n self.print_msg(' -Copying %s: %s to %s' % (label, src, dest))", "title": "" }, { "docid": "70716ec2bc653d6ccccee35d92f4318b", "score": "0.45529485", "text": "def copy(self):\n raise NotImplementedError", "title": "" }, { "docid": "70716ec2bc653d6ccccee35d92f4318b", "score": "0.45529485", "text": "def copy(self):\n raise NotImplementedError", "title": "" }, { "docid": "70716ec2bc653d6ccccee35d92f4318b", "score": "0.45529485", "text": "def copy(self):\n raise NotImplementedError", "title": "" }, { "docid": "ad8fd570b50c4096c69184780f667202", "score": "0.45498058", "text": "def copy(self):\n raise NotImplementedError()", "title": "" }, { "docid": "ad8fd570b50c4096c69184780f667202", "score": "0.45498058", "text": "def copy(self):\n raise NotImplementedError()", "title": "" }, { "docid": "f672772e28d9725b9f25f62e44473da1", "score": "0.45429474", "text": "def copy(self): # pragma: no cover\n raise NotImplementedError()", "title": "" }, { "docid": "3b03811eeb394015d0dfe9fd8ec32a13", "score": "0.45349663", "text": "def copy(self):\r\n raise Exception, \"not implemented\"", "title": "" }, { "docid": "af70f8391ac340a637530f7e6413d05e", "score": "0.45291427", "text": "def doCopyTransform(self,sourceObject):\n try:\n #If we have an Object Factory instance, link it\n sourceObject.mNode\n sourceObject = sourceObject.mNode\n #log.debug(\"Source is an instance\") \n except:\n #If it fails, check that the object name exists and if so, initialize a new Object Factory instance\n assert mc.objExists(sourceObject) is True, \"'%s' - source object doesn't exist\" %sourceObject\n\n assert mc.ls(sourceObject,type = 'transform'),\"'%s' has no transform\"%sourceObject\n\tobjRot = mc.xform (sourceObject, q=True, ws=True, ro=True)\n\tself.doCopyPivot(sourceObject)\n\tself.rotateAxis = objRot", "title": "" }, { "docid": "5311aa1315052d4615309b52b354dbb2", "score": "0.45262662", "text": "def new_copy(self):\n obj = self.create_copy()\n obj._print_name = self.print_name\n return obj", "title": "" }, { "docid": "48985ff401010aa94f8b4ee747ac108f", "score": "0.4524235", "text": "def publish_save(self, copy):\n saved = copy.save_base(cls=copy.__class__)\n #if hasattr(self, 'update_after_save'):\n # self.update_after_save()\n return saved", "title": "" }, { "docid": "e21726736d9eb49542c4df13cfc1e1c7", "score": "0.45122623", "text": "def __itemSingleClickedCopy(self, item, col):\n del item\n del col\n selected = [job.data.name for job in self.selectedObjects() if cuegui.Utils.isJob(job)]\n if selected:\n QtWidgets.QApplication.clipboard().setText(\n \" \".join(selected), QtGui.QClipboard.Selection)", "title": "" }, { "docid": "b4120d886b885d0a852cc467d3f21d85", "score": "0.45086417", "text": "def copy_events(self):\r\n import copy\r\n return copy.deepcopy(self.events)", "title": "" }, { "docid": "01c0d7c7a3bc6aed92e23bd2f7256812", "score": "0.4500605", "text": "def checkCopy(*args):\n return _coin.SoFieldContainer_checkCopy(*args)", "title": "" }, { "docid": 
"e27d974e07ab0746607f14e2cb63b191", "score": "0.44954914", "text": "def copy(cls, src):\n dst = cls(src.attribute)\n dst.__dict__.update(src.__dict__)\n return dst", "title": "" }, { "docid": "dee43c203ad2e94d8c1e08f9ecee9bed", "score": "0.449283", "text": "def copy(self, other_object):\n self.path_to_data = other_object.path_to_data\n self.df_invoice = other_object.df_invoice\n self.df_invoice_sample = other_object.df_invoice_sample\n self._rfm_encoder = other_object._rfm_encoder\n self._vectorizer = other_object._vectorizer \n self._oP5_SegmentClassifier = other_object._oP5_SegmentClassifier\n if other_object._y_clusters is not None:\n self._y_clusters = other_object._y_clusters.copy()\n else:\n self._y_clusters = None\n self.classifier_name = other_object.classifier_name\n self.dict_classifier_param = other_object.dict_classifier_param\n self._classifier_model = other_object._classifier_model\n self.is_data_sampling = other_object.is_data_sampling\n self.list_quant_feature = other_object.list_quant_feature\n self._is_rfm_encode = other_object._is_rfm_encode\n self._nb_customers = other_object._nb_customers\n self._nb_invoices = other_object._nb_invoices\n self._cluster_model_name = other_object._cluster_model_name\n self._dict_cluster_model = other_object._dict_cluster_model\n self._df_invoice_original = other_object._df_invoice_original.copy()\n self._df_invoice_line_out_sample \\\n = other_object._df_invoice_line_out_sample.copy()", "title": "" }, { "docid": "63c6addd78a4ef839810192f7e931632", "score": "0.44681433", "text": "def objectEventNotify(event):\n for handler in IEventHandler.subscription(event.object, event):\n handler(event.object, event)", "title": "" }, { "docid": "4fefc7b3b4677138f8974081a6a9d409", "score": "0.44598418", "text": "def copy(self, source):\n self._copy(source)", "title": "" }, { "docid": "a5cbe78b8658521b29f45273656230f5", "score": "0.4458579", "text": "def doCopyPivot(self,sourceObject):\n try:\n #If we have an Object Factory instance, link it\n sourceObject.mNode\n sourceObject = sourceObject.mNode\n #log.debug(\"Source is an instance\") \n except:\n #If it fails, check that the object name exists and if so, initialize a new Object Factory instance\n assert mc.objExists(sourceObject) is True, \"'%s' - source object doesn't exist\" %sourceObject\n\n assert mc.ls(sourceObject,type = 'transform'),\"'%s' has no transform\"%sourceObject\n rigging.copyPivot(self.mNode,sourceObject)", "title": "" }, { "docid": "519e3763076fddff119e5314953e325a", "score": "0.4450289", "text": "def test_other(self):\n factory = self.root.source.manage_addProduct['Silva']\n factory.manage_addMockupNonPublishable('stuff', 'Stuff')\n manager = IContainerManager(self.root.target)\n with assertTriggersEvents('ObjectWillBeAddedEvent',\n 'ObjectAddedEvent',\n 'ContainerModifiedEvent'):\n with manager.ghoster() as ghoster:\n ghost = ghoster(self.root.source.stuff)\n\n self.assertTrue(verifyObject(IMockupNonPublishable, ghost))\n self.assertIn('stuff', self.root.target.objectIds())", "title": "" }, { "docid": "ddb56b3900c799ef86d752913e8ca4d8", "score": "0.4445463", "text": "def __dragSnapshot(self):\n drag = QDrag(self)\n mimeData = QMimeData()\n mimeData.setImageData(self.__snapshot)\n drag.setMimeData(mimeData)\n drag.setPixmap(self.preview.pixmap())\n drag.exec_(Qt.CopyAction)", "title": "" }, { "docid": "7890dd94b2445b7014ef69e6da3cfd82", "score": "0.44421124", "text": "def create_copy(self):\n print('WARNING: Implementation and testing still in progress!!!!')\n\n new_obj = 
self.__class__()\n new_obj.data = copy.deepcopy(self.data)\n new_obj.topography = copy.deepcopy(self.topography)\n new_obj.electrode_positions = copy.deepcopy(\n self.electrode_positions)\n\n # what about the log?\n print('WARNING: Journal and log is not copied!')\n\n return new_obj", "title": "" }, { "docid": "ab2fecb95fbdc9c98eb964ce6fabf625", "score": "0.4441426", "text": "def SoFieldContainer_addCopy(*args):\n return _coin.SoFieldContainer_addCopy(*args)", "title": "" }, { "docid": "3829a268b8f12cd436943ec7773c0a1c", "score": "0.4435783", "text": "def copy(self, source, dest):\n raise NotImplementedError", "title": "" }, { "docid": "1ec5b05a593bfedb177c408e801787bb", "score": "0.44341844", "text": "def copy(self):\n new = self.__class__()\n referenced = self.reference_attributes\n for attribute, value in self.__dict__.items():\n if attribute in referenced:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n\n for attribute, value in new.__dict__.items():\n if hasattr(value, 'integration'):\n value.integration = new\n\n return new", "title": "" }, { "docid": "90b4aa9a2f9e38ee91e241c4cef230bc", "score": "0.44321528", "text": "def _paste_copied_bytes_at_cursor(self):\n if self._clipboard is None:\n return\n if self._data_source_combo.currentData() == HexDataSource.Loader:\n self.project_memory_write_bytearray(self.inner_widget.hex.cursor, self._clipboard)\n # FIXME: Support pasting data to current debugger state", "title": "" }, { "docid": "24b7b13b1e9b6f7af414aea25208f43f", "score": "0.44234386", "text": "def copy(self, a: ClassUnderTest) -> ClassUnderTest:", "title": "" }, { "docid": "24b7b13b1e9b6f7af414aea25208f43f", "score": "0.44234386", "text": "def copy(self, a: ClassUnderTest) -> ClassUnderTest:", "title": "" }, { "docid": "24b7b13b1e9b6f7af414aea25208f43f", "score": "0.44234386", "text": "def copy(self, a: ClassUnderTest) -> ClassUnderTest:", "title": "" }, { "docid": "8df42f6c8e47beeb5c86913d64a95bca", "score": "0.4423125", "text": "def minimalClone(self, other):\n \n pass", "title": "" }, { "docid": "777d81ac50e59b763a99d243b2d1d1bb", "score": "0.44220346", "text": "def copy(derive: InstrumentedAttribute, from_parent: any):\n return Copy(derive=derive, from_parent=from_parent)", "title": "" }, { "docid": "99d52be788245562a78c28065a3ed089", "score": "0.44145766", "text": "def process_copy_container(self, force):", "title": "" }, { "docid": "47571bcbad3631d81fde44504480e780", "score": "0.44139227", "text": "def test__Channel__copy():\n channel_type = ChannelType.guild_text\n name = 'Yuuka'\n \n channel = Channel(\n channel_type = channel_type,\n name = name,\n )\n \n copy = channel.copy()\n _assert_fields_set(copy)\n vampytest.assert_is_not(channel, copy)\n \n vampytest.assert_eq(channel, copy)", "title": "" } ]
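The row above pairs the query "Send an IObjectCopiedEvent for object" with the stub def copied(object, original):. Assuming the zope.event and zope.lifecycleevent APIs (which provide notify() and an ObjectCopiedEvent class taking the copy and its original), the stub is conventionally completed as a one-line notification; this is a sketch of that convention, not necessarily the exact upstream source:

    from zope.event import notify
    from zope.lifecycleevent import ObjectCopiedEvent

    def copied(object, original):
        # Broadcast the event; subscribers receive the new copy and can
        # inspect event.original to find the object it was created from.
        notify(ObjectCopiedEvent(object, original))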
b4aed3b4a87ea4aef402346694d0ff72
Plot the distribution of each variable in the joint probability space using matplotlib.
[ { "docid": "1fa41e1ea2a607c9d83a6b231e539917", "score": "0.6270921", "text": "def Plot(self):\n inf = 10**30\n plotDict = {}\n minX = inf\n maxX = -inf\n numPts = 200\n pdfs = []\n for v in self.fieldList:\n d = self.distr(v)\n minval = d.minVal()\n maxval = d.maxVal()\n if maxval > maxX:\n maxX = maxval\n if minval < minX:\n minX = minval\n pdfs.append(d)\n xvals = []\n for i in range(numPts):\n rangex = maxX - minX\n incrx = rangex / numPts\n xvals.append(minX + i * incrx)\n for i in range(len(self.fieldList)):\n yvals = []\n var = self.fieldList[i]\n pdf = pdfs[i]\n for j in range(numPts):\n xval = xvals[j]\n if j == numPts - 1:\n P = pdf.P((xval, maxX))\n else:\n P = pdf.P((xval, xvals[j+1]))\n yvals.append(P)\n plotDict[var] = yvals\n plotDict['_x_'] = xvals\n probCharts.plot(plotDict)", "title": "" } ]
[ { "docid": "e51c6607e2bead15b80003b660fb3059", "score": "0.7280935", "text": "def plot(self):\r\n\t\t#should be as simple as calling the hist method of the particle list then the plot method of \r\n\t\t#the distributions datamember\r", "title": "" }, { "docid": "f15678733146d91a320fcd41ae9f2006", "score": "0.7230453", "text": "def plot_distribution(x):\n sns.distplot(x)\n plt.show()", "title": "" }, { "docid": "40d95d3a28d2c58a602708501188078e", "score": "0.70494217", "text": "def plot_joint_distribution_potential(df):\n sns.jointplot(df[\"Y1\"], df[\"Y0\"]).set_axis_labels(\"$Y_1$\", r\"$Y_0$\", fontsize=15)", "title": "" }, { "docid": "fd2e3f5c584d8d63900c5793e956e3a0", "score": "0.693895", "text": "def distplot(prior, model, start):\n for p in prior.keys():\n plt.figure()\n seaborn.distplot(model.chain.theta[p][start:]).set_title(p)\n plt.show()", "title": "" }, { "docid": "44f6ce0f084a6e5e0f2c48e4f689635b", "score": "0.6654395", "text": "def pp_plot(x, dist, line=True, ax=None):\n if ax is None:\n ax = plt.figure().add_subplot(1, 1, 1)\n\n n = len(x)\n p = np.arange(1, n + 1) / n - 0.5 / n\n pp = np.sort(dist.cdf(x))\n sns.scatterplot(x=p, y=pp, color='blue', edgecolor='blue', ax=ax)\n ax.set_title('PP-plot')\n ax.set_xlabel('Theoretical Probabilities')\n ax.set_ylabel('Sample Probabilities')\n ax.margins(x=0, y=0)\n\n if line: ax.plot(np.linspace(0, 1), np.linspace(0, 1), 'r', lw=2)\n\n return ax", "title": "" }, { "docid": "0c119d8cd5d67012222ac65af9de82bc", "score": "0.6652935", "text": "def plot_distribution(x_values, y_values, x_label, y_label):\n\n fig = plt.figure()\n plt.plot(x_values, y_values)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n return fig", "title": "" }, { "docid": "8d74069517f043abbf1169db6fb1cca4", "score": "0.6421278", "text": "def plot_density(self):\n fig = plt.figure()\n plt.title(\"Empirical distribution of the solution at terminal time\")\n sns.distplot(self.target)\n plt.xlabel(\"t\")\n plt.ylabel(\"f(y)\")\n fig.show()", "title": "" }, { "docid": "9a48dcb2eb3a972ac9fcb36eb5d36978", "score": "0.63925856", "text": "def plot_joint_distribution_unobservables(df, df_eh):\n g1 = sns.jointplot(df[\"V\"], df[\"U1\"]).set_axis_labels(\"$V$\", \"$U_1$\", fontsize=15)\n g1.fig.subplots_adjust(top=0.9)\n g1.fig.suptitle(\"Abscence of essential heterogeneity\", fontsize=18)\n\n g2 = sns.jointplot(df_eh[\"V\"], df_eh[\"U1\"]).set_axis_labels(\n \"$V$\", \"$U_1$\", fontsize=15\n )\n g2.fig.subplots_adjust(top=0.9)\n g2.fig.suptitle(\"Presence of essential heterogeneity\", fontsize=18)", "title": "" }, { "docid": "15725bd16ad31749b90c5ed39aa796cb", "score": "0.6323826", "text": "def NormalProbPlot(samples):\n pyplot.clf()\n\n markers = dict(male='b', female='g')\n\n for label, sample in samples.items():\n NormalPlot(sample, label, markers[label], jitter=0.0)\n \n myplot.Save(show=True,\n #root='bayes_height_normal',\n title='Normal probability plot',\n xlabel='Standard normal',\n ylabel='Reported height (cm)')", "title": "" }, { "docid": "2fc6e25a3a6277b5f361ae3387e6de47", "score": "0.62710464", "text": "def jointplot(x, y, c = 'k', cmap = 'gray_r',\n xmin = None, xmax = None, xdelta = None, \n ymin = None, ymax = None, ydelta = None, \n logscale = False, gridsize = 50, bins = None, alpha = 0.2,\n joint_xlabel = None, joint_ylabel = None,\n marginal_xlabel = None, marginal_ylabel = None, \n fig_axes = None, joint_type = 'hex', scatter_label = '',\n highlighting = None, edge_values = None, anchor_legend = None):\n\n if fig_axes == None:\n fig = plt.figure()\n gs = 
GridSpec(4,4)\n\n ax_joint = fig.add_subplot(gs[1:4,0:3])\n ax_marg_x = fig.add_subplot(gs[0,0:3])\n ax_marg_y = fig.add_subplot(gs[1:4,3])\n else:\n fig,ax_joint,ax_marg_x,ax_marg_y = fig_axes\n \n if joint_type == 'hex':\n ax_joint.hexbin(x,y, cmap = cmap, bins= 'log', gridsize = gridsize )\n elif joint_type == 'scatter':\n ax_joint.scatter(x,y, color = c, alpha= alpha, label = scatter_label)\n \n \n if xmin is None:\n xmin = min(x)\n if xmax is None:\n xmax = max(x)\n if ymin is None:\n ymin = min(y)\n if ymax is None:\n ymax = max(y)\n \n if bins:\n ax_marg_x.hist(x, density = False, color = c, alpha = alpha, bins = bins[0], \n align = 'mid')\n ax_marg_y.hist(y, density = False, color = c, alpha = alpha, bins = bins[1], \n align = 'mid', orientation=\"horizontal\")\n else: \n ax_marg_x.hist(x, density = False, color = c, alpha = alpha, range = (xmin, xmax), \n align = 'mid')\n ax_marg_y.hist(y, density = False, color = c, alpha = alpha, range = (ymin, ymax), \n align = 'mid', orientation=\"horizontal\")\n \n if logscale:\n ax_joint.set_xscale('log')\n ax_joint.set_yscale('log')\n ax_marg_x.set_xscale('log')\n ax_marg_x.set_yscale('log')\n ax_marg_y.set_xscale('log')\n ax_marg_y.set_yscale('log')\n else:\n if xdelta is None:\n xdelta = (xmax - xmin)/100.\n if ydelta is None:\n ydelta = (ymax - ymin)/100.\n ax_joint.axis([xmin-xdelta, xmax+xdelta, ymin-ydelta, ymax+ydelta])\n ax_marg_x.set_xlim([xmin-xdelta, xmax+xdelta])\n ax_marg_y.set_ylim([ymin-ydelta, ymax+ydelta])\n\n # Turn off tick labels on marginals\n plt.setp(ax_marg_x.get_xticklabels(), visible=False)\n plt.setp(ax_marg_y.get_yticklabels(), visible=False)\n\n # Set labels on joint\n if joint_xlabel is None:\n try:\n joint_xlabel = x.name\n except:\n joint_xlabel = ''\n if joint_ylabel is None:\n try:\n joint_ylabel = y.name\n except:\n joint_ylabel = ''\n \n ax_joint.set_xlabel(joint_xlabel)\n ax_joint.set_ylabel(joint_ylabel)\n\n # Set labels on marginals\n if marginal_xlabel is None:\n marginal_xlabel = 'Count'\n if marginal_ylabel is None:\n marginal_ylabel = 'Count'\n \n ax_marg_y.set_xlabel(marginal_xlabel)\n ax_marg_x.set_ylabel(marginal_ylabel )\n \n if highlighting is not None:\n for lb, rb, c, label in highlighting:\n ax_joint.axvspan(lb, rb, alpha=0.25, color=c, label = label)\n\n if edge_values is not None:\n for v, c, label in edge_values:\n ax_joint.axvline(x=v, color=c, linewidth = 3., \n linestyle = '--', label = label)\n\n if anchor_legend is not None:\n ax_joint.legend(bbox_to_anchor= anchor_legend)\n\n\n return fig, ax_joint, ax_marg_x, ax_marg_y", "title": "" }, { "docid": "ca671ce7985efe56deea4d6b16c80e4a", "score": "0.62169546", "text": "def plot_distribution(data, name, units):\n mean = data.mean()\n std = data.std()\n maximum = data.max()\n minimum = data.min()\n stats = 'Mean = %.5f\\nStd = %.5f\\nMax = %.5f\\nMin = %.5f' % \\\n (mean, std, maximum, minimum)\n title = 'Distribution of %s in Final Policy' % name\n\n if not show_plots:\n return\n\n plt.figure()\n plt.hist(data)\n plt.title(title)\n plt.xlabel('Error (%s)' % units)\n plt.ylabel('Number of Time Steps')\n plt.axvline(mean, color='k', linestyle='dashed', linewidth=1)\n plt.axvline(mean+std, color='r', linestyle='dashed', linewidth=1)\n plt.axvline(mean-std, color='r', linestyle='dashed', linewidth=1)\n plt.text(0.87, 0.9, stats, ha='center', va='center',\n transform=plt.gca().transAxes)", "title": "" }, { "docid": "6e663879fbdd8d99a6ce1d0989c58471", "score": "0.6167517", "text": "def plot_joint(\n data,\n var_names=None,\n coords=None,\n 
figsize=None,\n textsize=None,\n kind=\"scatter\",\n gridsize=\"auto\",\n contour=True,\n fill_last=True,\n joint_kwargs=None,\n marginal_kwargs=None,\n):\n valid_kinds = [\"scatter\", \"kde\", \"hexbin\"]\n if kind not in valid_kinds:\n raise ValueError(\n (\"Plot type {} not recognized.\" \"Plot type must be in {}\").format(kind, valid_kinds)\n )\n\n data = convert_to_dataset(data, group=\"posterior\")\n\n if coords is None:\n coords = {}\n\n plotters = list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True))\n\n if len(plotters) != 2:\n raise Exception(\n \"Number of variables to be plotted must 2 (you supplied {})\".format(len(plotters))\n )\n\n figsize, ax_labelsize, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize)\n\n if joint_kwargs is None:\n joint_kwargs = {}\n\n if marginal_kwargs is None:\n marginal_kwargs = {}\n\n # Instantiate figure and grid\n fig, _ = plt.subplots(0, 0, figsize=figsize)\n grid = plt.GridSpec(4, 4, hspace=0.1, wspace=0.1)\n\n # Set up main plot\n axjoin = fig.add_subplot(grid[1:, :-1])\n\n # Set up top KDE\n ax_hist_x = fig.add_subplot(grid[0, :-1], sharex=axjoin)\n ax_hist_x.tick_params(labelleft=False, labelbottom=False)\n\n # Set up right KDE\n ax_hist_y = fig.add_subplot(grid[1:, -1], sharey=axjoin)\n ax_hist_y.tick_params(labelleft=False, labelbottom=False)\n\n # Set labels for axes\n x_var_name = make_label(plotters[0][0], plotters[0][1])\n y_var_name = make_label(plotters[1][0], plotters[1][1])\n\n axjoin.set_xlabel(x_var_name, fontsize=ax_labelsize)\n axjoin.set_ylabel(y_var_name, fontsize=ax_labelsize)\n axjoin.tick_params(labelsize=xt_labelsize)\n\n # Flatten data\n x = plotters[0][2].flatten()\n y = plotters[1][2].flatten()\n\n if kind == \"scatter\":\n axjoin.scatter(x, y, **joint_kwargs)\n elif kind == \"kde\":\n plot_kde(x, y, contour=contour, fill_last=fill_last, ax=axjoin, **joint_kwargs)\n else:\n if gridsize == \"auto\":\n gridsize = int(len(x) ** 0.35)\n axjoin.hexbin(x, y, mincnt=1, gridsize=gridsize, **joint_kwargs)\n axjoin.grid(False)\n\n for val, ax, orient, rotate in (\n (x, ax_hist_x, \"vertical\", False),\n (y, ax_hist_y, \"horizontal\", True),\n ):\n if val.dtype.kind == \"i\":\n bins = get_bins(val)\n ax.hist(\n val, bins=bins, align=\"left\", density=True, orientation=orient, **marginal_kwargs\n )\n else:\n marginal_kwargs.setdefault(\"plot_kwargs\", {})\n marginal_kwargs[\"plot_kwargs\"][\"linewidth\"] = linewidth\n plot_kde(val, rotated=rotate, ax=ax, **marginal_kwargs)\n\n ax_hist_x.set_xlim(axjoin.get_xlim())\n ax_hist_y.set_ylim(axjoin.get_ylim())\n\n return axjoin, ax_hist_x, ax_hist_y", "title": "" }, { "docid": "6ccf84781a296a6448227def04e81ffd", "score": "0.6104457", "text": "def plot(self):\n X = np.linspace(-4, 4, 100)\n plt.plot(X, self.product(X[:, None]))\n plt.show()", "title": "" }, { "docid": "6d4ce393ac900dabf994583f4d311f46", "score": "0.60887104", "text": "def plot_joint(self, cmap=\"BuGn\"):\n cmap=\"BuGn\";\n pal = sns.color_palette(cmap, 256)\n lc = pal[int(.7 * 256)]\n bg = pal[0]\n\n fig = plt.figure(figsize=(7, 7))\n gs = plt.GridSpec(6, 6)\n\n p_lim = self.p_grid.min(), self.p_grid.max()\n I_lim = self.I_grid.min(), self.I_grid.max()\n\n ax1 = fig.add_subplot(gs[1:, :-1])\n ax1.set(xlim=p_lim, ylim=I_lim)\n\n ax1.contourf(self.p_grid, self.I_grid, self.pI.T, 30, cmap=cmap)\n\n plt.xlabel(\"$p$\", fontsize=16)\n plt.ylabel(\"$I$\", fontsize=16)\n\n ax2 = fig.add_subplot(gs[1:, -1])\n ax2.set_facecolor(bg)\n ax2.set(ylim=I_lim)\n ax2.plot(self.pI.sum(axis=0), 
self.I_grid, c=lc, lw=3)\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n ax3 = fig.add_subplot(gs[0, :-1])\n ax3.set_facecolor(bg)\n ax3.set(xlim=p_lim)\n ax3.plot(self.p_grid, self.pI.sum(axis=1), c=lc, lw=3)\n ax3.set_xticks([])\n ax3.set_yticks([])", "title": "" }, { "docid": "daadc2b0fadbdecb6056d75256dcb487", "score": "0.60711783", "text": "def plot_distribution(series):\n if not os.path.exists(PLOT_DIR):\n os.mkdir(PLOT_DIR)\n fig, ax = plt.subplots(1, 1, figsize=(4, 2.5))\n series.plot.hist(bins=100, ax=ax)\n figpath = os.path.join(PLOT_DIR, '170302_distiribution_of_percentages_attracted_to_genes.pdf')\n ax.set_yscale('log')\n ax.set_xlabel('sum(percent abundance) across all samples')\n ax.set_ylabel('number of genes')\n fig.savefig(figpath, bbox_inches='tight')", "title": "" }, { "docid": "07fbd5a2b2be79f27c910b7003594068", "score": "0.60545", "text": "def PlotPosterior(xs, ys, suite, pcolor=False, contour=True):\n X, Y = numpy.meshgrid(xs, ys)\n func = lambda x, y: suite.Prob((x, y))\n prob = numpy.vectorize(func)\n Z = prob(X, Y)\n\n pyplot.clf()\n if pcolor:\n pyplot.pcolor(X, Y, Z)\n if contour:\n pyplot.contour(X, Y, Z)\n\n myplot.Save(root='bayes_height_posterior_%s' % suite.name,\n title='Posterior joint distribution',\n xlabel='Mean height (cm)',\n ylabel='Stddev (cm)')", "title": "" }, { "docid": "ffb066d11cb4de634216d277208c9793", "score": "0.6016494", "text": "def draw_distribution(self, variable, bins=100, alpha=0.75):\n plt.title('variable distribution')\n plt.ylabel('count')\n plt.xlabel('value')\n f_var = variable.flatten()\n n, bins, patches = plt.hist(f_var, bins=bins, density=0, facecolor='blue', alpha=alpha)\n plt.show()", "title": "" }, { "docid": "cdce1e198cce0f2b3bf4508bff6d0f63", "score": "0.6003054", "text": "def plot_p_q(mean, logvar, pz_scale=1., N_samples=100, add_legend=False, suptitle_app=\"_train\"):\n\n pz = torchD.Normal(torch.zeros_like(mean), scale=pz_scale) # assume constant scale\n std = torch.exp(0.5*logvar)\n qzx = torchD.Normal(loc=mean, scale=std)\n\n print(\"Plot bivariate latent distributions\")\n print(\"pz batch_shape {}, event_shape {}\".format(pz.batch_shape, pz.event_shape))\n print(\"qzx batch_shape {}, event_shape {}\".format(qzx.batch_shape, qzx.event_shape))\n pz_samples = pz.sample((N_samples,)).cpu().detach().numpy() #shape (1000, 32, 2)\n qzx_samples = qzx.sample((N_samples,)).cpu().detach().numpy()\n\n sample_dim, batch_dim, latent_dim = pz_samples.shape\n print(\"check p, q shape, pz {}, qzx {}\".format(pz_samples.shape, qzx_samples.shape))\n \n # 1D histograms as subplots\n fig, axes = plt.subplots(nrows=2, ncols=latent_dim, figsize=(12, 12))\n for i in range(latent_dim):\n sns.histplot(pz_samples[...,i], kde=True, ax=axes[0,i], legend=add_legend)\n sns.histplot(qzx_samples[...,i], kde=True, ax=axes[1,i], legend=add_legend)\n \n cols_header = [\"Latent {}\".format(i) for i in range(latent_dim)]\n rows_header = [\"pz\", \"qzx\"]\n\n for ax, col in zip(axes[0], cols_header):\n ax.set_title(col)\n\n for ax, row in zip(axes[:,0], rows_header):\n ax.set_ylabel(row, rotation=0, size='large')\n\n plt.suptitle(\"Bivariate Latent Distributions\"+suptitle_app)\n plt.show()\n \n # 2D histplot by seaborn \n df_pz = pd.DataFrame(pz_samples.reshape(-1, latent_dim), columns=[\"Latent {}\".format(i) for i in range(latent_dim)]) #\n df_pz.index = np.tile(np.arange(pz_samples.shape[1]), pz_samples.shape[0]) + 1\n df_pz.index.name = 'Batch'\n \n df_qzx = pd.DataFrame(qzx_samples.reshape(-1, latent_dim), columns=[\"Latent {}\".format(i) for i in 
range(latent_dim)]) #\n df_qzx.index = np.tile(np.arange(qzx_samples.shape[1]), qzx_samples.shape[0]) + 1\n df_qzx.index.name = 'Batch'\n \n fig, axes = plt.subplots(nrows=2,ncols=1, figsize=(12, 12))\n sns.histplot(df_pz, x=\"Latent 0\", y=\"Latent 1\", hue=\"Batch\", kde=True, ax=axes[0], palette=\"bright\", legend=add_legend)\n sns.histplot(df_qzx, x=\"Latent 0\", y=\"Latent 1\", hue=\"Batch\", kde=True, ax=axes[1], palette=\"bright\", legend=add_legend)\n\n plt.suptitle(\"Scatterplot of samples\"+suptitle_app)\n plt.show()", "title": "" }, { "docid": "1d9df8911e4dbe6941c3cc25a2655a61", "score": "0.5996329", "text": "def plot_histograms(ax, prng, nb_samples=10000):\n params = ((10, 10), (4, 12), (50, 12), (6, 55))\n for a, b in params:\n values = prng.beta(a, b, size=nb_samples)\n ax.hist(values, histtype=\"stepfilled\", bins=30,\n alpha=0.8, density=True)\n # Add a small annotation.\n ax.annotate('Annotation', xy=(0.25, 4.25),\n xytext=(0.9, 0.9), textcoords=ax.transAxes,\n va=\"top\", ha=\"right\",\n bbox=dict(boxstyle=\"round\", alpha=0.2),\n arrowprops=dict(\n arrowstyle=\"->\",\n connectionstyle=\"angle,angleA=-95,angleB=35,rad=10\"),\n )\n return ax", "title": "" }, { "docid": "2f7be71aa908ccd940acf6cd3ca5d740", "score": "0.5986667", "text": "def probplot(indx):\n cnt_features = ri.getimagefeatures(indx)\n probs = model.predict_proba(cnt_features.reshape(1,-1))\n labels = model.classes_()\n fig, ax = plt.subplots(figsize=(15,15))\n sortargs = probs.argsort()[0][-3:]\n lbl = labels[sortargs]\n fig, ax = plt.subplots()\n y_pos = np.arange(len(lbl))\n y = probs[0][sortargs]\n N = len(y)\n x = range(N)\n width = 1/2.\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set_xlim([0,1])\n rects = ax.barh(x, y, width, color=\"#AAAAAA\", alpha = 0.5)\n ax.vlines(x=0.005, ymin=-1.0, ymax=3.0,linewidth=2,color = 'k')\n ax.set_yticks(np.arange(3) + width/20.)\n for i, rect in enumerate(rects):\n length = round(rect.get_width(),4)\n ax.text(.5, rect.get_y() + rect.get_height()/10,\n '{} - {}%'.format(labelmaker[lbl[i]], int(100 * length)),\n ha='center', va='bottom',size=20)\n fig.figurePatch.set_alpha(0)\n plt.grid(False)\n ax.set_facecolor('white')\n plt.tight_layout\n canvas=FigureCanvas(fig)\n png_output = StringIO()\n canvas.print_png(png_output)\n plt.close(fig)\n response=make_response(png_output.getvalue())\n response.headers['Content-Type'] = 'image/png'\n return response", "title": "" }, { "docid": "0be0988e54d8cff720af8ef8f50d2cf5", "score": "0.5967445", "text": "def data_distplot(data):\n for cols in data.columns:\n plt.figure()\n sns.distplot(data[cols])", "title": "" }, { "docid": "a21dd90c1009e39f44f5c8de64cfd716", "score": "0.5929595", "text": "def plot_distribution_of_scores(feature_series):\n fig, ax = plt.subplots(1, figsize=(30, 5))\n sns.distplot(feature_series, kde=True, hist=True)\n plt.xlim(0, feature_series.max())\n plt.xlabel(\"Number of upvotes\", fontsize=17)\n plt.ylabel(\"frequency\", fontsize=17)\n plt.tick_params(labelsize=15)\n plt.title(\"Number of upvotes distribution\", fontsize=17)\n plt.show()", "title": "" }, { "docid": "e40d0d288b90559f4362f901e5f3fe42", "score": "0.59281015", "text": "def plot_hist(prediction, n_to_generate,valid,property_identifier):\r\n prediction = np.array(prediction)\r\n x_label = ''\r\n plot_title = '' \r\n \r\n print(\"Proportion of valid SMILES:\", valid/n_to_generate)\r\n \r\n if property_identifier == \"a2d\":\r\n print(\"Max of pIC50: \", np.max(prediction))\r\n print(\"Mean of pIC50: \", np.mean(prediction))\r\n 
print(\"Min of pIC50: \", np.min(prediction))\r\n x_label = \"Predicted pIC50\"\r\n plot_title = \"Distribution of predicted pIC50 for generated molecules\"\r\n elif property_identifier == \"sas\":\r\n print(\"Max SA score: \", np.max(prediction))\r\n print(\"Mean SA score: \", np.mean(prediction))\r\n print(\"Min SA score: \", np.min(prediction))\r\n x_label = \"Calculated SA score\"\r\n plot_title = \"Distribution of SA score for generated molecules\"\r\n elif property_identifier == \"qed\":\r\n print(\"Max QED: \", np.max(prediction))\r\n print(\"Mean QED: \", np.mean(prediction))\r\n print(\"Min QED: \", np.min(prediction))\r\n x_label = \"Calculated QED\"\r\n plot_title = \"Distribution of QED for generated molecules\" \r\n \r\n elif property_identifier == \"logP\":\r\n percentage_in_threshold = np.sum((prediction >= 0.0) & \r\n (prediction <= 5.0))/len(prediction)\r\n print(\"Percentage of predictions within drug-like region:\", percentage_in_threshold)\r\n print(\"Average of log_P: \", np.mean(prediction))\r\n print(\"Median of log_P: \", np.median(prediction))\r\n plt.axvline(x=0.0)\r\n plt.axvline(x=5.0)\r\n x_label = \"Predicted LogP\"\r\n plot_title = \"Distribution of predicted LogP for generated molecules\"\r\n \r\n# sns.set(font_scale=1)\r\n ax = sns.kdeplot(prediction, shade=True,color = 'g')\r\n ax.set(xlabel=x_label,\r\n title=plot_title)\r\n plt.show()\r\n return (valid/n_to_generate)*100", "title": "" }, { "docid": "3df79d0999450abcadbccbfe643cbd59", "score": "0.59277755", "text": "def plot(self, joints, ax, target=None, show=False):\n from ikpy.utils import plot\n\n if ax is None:\n # If ax is not given, create one\n _, ax = plot.init_3d_figure()\n plot.plot_chain(self, joints, ax, name=self.name)\n\n # Plot the goal position\n if target is not None:\n plot.plot_target(target, ax)\n if show:\n plot.show_figure()", "title": "" }, { "docid": "284b178577a5610208d8288214787b3f", "score": "0.5919633", "text": "def plots(self, onlyHist=True):\n plt.figure()\n plt.hist(self.r[:, 1]*self.dims[1], bins=100)\n plt.xlabel('y [m]')\n plt.ylabel('Number of Particles [-]')\n plt.title('Concentration of Particles along Y')\n\n if not onlyHist:\n plt.figure()\n plt.scatter(self.fullr[:, 0], self.fullr[:, 1], s=0.5)\n plt.title(str(len(self.fullr)) + ' particles in data set')\n plt.xlabel('x [m]')\n plt.ylabel('y [m]')\n plt.axis('equal')\n plt.show()", "title": "" }, { "docid": "1acec0525e8742f1ee7188e7d36a86e9", "score": "0.5913822", "text": "def plot_gaussians(l):\n\tmeans = []\n\tvariances = []\n\tfor tup in l:\n\t\tmeans.append(tup[0])\n\t\tvariances.append(tup[1])\n\t\t\n\tx_min = min(means) - 3*max(variances)\n\tx_max = max(means) + 3*max(variances)\n\tx_axis = np.arange(x_min,x_max,1)\n\t\n\tfor tup in l:\n\t\ttry:\n\t\t\ty = [tup[2]*st.norm.pdf(x,tup[0],tup[1]) for x in x_axis]\n\t\texcept IndexError:\n\t\t\ty = [st.norm.pdf(x,tup[0],tup[1]) for x in x_axis] # no scaling factor\n\t\tplt.plot(x_axis,y)\n\tplt.show()", "title": "" }, { "docid": "f820cf016a697d02fe823a773d9c0100", "score": "0.5905548", "text": "def plot_prediction_distribution(actual, predicted):\n _ = plt.figure(figsize=(10, 6))\n _ = sns.distplot(predicted, hist=False, label='Predicted')\n _ = sns.distplot(actual, hist=False, label='Actual')\n _.set_xlabel('Round Drafted')\n _.set_ylabel('Distribution')\n _.set_title('Round Distribution - Actual vs Predicted', fontsize=16)\n _ = plt.legend()\n\n plt.show()", "title": "" }, { "docid": "aab87ad7977e1f479db297ff6136ccc6", "score": "0.59047544", "text": "def 
plot_distributions(dist_builder: RiskCDFModel):\n from matplotlib import pyplot as plt\n # Grab the distributions\n dist = dist_builder.distributions\n # Get all the selectors\n selectors = dist_builder.selectors\n for selector in selectors:\n plt.figure()\n plt.title(selector)\n plt.xlabel('Days')\n plt.ylabel('Cumulative confidence of Discharge')\n legend = []\n for key in dist[selector]:\n plt.plot(dist[selector][key])\n legend.append(key)\n plt.legend(legend)\n plt.show()", "title": "" }, { "docid": "75315510b7c5c005a8528a09f6fb8966", "score": "0.5900154", "text": "def plot_float_distribution(column, title, xlabel):\n \n data = movies_df[column]\n ax = sns.distplot(data)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n plt.show()", "title": "" }, { "docid": "ff18e941fb1a453254a8fc2fc4a64952", "score": "0.5864662", "text": "def prob3():\n #Get the crime data and plot them\n df = pd.read_csv('crime_data.csv')\n df.plot(kind=\"box\", y=['Burglary', 'Violent', 'Vehicle Theft'])\n plt.ylabel('Crime Frequency')\n plt.title('Distributions of Crimes')\n df.plot(kind=\"Hexbin\", x='Vehicle Theft', y='Robbery', gridsize=15)\n plt.title('Distributions of Vehicle Thefts by Robberies')\n plt.show()\n return", "title": "" }, { "docid": "8bf401395ff3e77993834c5271d86481", "score": "0.58620673", "text": "def output_sim_plot(self):\n end_x, end_n, positions, energies = \\\n self.markov_metropolis(hist=True, transitions=1000000)\n pylab.hist(positions, 100, normed = 'True')\n x = [a / 10.0 for a in range(-50, 51)]\n y = [self.analytical_density(a) for a in x]\n z = [self.classical_density(a) for a in x]\n pylab.plot(x, y, c='red', linewidth=2.0)\n pylab.plot(x, z, c='green', linewidth=2.0)\n pylab.title('Theoretical Gaussian distribution $\\pi(x)$ and \\\n \\nhistogram for '+str(len(positions))+' samples, beta='+str(self.beta), fontsize = 18)\n pylab.xlabel('$x$', fontsize = 30)\n pylab.ylabel('$\\pi(x)$', fontsize = 30)\n pylab.savefig('markov_sim_beta_'+str(self.beta)+'.png')", "title": "" }, { "docid": "e130a78d87898b27ce70f1367e18e386", "score": "0.5846076", "text": "def plot_priors(\n self,\n draws=5000,\n var_names=None,\n random_seed=None,\n figsize=None,\n textsize=None,\n hdi_prob=None,\n round_to=2,\n point_estimate=\"mean\",\n kind=\"kde\",\n bins=None,\n omit_offsets=True,\n omit_group_specific=True,\n ax=None,\n ):\n if not self.built:\n raise ValueError(\"Cannot plot priors until model is built!\")\n\n unobserved_rvs_names = []\n flat_rvs = []\n for unobserved in self.backend.model.unobserved_RVs:\n if \"Flat\" in unobserved.__str__():\n flat_rvs.append(unobserved.name)\n else:\n unobserved_rvs_names.append(unobserved.name)\n if var_names is None:\n var_names = pm.util.get_default_varnames(\n unobserved_rvs_names, include_transformed=False\n )\n else:\n flat_rvs = [fv for fv in flat_rvs if fv in var_names]\n var_names = [vn for vn in var_names if vn not in flat_rvs]\n\n if flat_rvs:\n _log.info(\n \"Variables %s have flat priors, and hence they are not plotted\", \", \".join(flat_rvs)\n )\n\n if omit_offsets:\n omitted = [f\"{rt}_offset\" for rt in self.group_specific_terms]\n var_names = [vn for vn in var_names if vn not in omitted]\n\n if omit_group_specific:\n omitted = list(self.group_specific_terms)\n var_names = [vn for vn in var_names if vn not in omitted]\n\n axes = None\n if var_names:\n pps = self.prior_predictive(draws=draws, var_names=var_names, random_seed=random_seed)\n\n axes = plot_posterior(\n pps,\n group=\"prior\",\n figsize=figsize,\n textsize=textsize,\n 
hdi_prob=hdi_prob,\n round_to=round_to,\n point_estimate=point_estimate,\n kind=kind,\n bins=bins,\n ax=ax,\n )\n return axes", "title": "" }, { "docid": "52f7e36f27e320c587c261c5837e6d0d", "score": "0.58407116", "text": "def plot(X: np.ndarray, gaussian_mixture: GaussianMixture, responsibilities: np.ndarray, log_likelihood: float,\n title: str):\n _, K = responsibilities.shape\n\n fig, ax = plt.subplots()\n ax.title.set_text(title)\n ax.set_xlim((-10, 10))\n ax.set_ylim((-10, 10))\n\n for i, point in enumerate(X):\n ax.scatter(X[i][0], X[i][1], color=responsibilities[i], alpha=0.5, linewidths=0)\n\n x, y = np.meshgrid(np.linspace(-10, 10, 100), np.linspace(-10, 10, 100))\n pos = np.dstack((x, y))\n\n for j in range(K):\n mean = gaussian_mixture.mean[j]\n covariance = gaussian_mixture.covariance[j]\n normal = multivariate_normal(mean, covariance)\n # circle = Circle(mean, covariance, color=color[j], fill=False)\n ax.contour(x, y, normal.pdf(pos), alpha=1.0, zorder=10)\n # legend = \"mu = ({:0.2f}, {:0.2f})\\n stdv = {:0.2f}\".format(\n # mean[0], mean[1], covariance)\n # ax.text(mean[0], mean[1], legend)\n\n plt.axis('equal')\n plt.show()", "title": "" }, { "docid": "3663bcf823c7c9007c9fdd4d45817877", "score": "0.5831648", "text": "def prob(table, x, y):\n total = gettotal(table)\n jointprob = table[x][y] / float(total)\n print 'Joint probability of %s with %s: %f' %(x, y, jointprob)\n #print '(n = ', total, ')'", "title": "" }, { "docid": "42bfe7306ac7f1631633677650ed5afe", "score": "0.5826455", "text": "def plot_generated_dot_display_joint(fname='', sample_num=(22, 1, 1, 40, 0),\n encoding=False, rate=10, rho=6.,\n save=False, figname=''):\n print('loading data')\n data = np.load(fname).item()\n print('done loading')\n fakes = data['fake_data']\n sn = sample_num\n if encoding:\n fake = fakes[sn[0]][sn[1]][sn[2]][sn[3]][sn[4]].numpy() * rho\n # fake = fake[:, :40]\n x = []\n for j, i in enumerate(fake):\n st = np.abs(np.unique(i))\n for s in st:\n x.append((s, j))\n df = pd.DataFrame(x, columns=['time [s]', 'Neuron ID'])\n g = sns.JointGrid(x=df['time [s]'], y=df['Neuron ID'])\n g = g.plot_joint(plt.scatter, marker=\"|\")\n # g = g.plot_marginals(sns.distplot)\n # mx = np.mean(fake, axis=0)\n my = np.sum(fake, axis=1) / rho\n # g.ax_marg_x.step(x=np.linspace(0, 6, len(mx)), y=mx)\n g.ax_marg_y.step(my, y=range(len(my)), where='pre', color=cmap[5])\n g.ax_marg_x.hist(df['time [s]'], bins=64,\n histtype='step', color=cmap[5], lw=1.5)\n g.ax_marg_x.set_title('counts')\n g.ax_marg_y.set_title('rate [Hz]')\n # g.ax_marg_y.barh(range(len(my)), width=my)\n # g.ax_marg_x.fill_between(np.linspace(0, 6, len(mx)), mx, step='pre')\n # g.ax_marg_y.fill_between(y1=range(0, 64), x=my, step='pre')\n g.fig.suptitle('Generated spikes, [5/{}] Hz'.format(rate))\n plt.setp(g.ax_marg_x.get_yticklabels(), visible=True)\n plt.setp(g.ax_marg_y.get_xticklabels(), visible=True)\n else:\n # TODO binsize need to be loaded from the real data,\n # but for speed and memory reasons omitted here\n binsize = 312.5 * pq.ms\n generated = fakes[sn[0]][sn[1]][sn[2]][sn[3]][sn[4]]\n print('Converting')\n # rho needs to be extracted from the binned_data by getting the\n # maximum of counts of the set or the average\n # rho e.g. 
16\n sts = cts(generated, binsize, rho=rho)\n [plt.plot(i.magnitude, np.ones_like(i) * j, '.k') for j, i in\n enumerate(sts, 1)]\n plt.xlabel('ms')\n if save:\n plt.savefig(figname)\n plt.show()", "title": "" }, { "docid": "e8275c612929580307568379f8e2cd0e", "score": "0.58238244", "text": "def show_distributions(self,\n throat_diameter='diameter',\n pore_diameter='diameter',\n throat_length='length', \n fig=None):\n net = self._net\n if not fig:\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax1.hist(net.get_pore_data(prop=pore_diameter)[net.get_pore_indices('all')],25,facecolor='green')\n ax1.set_xlabel('Pore Diameter [m]')\n ax1.set_ylabel('Frequency')\n\n ax2 = fig.add_subplot(222)\n net.find_neighbor_pores(1)\n x = sp.zeros(net.num_pores())\n for i in list(range(0,sp.shape(net.adjacency_matrix['lil']['connections'].rows)[0])):\n x[i] = sp.shape(net.adjacency_matrix['lil']['connections'].rows[i])[0]\n ax2.hist(x,25,facecolor='yellow')\n ax2.set_xlabel('Coordination Number')\n ax2.set_ylabel('Frequency')\n\n ax3 = fig.add_subplot(223)\n ax3.hist(net.get_throat_data(prop=throat_diameter)[net.get_throat_indices('all')],25,facecolor='blue')\n ax3.set_xlabel('Throat Diameter [m]')\n ax3.set_ylabel('Frequency')\n \n ax4 = fig.add_subplot(224)\n ax4.hist(net.get_throat_data(prop=throat_length)[net.get_throat_indices('all')],25,facecolor='red')\n ax4.set_xlabel('Throat Length [m]')\n ax4.set_ylabel('Frequency')", "title": "" }, { "docid": "5cdb910a94240bbcdb6387e3835f6a13", "score": "0.58137465", "text": "def norm_plot(x, mu, std, **kwargs):\n y = ss.norm.pdf(x, mu, std)\n \n plt.fill_between(x, 0, y, **kwargs)\n plt.plot(x, y, linestyle=\"None\")", "title": "" }, { "docid": "b161b1ba50007f32ecd8d59a4a765e3a", "score": "0.58112663", "text": "def plot_stats(self):\n # Plot a line for each statistic\n x = range(len(self.generation_outcomes))\n for i in range(len(self.generation_outcomes[0])):\n y = [entry[i] for entry in self.generation_outcomes]\n plt.plot(x, y)\n\n # Draw and pause so the graph can be interacted with each update\n plt.draw()\n plt.pause(0.0001)", "title": "" }, { "docid": "afb349d2f9d28044bdabfcffa18a085a", "score": "0.58033866", "text": "def plot(self):\n plt.hist(self.data, self.bins)\n\n plt.xlabel('X~Norm[' + str(self.norm.mu) + ',' + str(self.norm.sigma) + ']')\n plt.ylabel('Count')\n plt.title(\"Normal Distribution Histogram\")\n\n plt.grid(True)\n\n plt.show()", "title": "" }, { "docid": "5f67262804a39fa1f8417bd8ed06d4c8", "score": "0.5791994", "text": "def plot_random_variable(self, var_num=4):\n mean, std = self.rv.mean(), self.rv.std()\n xs = np.linspace(mean - var_num * std, mean + var_num * std, 100)\n ys = self.rv.pdf(xs)\n \n fig, ax = plt.subplots(figsize=(self.figWidth,self.figHeight))\n ax.plot(xs, ys, label=\"rv\", linewidth=4, color='#fdc086')\n ax.set_title('pdf of the random variable')\n ax.text(0.2 , 0.9, r'$\\mu={},\\ \\sigma={}$'.format(mean, std), ha='center', va='center', transform=ax.transAxes)\n if self.sample_label:\n ax.set_xlabel(self.sample_label)\n \n plt.show()", "title": "" }, { "docid": "b6ede57a646ea9f0bf0ef3c4f141276b", "score": "0.57891905", "text": "def plot_data_distributions(dataset):\n\n accident_node = 'Accident_Severity'\n \n # histogram of accident severity\n plt.hist(dataset[accident_node], bins=3)\n plt.xlabel('Accident Severity')\n plt.ylabel('Count')\n plt.title('Distribution of Accident Severity')\n plt.show()\n\n # histogram of age\n plt.hist(dataset['Age_of_Driver'], bins=20)\n plt.xlabel('Age_of_Driver')\n 
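# Same label/title/show pattern as the severity histogram above (comment added; behaviour unchanged)\n    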
plt.ylabel('Count')\n plt.title('Distribution of Driver Age')\n plt.show()\n\n # histogram of day of week\n plt.hist(dataset['Day_of_Week'], bins=20)\n plt.xlabel('Day_of_Week')\n plt.ylabel('Count')\n plt.title('Distribution of Day of Week')\n plt.show()\n\n # Make one plot for each different location\n sns.kdeplot(dataset.ix[dataset['Urban_or_Rural_Area'] == 1, accident_node],\n label = 'Urban', shade = True)\n sns.kdeplot(dataset.ix[dataset['Urban_or_Rural_Area'] == 2, accident_node],\n label = 'Rural', shade = True)\n # Add labeling\n plt.xlabel('Accident Severity')\n plt.ylabel('Density')\n plt.title('Density Plot of Accident Severity by Location')\n plt.show()\n\n # Make one plot for each different location\n sns.kdeplot(dataset.ix[dataset['Age_of_Driver'] < 30, accident_node],\n label = '< 30', shade = True)\n range_df = dataset['Age_of_Driver'].between(30,50,inclusive=True)\n sns.kdeplot(dataset.ix[range_df, accident_node], label = '30<= age <=50', shade = True)\n range_df = dataset['Age_of_Driver'].between(50,65,inclusive=True)\n sns.kdeplot(dataset.ix[range_df, accident_node], label = '50<= age <=65', shade = True)\n sns.kdeplot(dataset.ix[dataset['Age_of_Driver'] > 65, accident_node],\n label = '> 65', shade = True)\n # Add labeling\n plt.xlabel('Accident Severity')\n plt.ylabel('Density')\n plt.title('Density Plot of Accident Severity by Age Group')\n plt.show()", "title": "" }, { "docid": "16bf9d2630fdda17853b17c37e3f7a15", "score": "0.5789034", "text": "def create_PS_plots(data): \n\n for d in dichotomies:\n\n cosines, label = p_score2(data, new_labels, d)\n\n plt.figure(figsize=(20, 10))\n for i in range(24):\n jitter = jitter_x(i, 6)\n\n plt.scatter(jitter, cosines[i], s=50)\n\n plt.scatter(i, np.mean(cosines[i]), c=\"red\", marker=\"*\", s=300)\n\n plt.ylabel(\"Cosine\")\n plt.xlabel(\"Angle Pairings\")\n plt.title(\"{}\".format(str(d)))\n plt.show()", "title": "" }, { "docid": "42b889062a564ae5764c6b8e92bfb3bb", "score": "0.5766873", "text": "def plot_betti_dist(bettiarray_instance, bettiarray_data):\n for column_index in range(0, bettiarray_instance.shape[1]):\n plt.figure(column_index)\n n, b, p = plt.hist(bettiarray_instance[:, column_index], bins=np.arange(0, max(bettiarray_instance[:, column_index]) + 0.5), density=True)\n plt.plot([bettiarray_data[0, column_index], bettiarray_data[0, column_index]], [0, max(n)], color=\"#ff5b1e\")\n plt.text(-0.29, 25, 'Real system', color=\"#ff5b1e\")\n # plt.ylim(0, 30)\n plt.xlabel('Number of Betti ' + str(column_index))\n plt.ylabel('Normalized count')\n\n plt.show()", "title": "" }, { "docid": "29de44570dd4bb1f68776a76815537e0", "score": "0.57663137", "text": "def prob1():\n #calculate f, g, fg convolution, and fg Hadamard product\n f = np.linspace(0,2*np.pi,1000)\n g = np.sin(f)\n fgcon = np.convolve(np.hstack([f,f]),g, mode ='valid')[1:]\n fgma = np.multiply(f,g)\n #plot each graph\n plt.subplot(231)\n plt.title(\"f\")\n plt.plot(f, f)\n plt.subplot(232)\n plt.title(\"g\")\n plt.plot(f, g)\n plt.subplot(233)\n plt.title(\"convolution\")\n plt.plot(f, fgcon)\n plt.subplot(234)\n plt.title(\"Hadamard\")\n plt.plot(f, fgma)\n plt.subplot(235)\n plt.title(\"All together\")\n plt.plot(f, f)\n plt.plot(f, g)\n plt.plot(f, fgcon)\n plt.plot(f, fgma)\n plt.show()", "title": "" }, { "docid": "d8b65325693db08d1164c0b8470904f1", "score": "0.57570285", "text": "def plot_pp(pp_list):\n # Note: Pyplot just skips values that are infinite\n plt.plot(pp_list)\n plt.ylabel(\"Perplexity\")\n plt.xlabel(\"Discounting Parameter\")\n plt.xticks([x 
for x in range(11)], [x/10 for x in range(11)])\n plt.show()", "title": "" }, { "docid": "333abaa71dad0be96e298226a92fb7b5", "score": "0.5754808", "text": "def plot_data(self):\r\n for worker in range(self.n_workers):\r\n plt.plot(self.rewards[worker].numpy())\r\n\r\n # Make sure you didn't call plt.ion() in DuelingDDQN()\r\n plt.show()", "title": "" }, { "docid": "620bf93ab73af99ae82bf31c0cfe1ded", "score": "0.57528394", "text": "def plot_var(times, pitches, ends, var_n):\n # var_n: 0 to 30 (0: Aria)\n n_data = filter(lambda x:(ends[var_n] < x[0] <= ends[var_n+1]),\n zip(times, pitches))\n # seaborn\n df = pd.DataFrame(n_data)\n df.columns = [\"time\",\"height\"]\n seaborn.jointplot('time', 'height', data=df)\n plt.show()", "title": "" }, { "docid": "a0fc43b1dc43a2d617c2d7cee7d5f578", "score": "0.574316", "text": "def make_photon_pt_dist_plot(data):\n hist = hist1d(data.photonPt, min=0., max=10, nbins=50, log=False)\n\n can = mkplot(hist, drawOpt='PE',\n # logx=True,\n logy=True,\n xRange=[0.0, 10], yRange=[0.5, 0.7e4],\n xLabel='p_{T}^{#gamma} [GeV]',\n yLabel='Events / 0.2 GeV',\n attr=[{'marker': 20, 'size': 1.0, 'color': 1}])\n\n return can", "title": "" }, { "docid": "046833e90592e93ceb7d55df7b7aa9c3", "score": "0.57404244", "text": "def plot():", "title": "" }, { "docid": "abe96135e3b06ab325d3aaf41c7e37d4", "score": "0.57364106", "text": "def plot_jointplot( # pylint:disable=invalid-name\n y: np.array,\n palinstance: PALBase,\n labels: Union[List[str], None] = None,\n figsize: tuple = (8.0, 6.0),\n):\n assert isinstance(y, np.ndarray), \"Input array y must be a numpy array\"\n assert (\n len(y) == palinstance.number_design_points\n ), \"Length of y must equal the size of the design space\"\n assert y.ndim == 2, \"y must be a two-dimensional numpy array\"\n assert (\n y.shape[1] == palinstance.ndim\n ), \"y needs to be a two-dimensional array which column number \\\n equals the number of targets\"\n if (palinstance.means is None) or (palinstance.std is None) or (palinstance.beta is None):\n raise ValueError(\n \"Predicted means is None. 
Execute run_one_step() \\\n to obtain predicted means for each model.\"\n )\n\n num_targets = y.shape[1]\n fig, ax = plt.subplots( # pylint:disable=invalid-name\n num_targets, num_targets, figsize=figsize, tight_layout=True\n )\n\n for row in range(num_targets):\n for column in range(num_targets):\n if row == column:\n plot_histogram(y[:, row], palinstance, ax[row, column])\n else:\n plot_pareto_front_2d(\n y[:, row],\n y[:, column],\n palinstance.std[:, row] * np.sqrt(palinstance.beta),\n palinstance.std[:, column] * np.sqrt(palinstance.beta),\n palinstance,\n ax=ax[row, column],\n )\n\n ax[row, column].spines[\"top\"].set_color(\"none\")\n ax[row, column].spines[\"right\"].set_color(\"none\")\n\n if labels is None:\n labels = [f\"objective {i}\" for i in range(num_targets)]\n else:\n assert len(labels) == num_targets\n\n for index in range(num_targets):\n ax[index, 0].set_ylabel(labels[index])\n ax[num_targets - 1, index].set_xlabel(labels[index])\n\n ax[0, num_targets - 1].legend()\n\n return fig", "title": "" }, { "docid": "d857e8683acef5e1abe5608b6fd3a59d", "score": "0.5731772", "text": "def visualise_probabilities(topologies, info, probs):\n fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)\n index = np.arange(len(topologies))\n bar_width = 0.35\n opacity = 0.8\n\n # Cast the random data into lists.\n random_small = list(probs[0][info['small_size']].values())\n random_medium = list(probs[0][info['medium_size']].values())\n random_large = list(probs[0][info['large_size']].values())\n\n # Calculate the standard deviation (used for error bars).\n random_small_std = list(probs[1][info['small_size']].values())\n random_medium_std = list(probs[1][info['medium_size']].values())\n random_large_std = list(probs[1][info['large_size']].values())\n\n # Cast the non-random data into lists.\n non_random_small = list(probs[2][info['small_size']].values())\n non_random_medium = list(probs[2][info['medium_size']].values())\n non_random_large = list(probs[2][info['large_size']].values())\n\n # Calculate the standard deviation (used for error bars).\n non_random_small_std = list(probs[3][info['small_size']].values())\n non_random_medium_std = list(probs[3][info['medium_size']].values())\n non_random_large_std = list(probs[3][info['large_size']].values())\n\n # Plot the first plot (random and non-random, small sized topology).\n rects1 = ax1.bar(index, random_small, bar_width,\n alpha=opacity,\n color='blue',\n label='Random VNF allocation',\n yerr=random_small_std)\n\n rects2 = ax1.bar(index + bar_width, non_random_small, bar_width,\n alpha=opacity,\n color='g',\n label='Non-random VNF allocation',\n yerr=non_random_small_std)\n\n # Plot the second plot (random and non-random, medium sized topology).\n rects1 = ax2.bar(index, random_medium, bar_width,\n alpha=opacity,\n color='blue',\n label='Random VNF allocation',\n yerr=random_medium_std)\n\n rects2 = ax2.bar(index + bar_width, non_random_medium, bar_width,\n alpha=opacity,\n color='g',\n label='Non-random VNF allocation',\n yerr=non_random_medium_std)\n\n # Plot the third plot (random and non-random, large sized topology).\n rects1 = ax3.bar(index, random_large, bar_width,\n alpha=opacity,\n color='blue',\n label='Random VNF allocation',\n yerr=random_large_std)\n\n rects2 = ax3.bar(index + bar_width, non_random_large, bar_width,\n alpha=opacity,\n color='g',\n label='Non-random VNF allocation',\n yerr=non_random_large_std)\n\n # Set the title data.\n ax1.set(title='Probability of VNF traversal by topology and network size')\n 
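# Only ax1 carries the title; the y-axis label goes on the middle panel and the x-axis label on the bottom one\n    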
ax2.set(ylabel='Probability of traversing VNF')\n ax3.set(xlabel='Topology')\n\n ax1.set(ylim=(0, 1))\n ax2.set(ylim=(0, 1))\n ax3.set(ylim=(0, 1))\n plt.xticks(index + bar_width / 2, topologies)\n\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "e6faa552a58968a1bfe2564fc7764beb", "score": "0.5703878", "text": "def plot_pdf(self):\n\n ax = base.plotvol3()\n for (x, y, t), weight in zip(self.x, self.weight):\n # ax.plot([x, x], [y, y], [0, weight], 'r')\n ax.plot([x, x], [y, y], [0, weight], 'skyblue', linewidth=3)\n ax.plot(x, y, weight, 'k.', markersize=6)\n\n plt.grid(True)\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.xlim()\n ax.set_zlabel('particle weight')\n ax.view_init(29, 59)", "title": "" }, { "docid": "c41cf687cab4f691b58e12abd3d42490", "score": "0.56789356", "text": "def plot_marginals(metrics):\n # Generate basic plot of marginal distributions\n fig, axes = plt.subplots(2, 2, \n subplot_kw=dict(polar=False), \n figsize = (7,6))\n # Scale parameter\n axes[0,0].set_xlabel('Scale')\n axes[0,1].set_xlabel('Slope')\n axes[1,0].set_xlabel('Gamma')\n axes[1,1].set_xlabel('Lambda')\n # Loop through and plot marginals that exist\n counter = 0\n idx = np.array([[0,0], [0,1], [1,0], [1,1]])\n for keys in ['scale', 'slope', 'gamma', 'lambda']:\n axes[idx[counter,0],idx[counter,1]].set_ylabel('Probability')\n if metrics['Marginals'][keys] is not np.nan and metrics['Marginals'][keys].size > 1:\n axes[idx[counter,0],idx[counter,1]].plot(metrics['Marginals_X'][keys],\n metrics['Marginals'][keys], \n lw=3, \n color='#5998ff')\n axes[idx[counter,0],idx[counter,1]].fill_between(metrics['Marginals_X'][keys],\n metrics['Marginals'][keys], color='#5998ff', alpha = .4)\n elif metrics['Marginals'][keys].size == 1:\n axes[idx[counter,0],idx[counter,1]].text(0.5,0.5, \"None\",\n horizontalalignment='center', \n verticalalignment='center', \n transform=axes[idx[counter,0],idx[counter,1]].transAxes)\n # Update counter\n counter += 1\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "f753ece3fb30b850fece0912d5d442ea", "score": "0.5674519", "text": "def plt_prob(ax,fwb):\n #setup useful ranges and common linspaces\n x0_space = np.linspace(150, 285 , 40)\n x1_space = np.linspace(11.5, 15.5 , 40)\n\n # get probability for x0,x1 ranges\n tmp_x0,tmp_x1 = np.meshgrid(x0_space,x1_space)\n z = np.zeros_like(tmp_x0)\n for i in range(tmp_x0.shape[0]):\n for j in range(tmp_x1.shape[1]):\n x = np.array([[tmp_x0[i,j],tmp_x1[i,j]]])\n z[i,j] = fwb(x)\n\n\n cmap = plt.get_cmap('Blues')\n new_cmap = truncate_colormap(cmap, 0.0, 0.5)\n pcm = ax.pcolormesh(tmp_x0, tmp_x1, z,\n norm=colors.Normalize(vmin=0, vmax=1),\n cmap=new_cmap, shading='nearest', alpha = 0.9)\n ax.figure.colorbar(pcm, ax=ax)", "title": "" }, { "docid": "d4832d685ca52702a40b255464b21167", "score": "0.56668925", "text": "def plot_dist(self):\n self.model.plot_dist(self.X, self.clusters, distargs=self.distargs)", "title": "" }, { "docid": "11fad85d1d80ff6a6012c5677be2cc31", "score": "0.5647222", "text": "def plot_priors(priors, min=-5, max=5):\n x = np.linspace(min, max, priors.shape[1])\n\n new_figure()\n plt.title(\"Gaussian Priors\")\n plt.xlabel(\"Input line\")\n plt.ylabel(\"Sampled value\")\n plt.plot(x, priors.T)", "title": "" }, { "docid": "26c50de3fc1a6de1941d51549c39a9fc", "score": "0.56409043", "text": "def pairplot(self):\n sns.set_style('white', {'font.family':'serif', 'font.serif':'Palatino'})\n colours = sns.color_palette(\"husl\")\n sns.pairplot(self.data[relevant], hue=self.data['Prediction'], palette=colours)\n 
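# Colour each pairwise scatter cell by the model's predicted class\n        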
plt.show()", "title": "" }, { "docid": "da482c5bb19ad02f01266fe2329263d1", "score": "0.5637851", "text": "def draw_dist_gap_profile_singel_plot(gap_list):\n for i, v in enumerate(gap_list):\n x_s = np.arange(len(v)) * 0.05\n plt.plot(x_s, v)\n\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Distance Gap (m)\")\n plt.ylim([5, 35])\n fig = plt.gcf()\n fig.set_size_inches(11, 5)", "title": "" }, { "docid": "f83ae217ce2b07c8c06668c6d135f16b", "score": "0.5636278", "text": "def visualize_probability_convergence():\n fair_probs = [1.0 / 6] * 6\n\n # Run 500 experiments where you roll the dice ten times.\n counts = np.random.multinomial(10, fair_probs, size=500)\n\n # Summarize all of the counts in each column for each row.\n cum_counts = counts.astype(np.float32).cumsum(axis=0)\n\n # Compute the estimate probability for each dice being rolled through\n # all of the experiments. This will converge towards 16%\n estimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\n\n # Plot each estimated probability\n d2l.set_figsize((6, 4.5))\n for i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=f\"P(die={str(i+1)})\")\n\n # Add the true probability of you rolling any dice number\n d2l.plt.axhline(y=0.167, color=\"black\", linestyle=\"dashed\")\n\n # Set the x and y label for the current axes\n d2l.plt.gca().set_xlabel(\"Groups of experiments\")\n d2l.plt.gca().set_ylabel(\"Estimated probability\")\n\n # Create the legend and save the figure as an image.\n d2l.plt.legend()\n d2l.plt.savefig(\"probability_convergence.png\")", "title": "" }, { "docid": "621d69b166a7bb48e1267be447b4e159", "score": "0.5627442", "text": "def show_plot(mean, std, distribution):\n\n if distribution == 'normal':\n s = np.random.normal(mean, std, 1000)\n elif distribution == 'lognormal':\n s = np.random.lognormal(mean, std, 1000) \n else: # else run normal dist\n s = np.random.normal(mean, std, 1000) \n\n _count, bins, _ignored = plt.hist(s, 100, density=True)\n plt.plot(bins, 1/(std * np.sqrt(2 * np.pi)) *\n np.exp( - (bins - mean)**2 / (2 * std**2) ),\n linewidth=2, color='r')\n\n plt.show() \n plt.savefig('plots/{}_{:.2f}_{:.2f}.png'.format(distribution, mean, std))\n plt.close()", "title": "" }, { "docid": "9052f5b9c95ba06cc7bd469a8fb17bb5", "score": "0.56154925", "text": "def pairplot(self):\n sns.set_style('white', {'font.family':'serif', 'font.serif':'Palatino'})\n colours = sns.color_palette(\"husl\")\n print(self.data[relevant])\n # sns.pairplot(self.data[relevant], hue=self.data['Prediction'], palette=colours)\n # plt.show()", "title": "" }, { "docid": "80a86066494bae344afac1ce0070bc02", "score": "0.5609147", "text": "def make_plots(self):\n logger.info(\"Semilog plot of |1> state probability requires calibrated data.\")\n plt.figure(figsize=(2*6.4, 4.8))\n plt.subplot(121)\n plt.plot(self.xpts, self.ypts, \".\", markersize=15, label=\"Data\")\n plt.plot(self.xpts, self.model(self.xpts), \"-\", linewidth=3, label=\"Fit\")\n plt.xlabel(self.xlabel, fontsize=14)\n plt.ylabel(self.ylabel, fontsize=14)\n plt.annotate(self.annotation(), xy=(0.4, 0.10), xycoords='axes fraction', size=12)\n plt.subplot(122)\n plt.semilogy(self.xpts, -1/2*(self.ypts - self.fit_params[\"A0\"]), \".\", markersize=15, label=\"Data\")\n plt.semilogy(self.xpts, -1/2*(self.model(self.xpts) - self.fit_params[\"A0\"]), \"-\", linewidth=3, label=\"Fit\")\n plt.xlabel(self.xlabel, fontsize=14)\n plt.ylabel('|1> probability', fontsize=14)\n plt.suptitle(self.title, fontsize=14)", "title": "" }, { "docid": "2dedc1ba45ff1d527333003198019670", 
"score": "0.56072867", "text": "def plot_distribution(images, labels, class_id, CLASSES):\n fig = plt.figure(figsize=(21,7))\n rows, cols = 1, 3\n locs = np.where(labels == class_id)\n samples = locs[:][0]\n class_images = images[samples]\n hist_r, hist_g, hist_b = compute_hist(class_images)\n plt.title(\"Histogram - Mean Pixel Value: \" + CLASSES[class_id])\n plt.axis('off')\n\n fig.add_subplot(rows, cols, 1)\n hist, bins = hist_r[\"hist\"], hist_r[\"bins\"]\n width = 0.7 * (bins[1] - bins[0])\n center = (bins[:-1] + bins[1:]) / 2\n plt.bar(center, hist, align='center', width=width,color='r')\n plt.xlim((0,1))\n plt.ylim((0, 255))\n\n fig.add_subplot(rows, cols, 2)\n hist, bins = hist_g[\"hist\"], hist_g[\"bins\"]\n width = 0.7 * (bins[1] - bins[0])\n center = (bins[:-1] + bins[1:]) / 2\n plt.bar(center, hist, align='center', width=width,color='g')\n plt.xlim((0,1))\n plt.ylim((0,255))\n\n fig.add_subplot(rows, cols, 3)\n hist, bins = hist_b[\"hist\"], hist_b[\"bins\"]\n width = 0.7 * (bins[1] - bins[0])\n center = (bins[:-1] + bins[1:]) / 2\n plt.bar(center, hist, align='center', width=width,color='b')\n plt.xlim((0,1))\n plt.ylim((0, 255))", "title": "" }, { "docid": "0ff1e4b46b1c26b66c8d18032fd481b3", "score": "0.5603352", "text": "def plot_nlls(nlls, title, xlabel):\n fig, ax = plt.subplots()\n # Plot the two distributions side by side\n sns.distplot(nlls, ax=ax, kde=True)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n \n return fig, ax", "title": "" }, { "docid": "cc01412458a1a613d53ce9d21b40076c", "score": "0.56014466", "text": "def plot_probabilities_barplots(df, output_path):\n sns.set(style=\"whitegrid\")\n\n # Draw a nested barplot\n g = sns.factorplot(x='cluster', y='probability', hue='condition',\n size=6, kind='bar', palette='PRGn', data=df)\n g.despine(left=True)\n g.set_ylabels('Probability')\n plt.savefig(os.path.join(output_path, 'States probabilities cond.png'))\n # plt.show()", "title": "" }, { "docid": "88595da7d9a77c16a505bb97044c021f", "score": "0.56009275", "text": "def plot_reward(all_iterations, all_rewards):\n\tfig = plt.figure()\n\tplt.plot(all_iterations, all_rewards)\n\tfig.suptitle('Iterations vs rewards')\n\tplt.xlabel('No. 
Iterations')\n\tplt.ylabel('reward')\n\tplt.ioff()\n\tplt.show()", "title": "" }, { "docid": "7d4f649b5c695384537224507caec889", "score": "0.5593912", "text": "def plot(self, axes, x_min, x_max, y_max):\n\n if self.samples is None:\n raise ValueError(\"no samples yet, please call `sample` first\")\n if self.samples.ndim == 2 and self.samples.shape[1] > 1:\n raise ValueError(\n \"visualization for dimensions great than 1 not supported\")\n\n X = np.linspace(x_min, x_max, 1000)\n Yp = np.exp(np.array([self.propose_logpdf(x) for x in X]))\n Yt = np.exp(np.array([self.target_logpdf(x) for x in X]))\n\n # plot the histogram of samples\n axes.hist(\n self.samples,\n bins=100,\n color='#999999',\n label=\"samples\",\n normed=True,\n edgecolor='#999999')\n\n # plot the proposal distribution PDF\n axes.plot(\n X, Yp, 'r-',\n linewidth=2,\n label=\"proposal\")\n\n # plot the target distribution PDF\n axes.plot(\n X, Yt, 'b-',\n linewidth=2,\n label=\"target\")\n\n axes.legend(loc='upper right')\n axes.set_xlabel(\"x\")\n axes.set_ylabel(\"p(x)\")\n axes.set_xlim(x_min, x_max)\n axes.set_ylim(0, y_max)", "title": "" }, { "docid": "caaa992bfa4d6dbc41a8aa5fec207e1f", "score": "0.55938905", "text": "def main():\n [x_65, y_65] = compute_probability(DIMENSION, 0.65)\n [x_70, y_70] = compute_probability(DIMENSION, 0.70)\n [x_75, y_75] = compute_probability(DIMENSION, 0.75)\n [x_80, y_80] = compute_probability(DIMENSION, 0.80)\n [x_90, y_90] = compute_probability(DIMENSION, 0.90)\n\n f, axes = plt.subplots(1)\n axes.scatter(x_65, y_65, s=10, c='b', marker=\"o\", label='ALPHA = 0.65')\n axes.scatter(x_70, y_70, s=10, c='y', marker=\"o\", label='ALPHA = 0.70')\n axes.scatter(x_75, y_75, s=10, c='r', marker=\"o\", label='ALPHA = 0.75')\n axes.scatter(x_80, y_80, s=10, c='g', marker=\"o\", label='ALPHA = 0.80')\n axes.scatter(x_90, y_90, s=10, c='m', marker=\"o\", label='ALPHA = 0.90')\n plt.legend(loc='upper left', prop={'size':25}, bbox_to_anchor=(1, 1))\n axes.set_xlabel('number of created mazes')\n axes.set_ylabel('fraction of solvable mazes')\n axes.set_ylim(ymin=0)\n axes.set_xlim(left=0)\n plt.savefig('probabilities.png', bbox_inches='tight')\n plt.show()", "title": "" }, { "docid": "794c61ad0d45ac0f56b829fafbc22bf2", "score": "0.5591409", "text": "def plot(self, ylabel='Normalized Probability', xlabel='Amplitude',\n color='k', ls='-', bins=None, fbins=100, alpha=.1, \n method='compound', histtype='stepfilled', xlim=None):\n if bins is None: bins = self.bins\n\n foo, edges, bar = plt.hist(self.sample, bins=bins, density=True)\n x = [ val-(val-edges[i-1])/2. 
for i, val in enumerate(edges) ][1:]\n\n        plt.plot(x, self.func(self.values, x, method=method), color=color,\n                 ls=ls, linewidth=2)\n        plt.xlim(xlim)\n        plt.ylabel(ylabel, size=17)\n        plt.xlabel(xlabel, size=17)\n        plt.yticks(size='17')\n        plt.xticks(size='17')", "title": "" }, { "docid": "f74dfd8784be7f90bce108c4950f35b0", "score": "0.558083", "text": "def _scatter_plot(data, xname, yname, **kwargs):\n    default_joint_kwargs = {\n        \"height\": max(\n            get_option(\"display.matplotlib.fig_width\"),\n            get_option(\"display.matplotlib.fig_height\"),\n        )\n    }\n    default_scatter_kwargs = {}\n    default_dist_kwargs = {\"kde\": False}\n    default_joint_kwargs.update(kwargs.get(\"joint_kwargs\", {}))\n    default_scatter_kwargs.update(kwargs.get(\"scatter_kwargs\", {}))\n    default_dist_kwargs.update(kwargs.get(\"dist_kwargs\", {}))\n\n    g = sns.JointGrid(x=data[xname], y=data[yname], **default_joint_kwargs)\n    g = g.plot_joint(sns.scatterplot, **default_scatter_kwargs)\n    g = g.plot_marginals(sns.histplot, **default_dist_kwargs)\n    return g", "title": "" }, { "docid": "c149b5fd634fc940bb85d5aaffaadfba", "score": "0.5579985", "text": "def plot_histogram(sequence, freqs, nb_simulation=1000, expected=False):\n    fig, axes = plt.subplots(2, 2, figsize=(15, 15))\n    p_emp = {}\n    p_dint = {}\n    p_nt = {}\n    words = [\"ATCTGC\", \"ATATAT\", \"AAAAAA\", \"TTTAAA\"]\n    positions = [(0, 0), (0, 1), (1, 0), (1, 1)]\n\n    # Stationary distribution of the associated Markov chain\n    pi_k = stationary_distribution(\n        freqs, transition_matrix(sequence), 0.00001, verbose=False)\n\n    for word in words:\n        p_emp[word] = p_empirique(len(sequence), word, freqs, nb_simulation)\n        if expected is True:\n            p_dint[word] = dinucleotides_proba(\n                str_to_int(word), transition_matrix(sequence), pi_k)\n            p_nt[word] = nucleotides_proba(str_to_int(word), freqs)\n\n    for pos, word in zip(positions, p_emp.keys()):\n        ks = np.arange(len(p_emp[word]))\n        axes[pos].grid(True)\n        axes[pos].set_title(\"Distribution of occurrences of \" + word)\n        axes[pos].set_xlabel(\"Occurrences of the word\")\n        axes[pos].set_ylabel(\"Estimated empirical probability\")\n        axes[pos].bar(ks, p_emp[word])\n\n        if expected is True:\n            # Parameter of the Poisson distribution\n            mu_dint = p_dint[word] * (len(sequence) - len(word) + 1)\n            mu_nt = p_nt[word] * (len(sequence) - len(word) + 1)\n            axes[pos].scatter(\n                ks, geq_poisson_probability(ks, mu_dint), zorder=2)\n            axes[pos].scatter(ks, geq_poisson_probability(ks, mu_nt), zorder=3)\n            axes[pos].legend(['Poisson law (dinucleotides)',\n                              'Poisson law (nucleotides)', 'Empirical distribution'])\n\n        extent = axes[pos].get_window_extent().transformed(\n            fig.dpi_scale_trans.inverted())\n        fig.savefig(\"plots/histogram_\" + word + \".png\",\n                    bbox_inches=extent.expanded(1.1, 1.2))", "title": "" }, { "docid": "2d12ed309984a8bc10dca7af15e387e1", "score": "0.55731857", "text": "def plot_random_classifier(**kwargs):\n    from pylab import plot\n    if 'color' not in kwargs:\n        kwargs['color'] = 'black'\n    if 'linestyle' not in kwargs:\n        kwargs['linestyle'] = ':'\n    plot(\n        [0, 1],\n        [0, 1],\n        **kwargs\n    )", "title": "" }, { "docid": "2343cb69e802826870b86af39404c2b3", "score": "0.55496347", "text": "def plot_permutation_distributions(exp):\n\n    with open('./experiment_config.json', 'r') as f:\n        config = json.load(f)\n\n    sns.set(style=\"white\", font_scale=config['font_scale'],\n            rc={\"lines.linewidth\": config['font_scale']})\n\n    comparisons = [\"Open-Closed\", \"Open-Brain\", \"Brain-Closed\"]\n    ps = []\n\n    (fig, axs) = plt.subplots(2, 3, figsize=(20, 12))\n    
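# 2x3 grid: one column per comparison, alpha power on the top row, beta power on the bottom\n    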
plt.subplots_adjust(hspace=0.4, wspace=0.2)\n\n    for i, comp in enumerate(comparisons):\n        f = '../data/stats/%s_experiment/%s_%s_permutation_info.npz'\n        perm_info = np.load(f % (exp, comp, exp))\n\n        # plot permutation distribution\n        ax = axs[0, i]\n        sns.distplot(perm_info['alpha_dist'], ax=ax)\n        ax.axvline(perm_info['alpha_diff'], color=config['colors'][1])\n        title = '%s Alpha Power \\n Uncorrected p = %.3f'\n        ax.set_title(title % (comp, perm_info['alpha_p_value']))\n\n        ax = axs[1, i]\n        sns.distplot(perm_info['beta_dist'], ax=ax)\n        ax.axvline(perm_info['beta_diff'], color=config['colors'][1])\n        title = '%s Beta Power \\n Uncorrected p = %.3f'\n        ax.set_title(title % (comp, perm_info['beta_p_value']))\n\n        ps.append(perm_info['alpha_p_value'])\n        ps.append(perm_info['beta_p_value'])\n\n    plt.tight_layout()\n    sns.despine()\n\n    return fig", "title": "" }, { "docid": "374d96afc37bc4dc9618bf6321dddd0a", "score": "0.5546732", "text": "def plot_relations(x, y, x_label, y_label=\"Income\"):\n    plt.figure()\n    x, y = list(zip(*sorted(list(zip(x, y.flatten())), key=lambda x: x[0])))\n    plt.scatter(x, y)\n    plt.xlabel(x_label)\n    plt.ylabel(y_label)\n    plt.show()", "title": "" }, { "docid": "64e65251fbb08385f43662df7f3b66b9", "score": "0.5541747", "text": "def plot_counts(sequence, freqs):\n    fig, axes = plt.subplots(2, 2, figsize=(15, 15))\n    ks = [2, 4, 6, 8]\n    positions = [(0, 0), (0, 1), (1, 0), (1, 1)]\n\n    length = len(sequence)\n    # Transition matrix of the associated Markov chain\n    tmatrix = transition_matrix(sequence)\n    # Stationary distribution of the associated Markov chain\n    pi_k = stationary_distribution(freqs, tmatrix, 0.00001, verbose=False)\n\n    print(\"Least-squares method on the distances to the observation\")\n    print(\"=========================================================\")\n\n    for pos, k in zip(positions, ks):\n        # axes[pos].grid(True)\n        axes[pos].set_title(\"Word length: k = \" + str(k))\n        axes[pos].set_xlabel(\"Lexicographic index of the word\")\n        axes[pos].set_ylabel(\"Expected count\")\n\n        nt_counts = comptage_attendu(k, length, freqs)\n        dint_counts = comptage_attendu_markov(k, length, tmatrix, pi_k)\n        obs_counts = k_grams_occurrences(sequence, k)\n\n        nt, dint = ut.encode_file(nt_counts, dint_counts)\n        obs = list(obs_counts.values())\n        xs = np.arange(len(nt_counts))\n\n        mse_nt, std_nt = distance_counts(obs, nt)\n        mse_dint, std_dint = distance_counts(obs, dint)\n\n        print(\"\\nFor k =\", k, \":\")\n        print(\"============\")\n        print(\"Model\\t\\t|\\tSum of squares\\t|\\tStandard deviation\")\n        print(\"------------------------------------------------------------------------\")\n        print(\"Nucleotides\\t|\\t\", round(mse_nt, 4), \"\\t\\t|\\t\", round(std_nt, 4))\n        print(\"Dinucleotides\\t|\\t\", round(mse_dint, 4),\n              \"\\t\\t|\\t\", round(std_dint, 4))\n\n        # Parameter of the Poisson distribution\n        axes[pos].scatter(xs, obs, zorder=1)\n        axes[pos].scatter(xs, nt, zorder=2)\n        axes[pos].scatter(xs, dint, zorder=3)\n        axes[pos].legend(['Observations', 'Nucleotide model',\n                          'Dinucleotide model'])", "title": "" }, { "docid": "8560d75637f24cc04df2512915f847dd", "score": "0.55405957", "text": "def plot(self, xmin=-1, xmax=5):\n    x = torch.linspace(xmin, xmax, 1000)\n    y = self(x)\n    plt.plot(x.detach().numpy(), y.detach().numpy())\n    def rounder(s): return round(s, 3)\n    show_params = \"\\n\".join([\"T = \" + str(rounder(self.T.item())), \"ep1 = \" + str(rounder(self.ep1.item())), \"ep2 = \" + str(rounder(self.ep2.item()))])\n    plt.legend([show_params], loc=\"best\")\n    plt.show()", "title": "" }, 
{ "docid": "58efe02c132d2f8a5d8d00b929b36531", "score": "0.5530489", "text": "def main():\n plot = []\n for i in range(500): # Change the range here to generate more/less examples\n msg = get_message()\n plot.append(msg.__sizeof__())\n\n import matplotlib.pyplot as plt\n plt.hist(plot, bins=50)\n plt.ylabel('distribution')\n plt.show()", "title": "" }, { "docid": "e82bf22d34b6fdcdc69cc216f287e7be", "score": "0.553043", "text": "def plot_demographics(data):\n sns.set()\n\n # Data prep\n data = data.copy()\n # Replaces the binary indicator with PD/Healthy for plot labeling\n data[\"class\"].replace({0: \"Healthy\", 1: \"PD\"}, inplace=True)\n data[\"gender\"].replace({0: \"Female\", 1: \"Male\"}, inplace=True)\n # Drops duplicate ids to get the true patient count\n data.drop_duplicates(subset=\"id\", inplace=True)\n class_id = data[[\"class\", \"id\", \"gender\"]]\n\n # plot the barplot\n sns.catplot(data=class_id, x=\"class\", kind=\"count\", hue=\"gender\",\n palette=\"Blues\")\n plt.xlabel(\"Diagnosis\")\n plt.title(\"PD Distribution in the Dataset\")\n plt.savefig(\"pd_demographics.png\", dpi=1000, bbox_inches=\"tight\")\n plt.clf()", "title": "" }, { "docid": "fcc9fa2c0c21bd992f4bd015e10eb392", "score": "0.5530075", "text": "def par_plot(self, it, corr, corr_std):\n plot_style.white()\n\n plt.clf()\n fig, ax = plt.subplots(1, 1)\n plt.scatter(self.dl_par, corr, color=pal[0],\n label=r'$C(\\Delta t = 0, \\Delta x = 0, \\Delta y = 0, \\Delta z)$')\n plt.fill_between(self.dl_par, corr-corr_std, corr+corr_std,\n alpha=0.3)\n plt.plot(self.dl_par, fit.osc_gauss(self.dl_par, self.par_fit_params[it,0],\n self.par_fit_params[it,1], 0), color=pal[2] ,\n label=r'$p_\\parallel + (1-p_\\parallel)\\exp[- (\\Delta z / l_{\\parallel})^2] '\n '\\cos(k_{\\parallel} \\Delta z) $')\n plt.plot(self.dl_par, np.exp(-(self.dl_par/self.par_fit_params[it,0])**2),\n 'k--', label='Gaussian Envelope')\n plt.legend()\n plt.xlabel(r'$\\Delta z$ (m)')\n plt.ylabel(r'$C(\\Delta z)$')\n plot_style.minor_grid(ax)\n plot_style.ticks_bottom_left(ax)\n plt.savefig(self.out_dir + '/parallel/corr_fns/par_fit_it_' +\n str(it) + '.pdf')\n plt.close(fig)", "title": "" }, { "docid": "b1e7961ed24a984e0202a028fa5be423", "score": "0.5526959", "text": "def plot_distributions(\n filename,\n observables=None,\n parameter_points=None,\n uncertainties=\"nuisance\",\n nuisance_parameters=None,\n draw_nuisance_toys=None,\n normalize=False,\n log=False,\n observable_labels=None,\n n_bins=50,\n line_labels=None,\n colors=None,\n linestyles=None,\n linewidths=1.5,\n toy_linewidths=0.5,\n alpha=0.15,\n toy_alpha=0.75,\n n_events=None,\n n_toys=100,\n n_cols=3,\n quantiles_for_range=(0.025, 0.975),\n sample_only_from_closest_benchmark=True,\n):\n\n # Load data\n sa = SampleAugmenter(filename, include_nuisance_parameters=True)\n if uncertainties == \"nuisance\":\n nuisance_morpher = NuisanceMorpher(\n sa.nuisance_parameters, list(sa.benchmarks.keys()), reference_benchmark=sa.reference_benchmark\n )\n\n # Default settings\n if parameter_points is None:\n parameter_points = []\n\n for key, is_nuisance in zip(sa.benchmarks, sa.benchmark_is_nuisance):\n if not is_nuisance:\n parameter_points.append(key)\n\n if line_labels is None:\n line_labels = parameter_points\n\n n_parameter_points = len(parameter_points)\n\n if colors is None:\n colors = [\"C\" + str(i) for i in range(10)] * (n_parameter_points // 10 + 1)\n elif not isinstance(colors, list):\n colors = [colors for _ in range(n_parameter_points)]\n\n if linestyles is None:\n linestyles = 
[\"solid\", \"dashed\", \"dotted\", \"dashdot\"] * (n_parameter_points // 4 + 1)\n elif not isinstance(linestyles, list):\n linestyles = [linestyles for _ in range(n_parameter_points)]\n\n if not isinstance(linewidths, list):\n linewidths = [linewidths for _ in range(n_parameter_points)]\n\n if toy_linewidths is None:\n toy_linewidths = linewidths\n if not isinstance(toy_linewidths, list):\n toy_linewidths = [toy_linewidths for _ in range(n_parameter_points)]\n\n # Observables\n observable_indices = []\n if observables is None:\n observable_indices = list(range(len(sa.observables)))\n else:\n all_observables = list(sa.observables.keys())\n for obs in observables:\n try:\n observable_indices.append(all_observables.index(str(obs)))\n except ValueError:\n logging.warning(\"Ignoring unknown observable %s\", obs)\n\n logger.debug(\"Observable indices: %s\", observable_indices)\n\n n_observables = len(observable_indices)\n\n if observable_labels is None:\n all_observables = list(sa.observables.keys())\n observable_labels = [all_observables[obs] for obs in observable_indices]\n\n # Parse thetas\n theta_values = [sa._get_theta_value(theta) for theta in parameter_points]\n theta_matrices = [sa._get_theta_benchmark_matrix(theta) for theta in parameter_points]\n logger.debug(\"Calculated %s theta matrices\", len(theta_matrices))\n\n # Get event data (observations and weights)\n all_x, all_weights_benchmarks = sa.weighted_events(generated_close_to=None)\n logger.debug(\"Loaded raw data with shapes %s, %s\", all_x.shape, all_weights_benchmarks.shape)\n\n indiv_x, indiv_weights_benchmarks = [], []\n if sample_only_from_closest_benchmark:\n for theta in theta_values:\n this_x, this_weights = sa.weighted_events(generated_close_to=theta)\n indiv_x.append(this_x)\n indiv_weights_benchmarks.append(this_weights)\n\n # Remove negative weights\n sane_event_filter = np.all(all_weights_benchmarks >= 0.0, axis=1)\n\n n_events_before = all_weights_benchmarks.shape[0]\n all_x = all_x[sane_event_filter]\n all_weights_benchmarks = all_weights_benchmarks[sane_event_filter]\n n_events_removed = n_events_before - all_weights_benchmarks.shape[0]\n\n if int(np.sum(sane_event_filter, dtype=np.int)) < len(sane_event_filter):\n logger.warning(\"Removed %s / %s events with negative weights\", n_events_removed, n_events_before)\n\n for i, (x, weights) in enumerate(zip(indiv_x, indiv_weights_benchmarks)):\n sane_event_filter = np.all(weights >= 0.0, axis=1)\n indiv_x[i] = x[sane_event_filter]\n indiv_weights_benchmarks[i] = weights[sane_event_filter]\n\n # Shuffle events\n all_x, all_weights_benchmarks = shuffle(all_x, all_weights_benchmarks)\n\n for i, (x, weights) in enumerate(zip(indiv_x, indiv_weights_benchmarks)):\n indiv_x[i], indiv_weights_benchmarks[i] = shuffle(x, weights)\n\n # Only analyze n_events\n if n_events is not None and n_events < all_x.shape[0]:\n logger.debug(\"Only analyzing first %s / %s events\", n_events, all_x.shape[0])\n\n all_x = all_x[:n_events]\n all_weights_benchmarks = all_weights_benchmarks[:n_events]\n\n for i, (x, weights) in enumerate(zip(indiv_x, indiv_weights_benchmarks)):\n indiv_x[i] = x[:n_events]\n indiv_weights_benchmarks[i] = weights[:n_events]\n\n if uncertainties != \"nuisance\":\n n_toys = 0\n\n n_nuisance_toys_drawn = 0\n if draw_nuisance_toys is not None:\n n_nuisance_toys_drawn = draw_nuisance_toys\n\n # Nuisance parameters\n nuisance_toy_factors = []\n\n if uncertainties == \"nuisance\":\n n_nuisance_params = sa.n_nuisance_parameters\n\n if not n_nuisance_params > 0:\n raise 
RuntimeError(\"Cannot draw systematic uncertainties -- no nuisance parameters found!\")\n\n logger.debug(\"Drawing nuisance toys\")\n\n nuisance_toys = np.random.normal(loc=0.0, scale=1.0, size=n_nuisance_params * n_toys)\n nuisance_toys = nuisance_toys.reshape(n_toys, n_nuisance_params)\n\n # Restrict nuisance parameters\n if nuisance_parameters is not None:\n for i in range(n_nuisance_params):\n if i not in nuisance_parameters:\n nuisance_toys[:, i] = 0.0\n\n logger.debug(\"Drew %s toy values for nuisance parameters\", n_toys * n_nuisance_params)\n\n nuisance_toy_factors = np.array(\n [\n nuisance_morpher.calculate_nuisance_factors(nuisance_toy, all_weights_benchmarks)\n for nuisance_toy in nuisance_toys\n ]\n ) # Shape (n_toys, n_events)\n\n nuisance_toy_factors = sanitize_array(nuisance_toy_factors, min_value=1.0e-2, max_value=100.0)\n # Shape (n_toys, n_events)\n\n # Preparing plot\n n_rows = (n_observables + n_cols - 1) // n_cols\n n_events_for_range = 10000 if n_events is None else min(10000, n_events)\n\n fig = plt.figure(figsize=(4.0 * n_cols, 4.0 * n_rows))\n\n for i_panel, (i_obs, xlabel) in enumerate(zip(observable_indices, observable_labels)):\n logger.debug(\"Plotting panel %s: observable %s, label %s\", i_panel, i_obs, xlabel)\n\n # Figure out x range\n xmins, xmaxs = [], []\n for theta_matrix in theta_matrices:\n x_small = all_x[:n_events_for_range]\n weights_small = mdot(theta_matrix, all_weights_benchmarks[:n_events_for_range])\n\n xmin = weighted_quantile(x_small[:, i_obs], quantiles_for_range[0], weights_small)\n xmax = weighted_quantile(x_small[:, i_obs], quantiles_for_range[1], weights_small)\n xwidth = xmax - xmin\n xmin -= xwidth * 0.1\n xmax += xwidth * 0.1\n\n xmin = max(xmin, np.min(all_x[:, i_obs]))\n xmax = min(xmax, np.max(all_x[:, i_obs]))\n\n xmins.append(xmin)\n xmaxs.append(xmax)\n\n xmin = min(xmins)\n xmax = max(xmaxs)\n x_range = (xmin, xmax)\n\n logger.debug(\"Ranges for observable %s: min = %s, max = %s\", xlabel, xmins, xmaxs)\n\n # Subfigure\n ax = plt.subplot(n_rows, n_cols, i_panel + 1)\n\n # Calculate histograms\n bin_edges = None\n histos = []\n histos_up = []\n histos_down = []\n histos_toys = []\n\n for i_theta, theta_matrix in enumerate(theta_matrices):\n theta_weights = mdot(theta_matrix, all_weights_benchmarks) # Shape (n_events,)\n\n if sample_only_from_closest_benchmark:\n indiv_theta_weights = mdot(theta_matrix, indiv_weights_benchmarks[i_theta]) # Shape (n_events,)\n histo, bin_edges = np.histogram(\n indiv_x[i_theta][:, i_obs],\n bins=n_bins,\n range=x_range,\n weights=indiv_theta_weights,\n density=normalize,\n )\n else:\n histo, bin_edges = np.histogram(\n all_x[:, i_obs], bins=n_bins, range=x_range, weights=theta_weights, density=normalize\n )\n histos.append(histo)\n\n if uncertainties == \"nuisance\":\n histos_toys_this_theta = []\n for i_toy, nuisance_toy_factors_this_toy in enumerate(nuisance_toy_factors):\n toy_histo, _ = np.histogram(\n all_x[:, i_obs],\n bins=n_bins,\n range=x_range,\n weights=theta_weights * nuisance_toy_factors_this_toy,\n density=normalize,\n )\n histos_toys_this_theta.append(toy_histo)\n\n histos_up.append(np.percentile(histos_toys_this_theta, 84.0, axis=0))\n histos_down.append(np.percentile(histos_toys_this_theta, 16.0, axis=0))\n histos_toys.append(histos_toys_this_theta[:n_nuisance_toys_drawn])\n\n # Draw error bands\n if uncertainties == \"nuisance\":\n for histo_up, histo_down, lw, color, label, ls in zip(\n histos_up, histos_down, linewidths, colors, line_labels, linestyles\n ):\n bin_edges_ = 
np.repeat(bin_edges, 2)[1:-1]\n                histo_down_ = np.repeat(histo_down, 2)\n                histo_up_ = np.repeat(histo_up, 2)\n\n                plt.fill_between(bin_edges_, histo_down_, histo_up_, facecolor=color, edgecolor=\"none\", alpha=alpha)\n\n            # Draw some toys\n            for histo_toys, lw, color, ls in zip(histos_toys, toy_linewidths, colors, linestyles):\n                for k in range(n_nuisance_toys_drawn):\n                    bin_edges_ = np.repeat(bin_edges, 2)[1:-1]\n                    histo_ = np.repeat(histo_toys[k], 2)\n\n                    plt.plot(bin_edges_, histo_, color=color, alpha=toy_alpha, lw=lw, ls=ls)\n\n        # Draw central lines\n        for histo, lw, color, label, ls in zip(histos, linewidths, colors, line_labels, linestyles):\n            bin_edges_ = np.repeat(bin_edges, 2)[1:-1]\n            histo_ = np.repeat(histo, 2)\n\n            plt.plot(bin_edges_, histo_, color=color, lw=lw, ls=ls, label=label, alpha=1.0)\n\n        plt.legend()\n\n        plt.xlabel(xlabel)\n        if normalize:\n            plt.ylabel(\"Normalized distribution\")\n        else:\n            plt.ylabel(r\"$\\frac{d\\sigma}{dx}$ [pb / bin]\")\n\n        plt.xlim(x_range[0], x_range[1])\n        if log:\n            ax.set_yscale(\"log\", nonposy=\"clip\")\n        else:\n            plt.ylim(0.0, None)\n\n    plt.tight_layout()\n\n    return fig", "title": "" }, { "docid": "ca31dae3bd85a8b1422c39a67f89e5b2", "score": "0.5526664", "text": "def plot_in_degree_distribution(g):\n    dg = degree_pairs(in_degree_distribution(g))\n    plt.clf()\n    plt.xscale(\"log\")\n    plt.yscale(\"log\")\n    plt.scatter(*zip(*dg))", "title": "" }, { "docid": "6e32c45170dc776025e9198acef50fd1", "score": "0.5508181", "text": "def create_distribution_plots(nA, nB, pA, pB, plot_name):\n    # Calculate ranges to determine binomial distribution values for\n    a_calc_vals = []\n    for i in range(int(pA*nA) - int(0.1*pA*nA), int(pA*nA) + int(0.1*pA*nA), 1):\n        a_calc_vals.append(i)\n    \n    b_calc_vals = []\n    for i in range(int(pB*nB) - int(0.1*pB*nB), int(pB*nB) + int(0.1*pB*nB), 1):\n        b_calc_vals.append(i)\n    \n    # Determine probabilities based on binomial pmf\n    a_dist = [scs.binom.pmf(r, nA, pA) for r in a_calc_vals]\n    b_dist = [scs.binom.pmf(r, nB, pB) for r in b_calc_vals]\n    \n    # Convert frequency of occurence values into conversion percentages\n    a_plot_vals = []\n    for val in a_calc_vals:\n        a_plot_vals.append(100*val/nA)\n    \n    b_plot_vals = []\n    for val in b_calc_vals:\n        b_plot_vals.append(100*val/nB)\n    \n    # Generate plots for data\n    plt.bar(a_plot_vals, a_dist, width = 0.1, color = \"red\")\n    plt.bar(b_plot_vals, b_dist, width = 0.1, color = \"blue\")\n    \n    # Create legend\n    legend_colors = {\"Control Group\": \"red\", \"Test Group\": \"blue\"}\n    legend_labels = list(legend_colors.keys())\n    legend_handles = [plt.Rectangle((0,0),1,1, color = legend_colors[label]) for label in legend_labels]\n    plt.legend(legend_handles, legend_labels)\n    \n    # Add axis labels\n    plt.xlabel('Conversion Rate')\n    plt.ylabel('Probability')\n    \n    plt.savefig(plot_name)\n    \n    return", "title": "" }, { "docid": "02fe0f2aa4693191f8d02c55cf197102", "score": "0.5507328", "text": "def plot_counts():\n    df = load_data()\n    counts = np.zeros(6)\n    for i in range(len(counts)):\n        counts[i] = len(pd.unique(df[df.columns[i]].values.ravel())) - 1\n    taxo = list(df.columns.values)[:6]\n    df_count = pd.DataFrame(counts, taxo)\n    df_count.columns = ['UniqueCounts']\n    pp = PdfPages('counts_plot.pdf')\n    plt.figure()\n    plt.clf()\n    sns.barplot(x=df_count.index, 
y=df_count['UniqueCounts'])\n plt.title('Unique groups in our dataset at different taxonomic ranks')\n plt.xlabel('Taxonomic ranks')\n plt.ylabel('Counts')\n pp.savefig()\n pp.close()", "title": "" }, { "docid": "cf1289877be4ed039e0e529c551ff5f7", "score": "0.54955363", "text": "def plot(self):\n data = np.array(self.actors_in_graph)\n x, y = data[:, 0], data[:, 1]\n plt.title(\"actors in system\")\n plt.xlabel(\"hours\")\n plt.ylabel(\"number of actors\")\n plt.plot(x, y)\n plt.show()", "title": "" }, { "docid": "b089db9bf11096f9d618de3533f73464", "score": "0.5489375", "text": "def visualize_distributions(self,filename):\n x = np.linspace(0,1)\n plt.clf()\n plt.plot(x, self.rv_score_given_true.pdf(x),label='given true %s'%str(self.rv_score_given_true.args))\n plt.plot(x, self.rv_score_given_false.pdf(x),label='given false %s'%str(self.rv_score_given_false.args))\n plt.legend()\n plt.title('Detector for class %s, expected time is %.2f s'%(self.cls,\n self.config['avg_time_per_image']))\n plt.savefig(filename)", "title": "" }, { "docid": "02515a4ca89ebb29301127557150f9bc", "score": "0.5476246", "text": "def plot_predictions_histograms(self, dataloader):\n probabilities = self.collect_probabilities(dataloader)\n n_bins = 20\n\n example_score = list(probabilities.values())[0][0]\n n_classes = example_score.shape[1] # we assume all models produce the same number of classes\n n_signals = len(self.signals_list) +1 #we include the global model n the signals\n n_models = len(probabilities)\n plt.figure()\n for i_signal, signal in enumerate(self.signals_list+[\"Global\\nmodel\"]):\n probas, Y = probabilities[signal]\n for i_class in range(n_classes):\n plt.subplot(n_classes, n_models+1, i_class*(n_models+1) +i_signal +1 )\n plot_bar_per_class(probas[:,i_class], Y, color_per_class, n_bins)\n plt.yticks([],[])\n plt.xticks(ticks=np.linspace(0,1,6), labels=['']*6, fontsize=6)\n if i_class == 0: plt.title(signal, fontsize=8)\n if i_signal == 0: plt.ylabel(classes_names[i_class], fontsize=8)\n if i_class == n_classes-1: plt.xticks(ticks=np.linspace(0,1,6),\n labels=[f'{x:.1f}' for x in np.linspace(0,1,6)])\n\n # add the legend in the last column\n plt.subplot(n_classes, n_models+1, 0*(n_models+1) +n_signals +1 )\n labels_legend = [\"Actual class\"] + classes_names # add one label for legend title\n colors_legend = [[1,1,1,0]] + [color+[1] for color in color_per_class]# add an alphachannel for transparency\n bars_legend = [matplotlib.patches.Patch(facecolor=color, edgecolor=color) for color in colors_legend]\n plt.legend(bars_legend, labels_legend, fontsize=6)\n plt.xticks([],[]); plt.yticks([],[]) # erase ticks\n plt.axis('off') # erase axis", "title": "" }, { "docid": "213863afc0113505258b2b1c7561f9b2", "score": "0.54727083", "text": "def make_prob_hists(arr_energy_correct, modelname):\n def configure_hstack_plot(plot_title, savepath):\n \"\"\"\n Configure a mpl plot with GridLines, Logscale etc.\n :param str plot_title: Title that should be used for the plot.\n :param str savepath: path that should be used for saving the plot.\n \"\"\"\n axes.legend(loc='upper center')\n plt.grid(True, zorder=0)\n #plt.yscale('log')\n\n x_ticks_major = np.arange(0, 1.1, 0.1)\n plt.xticks(x_ticks_major)\n plt.minorticks_on()\n\n plt.xlabel('Probability')\n plt.ylabel('Normed Quantity')\n title = plt.title(plot_title)\n title.set_position([.5, 1.04])\n\n plt.savefig(savepath)\n\n fig, axes = plt.subplots()\n particle_types_dict = {'muon-CC': (14, 1), 'a_muon-CC': (-14, 1), 'elec-CC': (12, 1), 'a_elec-CC': (-12, 1)}\n\n 
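# Maps class labels to the particle-type identifiers consumed by make_prob_hist_class below\n    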
# make energy cut, 3-40GeV\n arr_energy_correct_ecut = arr_energy_correct[arr_energy_correct[:, 0] <= 40]\n\n make_prob_hist_class(arr_energy_correct_ecut, axes, particle_types_dict, 'muon-CC', 0, plot_range=(0,1), color='b', linestyle='-')\n make_prob_hist_class(arr_energy_correct_ecut, axes, particle_types_dict, 'a_muon-CC', 0, plot_range=(0, 1), color='b', linestyle='--')\n make_prob_hist_class(arr_energy_correct_ecut, axes, particle_types_dict, 'elec-CC', 0, plot_range=(0,1), color='r', linestyle='-')\n make_prob_hist_class(arr_energy_correct_ecut, axes, particle_types_dict, 'a_elec-CC', 0, plot_range=(0, 1), color='r', linestyle='--')\n\n configure_hstack_plot(plot_title='Probability to be classified as elec-CC (shower)', savepath='results/plots/PT_hist1D_prob_shower_' + modelname + '.pdf')\n plt.cla()\n\n make_prob_hist_class(arr_energy_correct_ecut, axes, particle_types_dict, 'muon-CC', 1, plot_range=(0,1), color='b', linestyle='-')\n make_prob_hist_class(arr_energy_correct_ecut, axes, particle_types_dict, 'a_muon-CC', 1, plot_range=(0, 1), color='b', linestyle='--')\n make_prob_hist_class(arr_energy_correct_ecut, axes, particle_types_dict, 'elec-CC', 1, plot_range=(0,1), color='r', linestyle='-')\n make_prob_hist_class(arr_energy_correct_ecut, axes, particle_types_dict, 'a_elec-CC', 1, plot_range=(0, 1), color='r', linestyle='--')\n\n configure_hstack_plot(plot_title='Probability to be classified as muon-CC (track)', savepath='results/plots/PT_hist1D_prob_track_' + modelname + '.pdf')\n plt.cla()", "title": "" }, { "docid": "419e0666e70378e404e8e9cc8dbcc2c5", "score": "0.5460578", "text": "def plot_topic_distribution(document_topic_mixture):\n topic_count = len(document_topic_mixture[0])\n topic_count_distribution = [0]*topic_count\n for document in document_topic_mixture:\n max_topic = document.index(max(document))\n topic_count_distribution[max_topic] += 1\n\n plt.figure(figsize=(16, 7), dpi=160)\n plt.bar(x=range(1, topic_count+1), height=topic_count_distribution)\n plt.gca().set(ylabel='Number of Documents', xlabel='Topic')\n if topic_count > 20:\n plt.xticks(np.linspace(0, topic_count, 11))\n else:\n plt.xticks(range(1, topic_count+1))\n plt.title(\"Number of documents per topic for %d topics\"%topic_count, fontdict=dict(size=22))\n plt.show()", "title": "" }, { "docid": "beb61b910dbd6e6adaf224c1d67ef0e1", "score": "0.5456101", "text": "def _plotpdf(self, xmin: float, xmax: float, ymin: float, ymax: float, density: int, alpha: np.ndarray, mu: np.ndarray, Sigma: np.ndarray) -> None:\r\n x = np.linspace(xmin, xmax, 10*density+1)\r\n y = np.linspace(ymin, ymax, 10*density+1)\r\n X, Y = np.meshgrid(x, y) \r\n p = np.zeros((len(x), len(y)))\r\n for i in range(len(x)):\r\n for j in range(len(y)):\r\n p[i,j] = self._pGMM(np.vstack((x[i],y[j])), alpha, mu, Sigma)\r\n plt.figure()\r\n ax = plt.axes(projection='3d')\r\n ax.plot_surface(X, Y, p.T, cmap='viridis', edgecolor='none')\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\n ax.set_zlabel(\"pdf\")", "title": "" }, { "docid": "28d1c3fa893b10ebe54243e6da6c9f71", "score": "0.5455251", "text": "def generate_plot():\n # Size of the coordinates\n limit = 3\n\n # Plotting fundamental domain\n fundamental_domain = (\n (0.02, 0.02), (0.02, limit), (limit, limit),\n )\n\n # Probe points\n probes = ({\"label\": \"A\", \"x\": (1, 2), \"color\": \"black\"},)\n\n # Action over probes\n actions = (\n (\n lambda x: (x[0], x[1]),\n lambda x: (x[1], x[0])),\n )\n\n # Identification lines\n id_lines = ({\"begin\": [0.02, 0.02], \"end\": [limit, 
limit], \"color\": \"blue\"},)\n\n # Execute only if run as a script\n plot_dyad_orbichord(\n x_lim=[0, limit],\n y_lim=[0, limit],\n x_label=\"$x_1$\",\n y_label=\"$x_2$\",\n fundamental_domain=fundamental_domain,\n probes=probes,\n actions=actions,\n id_lines=id_lines\n )", "title": "" }, { "docid": "85c8e2ca566b8a67c1fa839b9b3e0ebd", "score": "0.5454153", "text": "def view_distributions(df,ncols=5):\n # Preparation de la grille d'affichage\n nrows = np.ceil(df.shape[1]/ncols)\n fig = plt.figure(figsize=(15,5*nrows))\n\n for count,col in enumerate(df.columns,1):\n # Distribution quantitative\n if df[col].dtype == object:\n plt.subplot(nrows,ncols,count)\n df[col].fillna('N/A').value_counts().plot(kind=\"bar\",title=col)\n elif df[col].dtype in [np.float64,np.int64]:\n plt.subplot(nrows,ncols,count)\n df[col].plot(kind=\"hist\",title=col)\n plt.show()", "title": "" }, { "docid": "08b6c26c382ddfc8dfc638c4e25413ea", "score": "0.5447455", "text": "def plot_embedding(X, y, d, title=None):\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n\n # Plot colors numbers\n plt.figure(figsize=(10, 10))\n ax = plt.subplot(111)\n for i in range(X.shape[0]):\n # plot colored number\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=plt.cm.bwr(d[i] / 1.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)", "title": "" }, { "docid": "9ce65338c33aff0480b73111ca3691e7", "score": "0.54461133", "text": "def plot_bases(bases):\n out_file = \"base_distribution.pdf\"\n plot_data = {\"base\": [], \"position\" : [], \"count\" : [], \"experiment\" : []}\n for exp_name, pos_info in bases.iteritems():\n for pos, base_info in pos_info.iteritems():\n for base, count in base_info.iteritems():\n plot_data['base'].append(base)\n plot_data['position'].append(pos)\n plot_data['experiment'].append(exp_name)\n plot_data['count'].append(count)\n for name, convert_to in [('base', robjects.StrVector),\n ('count', robjects.IntVector),\n ('position', robjects.IntVector),\n ('experiment', robjects.StrVector)]:\n plot_data[name] = convert_to(plot_data[name])\n robjects.r.assign('exp.data', robjects.r['data.frame'](**plot_data))\n robjects.r.assign('save.file', out_file)\n robjects.r('''\n library(ggplot2)\n print(head(exp.data))\n p <- ggplot(exp.data, aes(x=position, y=count, fill=base))\n p <- p + geom_bar(position=\"fill\", stat=\"identity\")\n p <- p + facet_wrap(~experiment)\n ggsave(save.file, p, width=11, height=8)\n ''')", "title": "" }, { "docid": "b96d17971bcf54c44aafd7f72a9d46e8", "score": "0.54434603", "text": "def plot_binomial_model(trace, alpha, beta, ax=None):\n\n if not ax:\n fig = plt.figure(figsize=(10,8))\n ax = fig.add_subplot(111)\n ax.hist(trace['p'], 20, histtype='step', lw=2.0, density = True, label='post'); \n x = np.linspace(0, 1, 100)\n ax.plot(x, stats.beta.pdf(x, alpha, beta), label='prior');\n ax.legend(loc='best');\n return(ax)", "title": "" }, { "docid": "398dc1c5e5c79ec639166174a934bed4", "score": "0.54382986", "text": "def plot_probs(probs, labels, image):\n fig, (ax1, ax2) = plt.subplots(2,1, figsize=(5, 9))\n # image on top\n ax1.set_title(labels[0])\n ip.imshow(image, ax=ax1, title=labels[0])\n # plot on bottom\n ax2.barh(y=labels, width=probs)\n plt.yticks(rotation = 25)\n fig.tight_layout(pad=2)\n plt.show()", "title": "" }, { "docid": "2355e77ed873535ea82424c76c6c4e61", "score": "0.54360753", "text": "def histogram_plot(constraints_df, prediction, saving_path, header=None):\n\n def 
_distance_calc(x):\n        if (x[\"pred\"] >= x['min_value']) & (x[\"pred\"] <= x['max_value']):\n            return min(abs(x[\"pred\"] - x['min_value']), abs(x['max_value'] - x[\"pred\"]))\n        elif x[\"pred\"] < x['min_value']:\n            return -abs(x['min_value'] - x[\"pred\"])\n        elif x[\"pred\"] > x['max_value']:\n            return -abs(x['pred'] - x[\"max_value\"])\n        else:\n            print \"some error in the _distance_calc function! Check it\"\n            return None\n\n    constraints_df_with_pred = constraints_df.copy()\n    constraints_df_with_pred[\"pred\"] = prediction\n    # turning off the interactive mode, so plots will not be displayed (they are all saved in a directory)\n    # Also cleaning the plt area\n    plt.clf()\n    plt.ioff()\n    # Show the joint distribution using kernel density estimation\n    is_constrained = constraints_df_with_pred.apply(lambda x: False if (x['min_value'] == float(\"-inf\")\n                                                                       and x['max_value'] == float(\"inf\")) else True, axis=1)\n    constraints_df_subset = constraints_df_with_pred.loc[is_constrained]\n    total = constraints_df_subset.shape[0]\n    distance_from_constraint = constraints_df_subset.apply(_distance_calc, axis=1)\n    constraints_satisfied = sum([1 if i > 0 else 0 for i in distance_from_constraint]) * 1.0 / total\n    constraints_not_satisfied = 1 - constraints_satisfied\n    hist_plot = sns.distplot(distance_from_constraint, color='black')\n    for i, rectangle in enumerate(hist_plot.patches):\n        cur_x = rectangle.get_x() + rectangle.get_width() * (1 / 2)\n        height = rectangle.get_height()\n        width = rectangle.get_width()\n        # case we are left to the zero value bucket\n        if (cur_x + width < 0) and rectangle.get_height() > 0:\n            hist_plot.patches[i].set_color('r')\n        # case we are right to the zero value bucket\n        elif cur_x > 0 and rectangle.get_height() > 0:\n            hist_plot.patches[i].set_color('b')\n        # case we are on the border of the zero value bucket\n        elif rectangle.get_height() > 0:\n            hist_plot.patches[i].set_color('violet')\n\n    # adding a text with the proportions of constraints satisfied and those which weren't\n    textstr = 'Constraints Satisfied = %.1f%%\\nConstraints Not Satisfied=%.1f%%' % \\\n              (constraints_satisfied * 100, constraints_not_satisfied * 100)\n    # these are matplotlib.patch.Patch properties\n    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n    # place a text box in upper left in axes coords\n    hist_plot.text(0.03, 0.97, textstr, transform=hist_plot.transAxes, fontsize=9, verticalalignment='top',\n                   bbox=props)\n    plt.ylabel('Density', size=16)\n    plt.xlabel('Distance', size=16)\n    plt.savefig(saving_path)", "title": "" }, { "docid": "f6d523435e994e841007cb5ed9ada101", "score": "0.5435545", "text": "def distplot(d1, d2, d1_label=None, d2_label=None, d1_popstat=None, d2_popstat=None,\n             bins=None, kde=True, title='Histogram', figsize=(8, 4.5),\n             c='k', context='notebook', savefig=False):\n    with sns.plotting_context(context):\n        fig, ax = plt.subplots(figsize=figsize)\n        sns.distplot(d1, bins=bins, kde=kde, label=d1_label, hist=False, kde_kws={\"shade\": True}, color='b', ax=ax)\n        sns.distplot(d2, bins=bins, kde=kde, label=d2_label, hist=False, kde_kws={\"shade\": True}, color='r', ax=ax)\n        if d1_popstat or d2_popstat:\n            ymax = ax.get_ylim()[1]\n            d1_mu = d1_popstat[0]\n            d1_ci = [d1_mu + d1_popstat[1], d1_mu - d1_popstat[1]]\n            d2_mu = d2_popstat[0]\n            d2_ci = [d2_mu + d2_popstat[1], d2_mu - d2_popstat[1]]\n            ax.vlines(d1_mu, 0, ymax, colors='b')\n            ax.vlines(d1_ci, 0, ymax, linestyles='dashed', colors='b')\n            ax.vlines(d2_mu, 0, ymax, colors='r')\n            ax.vlines(d2_ci, 0, ymax, linestyles='dashed', colors='r')\n            
ax.set_ylim(0,ymax)\n ax = ax_params(ax, title, c)\n ax.set_title(title)\n plt.legend()\n if savefig:\n fig.savefig('figs/{}-distplot.png'.format(title), transparent=True, dpi=200, bbox_inches='tight')\n plt.show()\n return fig, ax", "title": "" }, { "docid": "a0d0fe32ebe7ca0bef191b4724c038f8", "score": "0.5434592", "text": "def plot(self, num_feat=5):\n # Create the figure and axes\n fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(15, 15))\n # Flatten axes a 1D vector\n ax = ax.flatten()\n\n # Loop through the num of targets\n for i in range(num_feat):\n # Get the names of the most frequent values\n values = self.df.iloc[:, i+2].value_counts().index.astype(str)\n # Get the most frequent values\n counts = self.df.iloc[:, i+2].value_counts(normalize=True)\n # Get the column name for labels and title\n col_name = self.df.iloc[:, i+2].name\n # Plot th bar chart\n ax[i].bar(values, counts*100)\n ax[i].set_xlabel(col_name)\n ax[i].set_ylabel('% of questions')\n sns.despine()\n ax[i].set_title(f\"{col_name} column distribution\")\n\n # Title of the overall figure\n fig.suptitle(\"Query columns distributions\", size = 20)\n fig.tight_layout()\n\n # Delete the last subplot\n fig.delaxes(ax[-1])", "title": "" }, { "docid": "43ed896bf0d87befa4891f12e994cce9", "score": "0.5430594", "text": "def plot_damping(self):\n fig = plt.figure(figsize=(6,4))\n plt.semilogx(self.reference_data['strain(%)'],\n self.reference_data['damping(%)'],\n 'bo-',\n label='Reference')\n plt.ylabel('$G/G_{max}$')\n plt.xlabel('Shear Strain $\\gamma$ (%)')\n plt.legend()\n plt.show()\n return", "title": "" } ]
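The plotting passages above all follow one recurring pattern: build a subplot grid, bar-plot categorical columns, and histogram numeric ones before saving or showing the figure. A minimal, self-contained sketch of that pattern, assuming pandas and matplotlib; the helper name, sample data, and output path below are hypothetical, not taken from any passage:

import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")  # draw off-screen, mirroring the plt.ioff() usage above
import matplotlib.pyplot as plt

def plot_column_distributions(df, ncols=3, out_file="distributions.png"):
    # One subplot per column: bar chart for categoricals, histogram for numerics.
    nrows = int(np.ceil(df.shape[1] / ncols))
    fig = plt.figure(figsize=(5 * ncols, 4 * nrows))
    for i, col in enumerate(df.columns, start=1):
        ax = fig.add_subplot(nrows, ncols, i)
        if df[col].dtype == object:
            df[col].fillna("N/A").value_counts().plot(kind="bar", title=col, ax=ax)
        else:
            df[col].plot(kind="hist", title=col, ax=ax)
    fig.tight_layout()
    fig.savefig(out_file)

# Usage with hypothetical data:
df = pd.DataFrame({"rank": ["genus", "species", "genus"], "count": [3, 1, 2]})
plot_column_distributions(df)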
8aeb8a3700f1ce37cf4f91212577812e
setter for the maximum attribute
[ { "docid": "f4d8de4754d871aa86f66900eb55dae8", "score": "0.8005382", "text": "def maximum(self, maximum):\n self._maximum = maximum", "title": "" } ]
[ { "docid": "4a092107baaf3de27d2f66d9ed3abd1d", "score": "0.8707602", "text": "def setMaximum(self, maxValue):\n self.attrs[\"Maximum\"] = maxValue", "title": "" }, { "docid": "baa8ac2a2dcea71742c037e43c142c63", "score": "0.84917045", "text": "def set_max(self, max):\n if max >= 0:\n self.max = max", "title": "" }, { "docid": "5e4fda4dbb3012e4c21054aff705e97d", "score": "0.83963513", "text": "def set_max(self, new_max):\r\n if self.max_ < new_max:\r\n self.max_ = new_max", "title": "" }, { "docid": "0baa0f5cdf3e106c3fb7992ea22ae766", "score": "0.8193914", "text": "def set_max(self, _max):\n assert _max >= 1, \"Max is less than zero (you cannot expect < 1 step)\"\n self.max = _max", "title": "" }, { "docid": "cc547ae13fb26e7348f3610902baa00a", "score": "0.81088585", "text": "def max(self):\n self._sync_properties('MAX')", "title": "" }, { "docid": "a0e52ad4f3a7204f7f72d827204d7746", "score": "0.8057498", "text": "def max(self, max):\n\n self._max = max", "title": "" }, { "docid": "1482bca896df9cdbddd0e031a39e2d91", "score": "0.7908761", "text": "def max(self):\n pass", "title": "" }, { "docid": "fec1adeb60c197e8728305783def9eac", "score": "0.7817282", "text": "def setMaximum(self, value):\n super().setMaximum(value * self.PRECISION)", "title": "" }, { "docid": "33352a67bf65253ab3d7bcc4d099fe88", "score": "0.7775449", "text": "def maximum(self, maximum):\n\n self._maximum = maximum", "title": "" }, { "docid": "b286a4b0a772fcaaeee6f1e4e5147d2d", "score": "0.7676538", "text": "def setMaxValue(self, maxValue):\r\n self.__maxValue = float(maxValue) # avoid overflows with NumPy scalars\r", "title": "" }, { "docid": "8e2c22ea48d86c4924374208baec019c", "score": "0.76561713", "text": "def amax(self):\n pass", "title": "" }, { "docid": "b6fc32a979fb0f196445d0e4104bf304", "score": "0.75750065", "text": "def modify_set_max_mag(self, value):\n self.max_mag = value", "title": "" }, { "docid": "df539c20b1e7514984a7ed3bede63e17", "score": "0.7547361", "text": "def _set_maximumValue(self, *args) -> \"bool\" :\n return _fusion.JointLimits__set_maximumValue(self, *args)", "title": "" }, { "docid": "fa049d3b6222a72b5195faefe5b0d657", "score": "0.74832916", "text": "def set_max(self,max_Value):\n\t\ttry:\n\t\t\tself.max = float(max_Value)\n\t\texcept ValueError:\n\t\t\tprint(\"Max value must be a floting point number\")", "title": "" }, { "docid": "ad88012a9f4933cb2a177aba610ff54d", "score": "0.7428217", "text": "def max(self, attribute):\n return self._output_func('Max', attribute)", "title": "" }, { "docid": "8280496fbf61c37cd9db0affec23b63b", "score": "0.7425759", "text": "def get_max(self):\n\t\treturn self.max", "title": "" }, { "docid": "6889cb5cdd1f8dfe82676bfa4cf83300", "score": "0.741832", "text": "def maximum(self):\n return self._maximum", "title": "" }, { "docid": "2e1bb5fc11bb1a29d1630b1831ffbd69", "score": "0.7407258", "text": "def max(self):", "title": "" }, { "docid": "7a4320c434a30de518a7a1aab3d895c0", "score": "0.73547256", "text": "def _set_max_value(self, max_value) -> None:\n\n self._external_max_value = max_value\n\n # Check that the current value of the parameter is still within the boundaries. 
If not, issue a warning\n\n if (\n self._external_max_value is not None\n and self.value > self._external_max_value\n ):\n\n log.warning(\n \"The current value of the parameter %s (%s) \"\n \"was above the new maximum %s.\"\n % (self.name, self.value, self._external_max_value)\n )\n self.value = self._external_max_value", "title": "" }, { "docid": "77c6c349eb4989e7555e804b5c1e1bb4", "score": "0.7324512", "text": "def set_data_max(self, v):\n self.__data_max = v", "title": "" }, { "docid": "44ad4e47391d2f368d26ea65f0bc76f3", "score": "0.7276642", "text": "def maximum(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "d1852c67f7cb7817e840a51507cab652", "score": "0.72297966", "text": "def max_value(self):\n return self.__max_value", "title": "" }, { "docid": "6c29f4143971d8bc626804984e70d70e", "score": "0.7209168", "text": "def max(self):\n return self._max", "title": "" }, { "docid": "6c29f4143971d8bc626804984e70d70e", "score": "0.7209168", "text": "def max(self):\n return self._max", "title": "" }, { "docid": "6c29f4143971d8bc626804984e70d70e", "score": "0.7209168", "text": "def max(self):\n return self._max", "title": "" }, { "docid": "6c29f4143971d8bc626804984e70d70e", "score": "0.7209168", "text": "def max(self):\n return self._max", "title": "" }, { "docid": "6c29f4143971d8bc626804984e70d70e", "score": "0.7209168", "text": "def max(self):\n return self._max", "title": "" }, { "docid": "44384672a2963684a000b18a4a9d6bc5", "score": "0.72071165", "text": "def getmax(self):\n return self.max", "title": "" }, { "docid": "40d1751b02743752c1ad13f9fa32db0a", "score": "0.7205053", "text": "def maximum(self):\n return self._maximum", "title": "" }, { "docid": "40d1751b02743752c1ad13f9fa32db0a", "score": "0.7205053", "text": "def maximum(self):\n return self._maximum", "title": "" }, { "docid": "76304baa2c39bcac17f3b806be32b596", "score": "0.7168122", "text": "def _mgui_set_maximum(self, value: float):\n self._qwidget.setMaximum(value)", "title": "" }, { "docid": "99f81678da17207f8dc41cda0dd59e99", "score": "0.7109286", "text": "def max(self):\n return _wrap(max(self))", "title": "" }, { "docid": "069390f67095b2e06e6d4d159bf0fdfb", "score": "0.71055704", "text": "def max(self) -> int:", "title": "" }, { "docid": "18535692470990114a714a1580c1bd3e", "score": "0.7099366", "text": "def max_value(self):\n return self._max_value", "title": "" }, { "docid": "aee496acdb4eefa00aed4a914465102d", "score": "0.70905507", "text": "def max(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f5907996b77b3261e0409405e62c9e20", "score": "0.70724064", "text": "def set_max_age(self, ma):\n try:\n self.max_age = int(ma) # assert maxage is an integer\n except (TypeError, ValueError):\n self.max_age = None", "title": "" }, { "docid": "4bbcd6b0464d32edde79faad82ed31fa", "score": "0.7046693", "text": "def max_y(self, new_max):\n try:\n self.y_axis_max.SetValue(round(new_max, 3))\n except OverflowError:\n self.y_axis_max.SetValue(17000)", "title": "" }, { "docid": "471c7a81ed00ffcf534a44d04bd9bba2", "score": "0.70388836", "text": "def max(self, obj) -> 'Update':\n self.params['update']['$max'] = obj\n return self", "title": "" }, { "docid": "511ca347c0096a70682ab85e7f8825ed", "score": "0.70023566", "text": "def age_max(self, age_max):\n\n self._age_max = age_max", "title": "" }, { "docid": "5487b7e5743f698bee5fce6f848df81c", "score": "0.69870776", "text": "def get_max_value(self):\n\n return self._max", "title": "" }, { "docid": "f8c6464fa9e1278b873bf5c8b383d532", "score": 
"0.69763577", "text": "def Vmax(*args):\n # Getter\n if len(args) == 0:\n return CheckForError(lib.CapControls_Get_Vmax())\n\n # Setter\n Value, = args\n CheckForError(lib.CapControls_Set_Vmax(Value))", "title": "" }, { "docid": "e94a7a4cc4eb84153df76cc907bb5944", "score": "0.6956132", "text": "def max(self) -> float:\n return self.__max", "title": "" }, { "docid": "e94a7a4cc4eb84153df76cc907bb5944", "score": "0.6956132", "text": "def max(self) -> float:\n return self.__max", "title": "" }, { "docid": "157233084cd3dbb70f39f6f3422d2ddf", "score": "0.6950489", "text": "def maxval(self):\n return self._maxval", "title": "" }, { "docid": "52f3c21bccb05889d721cf9d5019cc8c", "score": "0.6941521", "text": "def max_value(self) -> int:\n return self._max_value", "title": "" }, { "docid": "83880d42b2314d4c06d862650824d9c8", "score": "0.6926668", "text": "def setMaximumSize(self, maximumSize):\n old = None\n if self.maxSizeSet:\n old = self.maxSize\n else:\n old = None\n self.maxSize = maximumSize\n self.maxSizeSet = (maximumSize != None)\n firePropertyChange(\"maximumSize\", old, maximumSize)", "title": "" }, { "docid": "a8b1fc8dad6c0e037c3a0650e13832d8", "score": "0.6917379", "text": "def max(self) -> Optional[int]:\n return pulumi.get(self, \"max\")", "title": "" }, { "docid": "76a56f29207a816695ac4b4276131dfd", "score": "0.6906357", "text": "def max(self):\n return self._method(\"max\", self._type.element_type)", "title": "" }, { "docid": "8182783e1bdd9343f6a3c4e9b8a99c7d", "score": "0.68831646", "text": "def _set_isMaximumValueEnabled(self, *args) -> \"bool\" :\n return _fusion.JointLimits__set_isMaximumValueEnabled(self, *args)", "title": "" }, { "docid": "032ee8f1223497432f1623530684a426", "score": "0.6871222", "text": "def max(self):\n\t\tif self.values:\n\t\t\treturn self._max[-1]", "title": "" }, { "docid": "349d9787327be4c5477361be4eaa4194", "score": "0.6862584", "text": "def update_maxval(self, maxval):\n if self.fp8_maxval is None:\n self.fp8_maxval = maxval\n else:\n self.fp8_maxval = 0.9 * self.fp8_maxval + 0.1 * maxval", "title": "" }, { "docid": "79cce6823698423674ebece3c6577529", "score": "0.6837865", "text": "def _get_maximumValue(self) -> \"double\" :\n return _fusion.JointLimits__get_maximumValue(self)", "title": "" }, { "docid": "cdb5dee745fcec2016cf9d7c52f4af6e", "score": "0.6815825", "text": "def SetMaxNumValues(self, max_num_values):\r\n self._max_num_values = max_num_values", "title": "" }, { "docid": "b5abd49205dc4daed090ee00c739893d", "score": "0.6814083", "text": "def maximum(self, other):", "title": "" }, { "docid": "e4f618e5b42c509f14b5b7c4fc04864b", "score": "0.68042123", "text": "def maximum(self) -> Optional[int]:\n return pulumi.get(self, \"maximum\")", "title": "" }, { "docid": "2f6063fb5c2119dd92ec40f16a4735cf", "score": "0.6786315", "text": "def setMaximum(self, maximum):\n self._zoomSlider.setMaximum(minimum)", "title": "" }, { "docid": "03f4c093429edc72eaec29b45f6a8a6a", "score": "0.67688686", "text": "def max(self):\n return int(self._max) if not np.isinf(self._max) else self._max", "title": "" }, { "docid": "83b88ab9fe9dd8ef5525c817c9ec8a3c", "score": "0.6747705", "text": "def max(self):\n return self._feature.max", "title": "" }, { "docid": "f6987ef10a14e79bd7dfe2bd556994aa", "score": "0.6733999", "text": "def ProgressBarMax(self, nbVal):\n self.__nbVal = nbVal", "title": "" }, { "docid": "f6987ef10a14e79bd7dfe2bd556994aa", "score": "0.6733999", "text": "def ProgressBarMax(self, nbVal):\n self.__nbVal = nbVal", "title": "" }, { "docid": 
"dddeab94d429188c0d33811162998fe5", "score": "0.67330545", "text": "def TopMax(self,topmax):\n\t\tself.topmax=topmax", "title": "" }, { "docid": "a8f4334742b597d006b1f1271a49b577", "score": "0.67202634", "text": "def get_max(self):\n return self.get_limits()[1]", "title": "" }, { "docid": "785740096c20c7094f485ee61d08a29d", "score": "0.669998", "text": "def max_value(self) -> float:\n return self._max_value", "title": "" }, { "docid": "0b0adabbb75ce8fbcfe8f124e0507a29", "score": "0.66849715", "text": "def arg_max_attr(self, attr_name='scores'):\n return sp.argmax(self.snp_results[attr_name])", "title": "" }, { "docid": "a01fd3c051892c500070695338603921", "score": "0.6667559", "text": "def max_value(self) -> float:\n return 0.5", "title": "" }, { "docid": "a5dff14246da846cf4764a48393dea6a", "score": "0.66638815", "text": "def setMaximumSize(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "56098c77d6090be1107fb54efdc4abf3", "score": "0.66572076", "text": "def setMaxLevel(self, maxLevel):\n self.maxLevel = maxLevel", "title": "" }, { "docid": "d3fb77f5e39b0413ec8610198e974b42", "score": "0.66551006", "text": "def maximum(self) -> Optional[int]:\n return self._max", "title": "" }, { "docid": "a38ba142d898d3059220f2b428877fcc", "score": "0.6654223", "text": "def exclusive_maximum(self, exclusive_maximum):\n\n self._exclusive_maximum = exclusive_maximum", "title": "" }, { "docid": "6db6e9be30f58fb82f84aef75b6f0abc", "score": "0.66482687", "text": "def _get_maxas_limit(self):\n return self.__maxas_limit", "title": "" }, { "docid": "f160c141c31291705f335226920e8734", "score": "0.6640855", "text": "def get_maximum(self) -> int:\n return self.maximum if self.maximum > self.default else 0", "title": "" }, { "docid": "a70702446a91ba684f567df65dc7a63b", "score": "0.6633103", "text": "def day_max(self, day_max):\n\n self._day_max = day_max", "title": "" }, { "docid": "a70702446a91ba684f567df65dc7a63b", "score": "0.6633103", "text": "def day_max(self, day_max):\n\n self._day_max = day_max", "title": "" }, { "docid": "00d844a3cb390ddd6917bd9e00ff5f76", "score": "0.66240704", "text": "def max(self):\n fun = self.__get_fun('Get{}Max')\n err, value = fun(RetValue('f64'))\n return value", "title": "" }, { "docid": "b98491f44816860706b0fe0bb1fe3d32", "score": "0.6622033", "text": "def remove_maximum(self) -> None:\n self._external_max_value = None", "title": "" }, { "docid": "cab8fd3e00c323e699d947d13565a07a", "score": "0.6620638", "text": "def onFmaxChange(self,value):\r\n self.sysDict[self.sysCurrentName].Fmax,_ = self.locale.toDouble(value)", "title": "" }, { "docid": "d62c48e771192fde168b83d4b8c60bbc", "score": "0.6619204", "text": "def set_max_energy(self, energy):\n\n self._max_energy = energy", "title": "" }, { "docid": "7a2662899a9d26a48d2e4074c203e88e", "score": "0.66016644", "text": "def iops_max(self, iops_max):\n\n self._iops_max = iops_max", "title": "" }, { "docid": "83e7396787460d5d7378051d2b3ed267", "score": "0.65965176", "text": "def guest_maximum(self, guest_maximum):\n\n self._guest_maximum = guest_maximum", "title": "" }, { "docid": "be04146e0f2e62a42d6b6651574179ba", "score": "0.6589334", "text": "def setMaxTime(max_time):\r\n\t\tNode.max_time = max_time", "title": "" }, { "docid": "b5f12282150b0634821cd341211da92f", "score": "0.6582046", "text": "def get_data_max(self):\n return self.__data_max", "title": "" }, { "docid": "e11ce24b1ef290b6ba607834cd774505", "score": "0.6575111", "text": "def max_value(self):\n return self.atspi_value_obj.get_maximum_value()", "title": "" }, { 
"docid": "3b4e3d9543fba9c0b8dd28236fd0b109", "score": "0.65719706", "text": "def max_height(self, max_height):\n\n self._max_height = max_height", "title": "" }, { "docid": "28c46a448af56094d65f752f08dfb6fb", "score": "0.65708566", "text": "def max_x(self, new_max):\n self.x_axis_max.SetValue(round(new_max, 3))", "title": "" }, { "docid": "786d52fbc1857d1ab4f8625659170c23", "score": "0.65659666", "text": "def get_max(self):\r\n if self.get_count() > 0:\r\n return self.max_\r\n return 0.0", "title": "" }, { "docid": "65487d3c6353135656d5130a677c683f", "score": "0.6559144", "text": "def set_maxhp(self, maxhp: int):\n if not isinstance(maxhp, int) and maxhp is not None:\n raise ValueError(\"Max HP must be an integer or None.\")\n self._combatant.max_hp = maxhp", "title": "" }, { "docid": "b0286191cdd55e0afdf03c10885cddec", "score": "0.65557325", "text": "def setMaxGimbalTorque(self, ugMax):\n self._ugMax = ugMax\n return", "title": "" }, { "docid": "b0286191cdd55e0afdf03c10885cddec", "score": "0.65557325", "text": "def setMaxGimbalTorque(self, ugMax):\n self._ugMax = ugMax\n return", "title": "" }, { "docid": "ee11316be3e1006c577e41340268465b", "score": "0.65554345", "text": "def set_max_pdu( self, max_pdu ) :\n self.max_pdu = max_pdu", "title": "" }, { "docid": "6dcff6842a1e2173966a581eb43d3172", "score": "0.65466297", "text": "def set_nmax(self, N, nmax):\n sbm = self.submodes[N]\n sbm.nmax = nmax", "title": "" }, { "docid": "fcddcf9fc8103d7575dfa327b6984529", "score": "0.65375215", "text": "def datamax(self):\n return 55000", "title": "" }, { "docid": "da426e34baf6cc8bc7d08f8aa28ba4fc", "score": "0.6529486", "text": "def buttonMaximum_Clicked( self, event ):\n max = Config.getint('GameObjects', 'Enemies')\n DM.ChangeDataCapacity(self, self.listBoxEnemies, DataEnemies, max)", "title": "" }, { "docid": "cb09f3312ceca432b59e03b4d2dec1fe", "score": "0.65222543", "text": "def set_maxiter(self , max_iter):\n self.max_iter = max_iter", "title": "" }, { "docid": "6423beacb46b881111ec48a060ec93df", "score": "0.6519177", "text": "def maximum_fill_value(obj):\n return _extremum_fill_value(obj, max_filler, \"maximum\")", "title": "" }, { "docid": "6fceabd544f43bc45f40fbc04da09bab", "score": "0.6519021", "text": "def onUmaxChange(self, value):\r\n self.sysDict[self.sysCurrentName].Umax,_ = self.locale.toDouble(value)", "title": "" }, { "docid": "5dfbbdb369daca2f11e85f981dc9b4fb", "score": "0.6517304", "text": "def set_prixMaximum(self, v):\n if self.prixMinimum <= v <= self.__area: self.__pmaxi = v", "title": "" }, { "docid": "476ef1af9733e07fe5d8044cceddfccd", "score": "0.6510037", "text": "def _get_max_value(self) -> float:\n\n return self._external_max_value", "title": "" }, { "docid": "e022d45b1db9a2103f4d4cdc371b28c0", "score": "0.650865", "text": "def cmax(self):\n return self[\"cmax\"]", "title": "" }, { "docid": "974e9a2a2d12826893ca06e0f8681789", "score": "0.6500924", "text": "def max_value(self) -> google.protobuf.wrappers_pb2.DoubleValue:", "title": "" } ]
135b9aa187dcc0e58bf633f36c64c2ca
Gets the url of this DashboardMetadata.
[ { "docid": "3cab8eb5451712f7c55e14a0a0cea1ab", "score": "0.7339102", "text": "def url(self):\n return self._url", "title": "" } ]
[ { "docid": "2fbe73001cbd34a95b62e79f502d7012", "score": "0.7927961", "text": "def dashboard_url(self) -> str:\n return self._dashboard_url", "title": "" }, { "docid": "c703c957fbe09cf7c45ebb07023d3e91", "score": "0.7727256", "text": "def url(self):\n return self.data[\"metadata\"][\"url\"]", "title": "" }, { "docid": "35a0a056e90e31a217d59281c4d89f29", "score": "0.74750835", "text": "def get_url(self):\n return self.__url", "title": "" }, { "docid": "539d83db94878169161eeba7ad9c69a9", "score": "0.7449673", "text": "def url(self) -> str:\n return self._url", "title": "" }, { "docid": "07881524ead321fd3f27b3b74354a2fb", "score": "0.74416804", "text": "def get_url(self):\n return self.url", "title": "" }, { "docid": "07881524ead321fd3f27b3b74354a2fb", "score": "0.74416804", "text": "def get_url(self):\n return self.url", "title": "" }, { "docid": "e29f2680132deb470b3536132d240125", "score": "0.73837847", "text": "def url(self):\n return self.__url", "title": "" }, { "docid": "e29f2680132deb470b3536132d240125", "score": "0.73837847", "text": "def url(self):\n return self.__url", "title": "" }, { "docid": "20398f1f45e1695d4f76127641802aa1", "score": "0.7305444", "text": "def getURL(self):\n return self.__url", "title": "" }, { "docid": "bf3279d2a1d3c03f37aedc705552c583", "score": "0.73035383", "text": "def url(self):\n return self._data_url", "title": "" }, { "docid": "293da25285a37420c67d3e01d4562421", "score": "0.723976", "text": "def get_url(self):\n return self._get_url_()", "title": "" }, { "docid": "78a742ed5c6fdcb0bdefb26a7ad926f3", "score": "0.7210645", "text": "def url(self):\r\n return self._url", "title": "" }, { "docid": "78a742ed5c6fdcb0bdefb26a7ad926f3", "score": "0.7210645", "text": "def url(self):\r\n return self._url", "title": "" }, { "docid": "78a742ed5c6fdcb0bdefb26a7ad926f3", "score": "0.7210645", "text": "def url(self):\r\n return self._url", "title": "" }, { "docid": "78a742ed5c6fdcb0bdefb26a7ad926f3", "score": "0.7210645", "text": "def url(self):\r\n return self._url", "title": "" }, { "docid": "78a742ed5c6fdcb0bdefb26a7ad926f3", "score": "0.7210645", "text": "def url(self):\r\n return self._url", "title": "" }, { "docid": "964bed0d47bc0870a4d9dbc1f7ca7b03", "score": "0.7192434", "text": "def url(self):\n # type: () -> string_types\n return self._url", "title": "" }, { "docid": "964bed0d47bc0870a4d9dbc1f7ca7b03", "score": "0.7192434", "text": "def url(self):\n # type: () -> string_types\n return self._url", "title": "" }, { "docid": "659e5ae3d8d1056e7d2295e2678eaae8", "score": "0.7170676", "text": "def url(self) -> str:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "659e5ae3d8d1056e7d2295e2678eaae8", "score": "0.7170676", "text": "def url(self) -> str:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "659e5ae3d8d1056e7d2295e2678eaae8", "score": "0.7170676", "text": "def url(self) -> str:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "659e5ae3d8d1056e7d2295e2678eaae8", "score": "0.7170676", "text": "def url(self) -> str:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "659e5ae3d8d1056e7d2295e2678eaae8", "score": "0.7170676", "text": "def url(self) -> str:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "0b5a3ca744d06123052dc9b79189c052", "score": "0.71263194", "text": "def _get_url(self):\n return self._url", "title": "" }, { "docid": "99c490ca2258a7e5b1d0a293eb97ad65", "score": "0.70493704", "text": "def geturl(self):\n return self.getURLString()", "title": "" }, { "docid": 
"e8b6e1019cb8a92c07a3529582e063c1", "score": "0.70378864", "text": "def url(self):\r\n return self._columns_values['url']", "title": "" }, { "docid": "0a43bd02341be63e7937e2fa42053d34", "score": "0.70055676", "text": "def url(self) -> str:\n return Utils.generate_file_path(self.url_alias_with_prefix)", "title": "" }, { "docid": "65f4ccc9ec234e4434c08019749cc38f", "score": "0.699007", "text": "def url(self) -> Optional[str]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "65f4ccc9ec234e4434c08019749cc38f", "score": "0.699007", "text": "def url(self) -> Optional[str]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "65f4ccc9ec234e4434c08019749cc38f", "score": "0.699007", "text": "def url(self) -> Optional[str]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "65f4ccc9ec234e4434c08019749cc38f", "score": "0.699007", "text": "def url(self) -> Optional[str]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "225ae2102d2fe4ff6e37166b8db9f981", "score": "0.69781417", "text": "def html_url(self):\n return self._url", "title": "" }, { "docid": "225ae2102d2fe4ff6e37166b8db9f981", "score": "0.69781417", "text": "def html_url(self):\n return self._url", "title": "" }, { "docid": "ed162b53d85b2fb73e703ece9febf0c1", "score": "0.6960011", "text": "def url(self):\n return self.build.url", "title": "" }, { "docid": "d9c5696b96e0edf157cf31370087a40b", "score": "0.6955216", "text": "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "d9c5696b96e0edf157cf31370087a40b", "score": "0.6955216", "text": "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "4f60187a7a2b7a5dacfa2952a03a5ccd", "score": "0.69509107", "text": "def custom_url(self):\n return self._custom_url", "title": "" }, { "docid": "731c46a96f2647f9ce95825108d58330", "score": "0.69477504", "text": "def url(self) :\n\t\ttry :\n\t\t\treturn self._url\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "e9c3e44454cd5dc6b3f760d59609f4dc", "score": "0.69469714", "text": "def url(self):\n return self.config.url(project=self.project)", "title": "" }, { "docid": "16bd4dab637b5dd608da1ff688d43aa3", "score": "0.6892871", "text": "def getUrl(self):\n return self.base.get(\"url\", [])", "title": "" }, { "docid": "16bd4dab637b5dd608da1ff688d43aa3", "score": "0.6892871", "text": "def getUrl(self):\n return self.base.get(\"url\", [])", "title": "" }, { "docid": "f1eb538e4386c898da49af8ad81a9c57", "score": "0.68601656", "text": "def url(self):\n return self._call.url", "title": "" }, { "docid": "47c72bff3bfc7367d7eb5bd2de9e591e", "score": "0.6841154", "text": "def get_url(self):\n if self.digester is None:\n return None\n else:\n return self.digester.get_url()", "title": "" }, { "docid": "91bd3dd6d197a3a84ac0b2d81694f211", "score": "0.6829752", "text": "def getUrl(self):\n return self.e.getFullUrl()", "title": "" }, { "docid": "2f74140692b5ef088dee00d8f7bad523", "score": "0.6803394", "text": "def dashboard_endpoint(self) -> Optional[str]:\n return pulumi.get(self, \"dashboard_endpoint\")", "title": "" }, { "docid": "dcf4b4339ae9e44fce581044b247d08b", "score": "0.6793286", "text": "def url(self):\r\n return self.urlparts.geturl()", "title": "" }, { "docid": "d9f2c487dc9769b273f88080633441c2", "score": "0.6776823", "text": "def panel_url(self):\n return self._panel.url", "title": "" }, { "docid": "1f39b430a5777b69ec949dcce46b8006", "score": "0.6775322", "text": "def get_url(self) -> str:\n return 
self.file_url", "title": "" }, { "docid": "d35cd10891f2dbc8e13b56f2bacd9e86", "score": "0.67578846", "text": "def getUrl(self):\n did = self._did or \"all\"\n series = self._series or \"all\"\n return \"{}/{}/{}\".format(self._pid, did, series)", "title": "" }, { "docid": "939e8779aada27e859dcb3845311416f", "score": "0.67558", "text": "def get_URL(self):\n return self._output.get('URL', None)", "title": "" }, { "docid": "1c5c2ebe8261594a4c36db8a05cb8a30", "score": "0.6736182", "text": "def getUrl(self):\n return '%s://%s%s' % (self.getUrlScheme(), self.getUrlNetLoc(), self.getUrlPath());", "title": "" }, { "docid": "62243e6600289f48a4cd74e254a0bea5", "score": "0.6734765", "text": "def absolute_url(self):\n return self.properties.get('AbsoluteUrl', None)", "title": "" }, { "docid": "d9f8f5d5c55016edf8be7d1309a4cd59", "score": "0.6700179", "text": "def get_data_url(self):\n pass", "title": "" }, { "docid": "9bf53c81f3df23f4fc38e8222168eacf", "score": "0.6688201", "text": "def metrics_url(self) -> str:\n return f\"{self.base_url}{self._metrics_url}\"", "title": "" }, { "docid": "fdebcb111251381053c3144479aac3f9", "score": "0.6671577", "text": "def external_url(self):\n return self._external_url", "title": "" }, { "docid": "2a9b8e24a1d25a342914445120b3b40e", "score": "0.66336536", "text": "def download_url(self):\n return self._download_url", "title": "" }, { "docid": "900d27bc6455dee3aaf24e60c17858f7", "score": "0.6631914", "text": "def external_url(self) -> str:\n return pulumi.get(self, \"external_url\")", "title": "" }, { "docid": "37542ff9586425cfcca198cfeda19f13", "score": "0.6629137", "text": "def get_url(self):\n return self.target_url", "title": "" }, { "docid": "a8369d9481e497f5815eeaaba4b2d319", "score": "0.6628852", "text": "def getRemoteUrl(self):\n patient = self.getPatient()\n if patient:\n return patient.absolute_url()", "title": "" }, { "docid": "ade736f46228eced8413e5af03f508bf", "score": "0.6620543", "text": "def url(self):\n return self.item.url", "title": "" }, { "docid": "775f366f7569b5f6e06f2d40cf12f219", "score": "0.65979326", "text": "def get_url(self):\n r = urllib.parse.urlparse(self.config.url)\n return f\"{r.scheme}://{r.netloc}\"", "title": "" }, { "docid": "2a19ccee41909fe2f71c55f5aed8bc11", "score": "0.6586506", "text": "def url(self):\n return u'{host_url}{path}'.format(host_url=self.host.url,\n path=self.path)", "title": "" }, { "docid": "6bcfffe2c894fee4ec6654dbcd49efef", "score": "0.65817374", "text": "def url(self):\n raise NotImplementedError", "title": "" }, { "docid": "5db6a61f1683325847ac23c2009d705b", "score": "0.6565092", "text": "def download_url(self):\n # type: () -> str\n return self._download_url", "title": "" }, { "docid": "9e489696c4b03231913e1a7488ca5980", "score": "0.65640503", "text": "def link_url(self) -> str:\n return pulumi.get(self, \"link_url\")", "title": "" }, { "docid": "1d8df38335eaaf1f1eb32bd777141c40", "score": "0.6561124", "text": "def url(self):\r\n return url_from_path(self.filename)", "title": "" }, { "docid": "f8941dd55958abae7da7a8028d0360cd", "score": "0.6547939", "text": "def url(self):\n if not self._url:\n self._url = Url(request_uri(self._server))\n return self._url", "title": "" }, { "docid": "fe97bc6ff04c2008b2819d376759cd54", "score": "0.65339726", "text": "def get_a_url(self):\n return self._task.get_a_url().to_url()", "title": "" }, { "docid": "b9325742a98cead8339c72c1daa86c4c", "score": "0.6527336", "text": "def url(self):\n return self._devtools_client.GetUrl(self.id)", "title": "" }, { "docid": 
"21ba650cf9ef1ef5721f6dd4ad0ed3c0", "score": "0.65270746", "text": "def get_absolute_url(self):\n return self.get_json_url(self, use_pretty=True)", "title": "" }, { "docid": "e5f6bdbd84b2a02b9b69ca87386389a8", "score": "0.6519415", "text": "def _url(self) -> Optional[Text]:\n if os.environ.get(\"RASA_DUCKLING_HTTP_URL\"):\n return os.environ[\"RASA_DUCKLING_HTTP_URL\"]\n\n return self.component_config.get(\"url\")", "title": "" }, { "docid": "ac4780da36d9f58aaf2258ba9eee4964", "score": "0.65129", "text": "def endpoint_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"endpoint_url\")", "title": "" }, { "docid": "4480777db911e4270bea938104e71227", "score": "0.6494489", "text": "def url(self):\n return self.file.url", "title": "" }, { "docid": "56a81e2711a5041d5020b275d9a13e17", "score": "0.6486783", "text": "def url(self) -> str:\n return get_creature_url(self.identifier)", "title": "" }, { "docid": "76eae139a5e6dd732a342c5e05b408f5", "score": "0.64762497", "text": "def self_url(self):\n return self._self_url", "title": "" }, { "docid": "150748fc6dcb4ddb347ce84d03b72147", "score": "0.6431425", "text": "def _getURL(self):\r\n return 'http://%s.%s' % (self.key, self.baseurl)", "title": "" }, { "docid": "d38940ced82a1000d406b7bf8e2ab4c9", "score": "0.64279705", "text": "def url_data(self) -> str:\n return self._url_data", "title": "" }, { "docid": "96844f7bfe5a0b4a6b3d638bae670ae3", "score": "0.642447", "text": "def url(self):\n return self.get_current_url()", "title": "" }, { "docid": "c9c14ede5ad1de587f4159e0c2a4c96f", "score": "0.63986874", "text": "def href(self):\n return self.__href", "title": "" }, { "docid": "84300822e456483febb21f6c7fcfcae2", "score": "0.63976073", "text": "def get_url(self, table):\n return \"{}/{}\".format(self.config[\"host\"], table)", "title": "" }, { "docid": "95b0e76b63aa18e10b0693375648c2f7", "score": "0.63649684", "text": "def web_url(self):\n return self._web_url", "title": "" }, { "docid": "07c4c0870fee6855f2ee648cc039a562", "score": "0.6361388", "text": "def get_url(self) -> str:\n\n self._check_if_crawler_running()\n\n return self._page.url", "title": "" }, { "docid": "a2eda1b4bf296c72c6a5248629db8a9f", "score": "0.6357708", "text": "def _url(self):\n\n # Raise this since this is an abstract property.\n raise NotImplementedError('Subclass has not implemented property.')", "title": "" }, { "docid": "5f088ca5285b8caf3db796a0136c7557", "score": "0.63572615", "text": "def url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "5f088ca5285b8caf3db796a0136c7557", "score": "0.63572615", "text": "def url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "5f088ca5285b8caf3db796a0136c7557", "score": "0.63572615", "text": "def url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "5f088ca5285b8caf3db796a0136c7557", "score": "0.63572615", "text": "def url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "5f088ca5285b8caf3db796a0136c7557", "score": "0.63572615", "text": "def url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"url\")", "title": "" }, { "docid": "7db6b6babbc51d08ac387dbe7b98d539", "score": "0.63553387", "text": "def ConstructDashboardURL(self, stage=None):\n return self._run.ConstructDashboardURL(stage=stage)", "title": "" } ]
8a55eab1ea93d2f7b415a1a505232965
Add the processor to the available processors that __next__ returns.
[ { "docid": "747bf753849a90c40ac9e19dc462340f", "score": "0.7581475", "text": "def add_processor(self, processor):\n raise NotImplementedError()", "title": "" } ]
[ { "docid": "fb6f43761ac94070a64eefdf1c7b95a1", "score": "0.76851845", "text": "def __next__(self):\n for processor in self._processors:\n\n processor()", "title": "" }, { "docid": "d91c4595fb0c24cff850e46a1bf542dc", "score": "0.7611795", "text": "def add_processor(self, processor):\n self.processors.append(processor)", "title": "" }, { "docid": "fe113c26a9d288c85f8b41ce9dfe2bb9", "score": "0.6942205", "text": "def add_processor(self, processor):\n if not hasattr(processor, '__call__'):\n raise ValueError(\"LogScanner: add_processor(): processor not a \" +\n \"function or function object\")\n self._procs.append(processor)", "title": "" }, { "docid": "a36cfde1fd38a787604157183dbfaa10", "score": "0.6653939", "text": "def processors(self, processors):\n self._processors = processors", "title": "" }, { "docid": "e8f330368d3213dda0814a77c7631906", "score": "0.64629006", "text": "def register(self, processor):\n self.registry[processor.name] = processor", "title": "" }, { "docid": "09e0e0db2cfe9d1e5d849d59e4cac835", "score": "0.6329806", "text": "def as_target_processor(self):\n self.current_processor = self.tokenizer\n yield\n self.current_processor = self.feature_extractor", "title": "" }, { "docid": "5d8e0ca18d2e1b49bce3a81e87018c69", "score": "0.62887657", "text": "def my_processor():\n my_processor = MyProcessor()\n yield my_processor", "title": "" }, { "docid": "ae979d2bd3ae74c54cd6acffe87889fa", "score": "0.6263545", "text": "def ProcessNext(self):\n raise NotImplementedError", "title": "" }, { "docid": "a28c0e78628681877885b77ed9992823", "score": "0.5928027", "text": "def __setitem__(self, key, value):\n with self._condition:\n if key not in self._processors:\n proc_iterator = self._proc_iter_class()\n proc_iterator.add_processor(value)\n self._processors[key] = proc_iterator\n else:\n self._processors[key].add_processor(value)\n self._condition.notify_all()", "title": "" }, { "docid": "eb434aa65b73cd6b0b0bb87bfb66d180", "score": "0.5816772", "text": "def add_processor(name, processor_object):\n globals()[name] = processor_object\n if name not in _PROCESSOR_NAMES:\n _PROCESSOR_NAMES.append(name)", "title": "" }, { "docid": "38ca28daa7a791e4c80fe648de9a61c7", "score": "0.58017886", "text": "def add_post_processor(self, pp):\n self._pps.append(pp)\n pp.set_downloader(self)", "title": "" }, { "docid": "e29a8036b43e6d2397de391b6d2ba2e7", "score": "0.5766604", "text": "def __getitem__(self, item):\n with self._condition:\n return self._processors[item].next_processor()", "title": "" }, { "docid": "b5dfce14f9ee06da56b9b7b42cb06688", "score": "0.5749241", "text": "def __call__(self, *args, **kwargs):\n return self.current_processor(*args, **kwargs)", "title": "" }, { "docid": "55bbd67eaf267d48c3051d14b30dd44e", "score": "0.57464945", "text": "def configure_processors(self, runtime_environment):\n if self.is_configured:\n return\n for processor in self:\n processor.configure(runtime_environment)\n self.is_configured = True", "title": "" }, { "docid": "4d31f2f1793aafe93fe981aa1c062bea", "score": "0.5706102", "text": "def processor(self):\n return self._processor", "title": "" }, { "docid": "7acfe698d8f9917466e0380639ef6526", "score": "0.56905913", "text": "def processors(self):\n return self._processors", "title": "" }, { "docid": "7acfe698d8f9917466e0380639ef6526", "score": "0.56905913", "text": "def processors(self):\n return self._processors", "title": "" }, { "docid": "2b6069bef7becaec5827d98d67ce58fd", "score": "0.5676872", "text": "def __next__(self):\n import itertools\n if 
self.current_batch_index >= self.num_batches:\n print(f'[Loader] Stopping Iteration')\n import sys; sys.exit(1)\n self.stop_loading()\n raise StopIteration\n else:\n # TODO: check processes are working\n if self.ii is None:\n self.ii = itertools.cycle(range(self.num_workers))\n print(f'[NEW BATCH] !!')\n examples = []\n for i in range(self.examples_per_batch):\n print(f' Getting example {i}..', end=''); start = time.time()\n example = None\n example = self.example_queues[next(self.ii)][1].recv()\n examples.append(example)\n print(f'.done ({time.time() - start} sec)')\n start = time.time()\n batch = self.batch_collate_fn(examples)\n print(f' (__next__) Collate took {time.time() - start:.2f} sec')\n self.current_batch_index += 1\n return batch", "title": "" }, { "docid": "e4740c2013961a24a564806eadadc1e2", "score": "0.56287396", "text": "def __add__(self, next: Plug) -> Plug:\n if isinstance(next, FreePlug):\n return self\n\n # keep output not used by next pipeline and add next outputs\n next.outputs += [e for e in self.outputs if e not in next.inputs]\n return RestrictedPlug(inputs=self.inputs, outputs=next.outputs)", "title": "" }, { "docid": "8a11d548de20a453f9ae7f98e9c139c9", "score": "0.56010896", "text": "def result_processor(self):\n return None", "title": "" }, { "docid": "2f5a580b51bec249c09d3910d4fb39d7", "score": "0.5571186", "text": "def add_processor(self, module_name, content, session):\n\n filename = '/%s.py' % module_name\n self._add_file('processors', filename, content, session)", "title": "" }, { "docid": "d2b7ddec1ce04ca0b50feef26ed7d9e4", "score": "0.5564669", "text": "def then(self, f, *args, **kw):\n assert callable(f)\n assert \"source\" not in kw\n # print(\"Processor\", args, kw)\n return Processor(self, f, *args, **kw)", "title": "" }, { "docid": "27a91d150d917b16c03cd7228c5021d2", "score": "0.55428576", "text": "def _push_processor(self, proc, index=None):\n\n if index is None:\n self._procstack.append(proc)\n else:\n self._procstack.insert(index, proc)", "title": "" }, { "docid": "3e59b32899238dd59ac2fdbbf5f1f135", "score": "0.55407494", "text": "def add_processor(self, consumer, is_error_consumer=False):\n for producer in self.producers:\n self.processor_graph.add_producer_and_consumer(producer, consumer, is_error_consumer)\n return ProcessorGraphBuilder(self.processor_graph, consumer)", "title": "" }, { "docid": "acb7151480c72c391b3810741d157af1", "score": "0.54978484", "text": "def processors(self):\n return self._procs[:]", "title": "" }, { "docid": "2622cfe2c2adb72c91010acb961fead6", "score": "0.5463847", "text": "def add(self, *var_processor_fns):\n for fn in var_processor_fns:\n self._processors.append(fn)\n return self", "title": "" }, { "docid": "5bb8f014d9938de8894e0947fd49abe9", "score": "0.54606146", "text": "def next(self):\n super().next()\n self._method_next()", "title": "" }, { "docid": "26346a212ea6fa768e332f308692f442", "score": "0.54284114", "text": "def set_post_processor(self, processor):\n self._post_processor = processor", "title": "" }, { "docid": "f842cfe7e877bebb557bca6c91ac4aac", "score": "0.5416073", "text": "def bind_processor(self):\n return None", "title": "" }, { "docid": "1550e6662673c2f343f3cbe1226f90b2", "score": "0.54032487", "text": "def _process(self, data):\n for pp in self._preprocessors:\n data = pp(data)\n\n return data", "title": "" }, { "docid": "662f01c7e1873e2d8b6ffcbd0f0be61d", "score": "0.5387171", "text": "def as_target_processor(self):\n warnings.warn(\n \"`as_target_processor` is deprecated and will be removed in 
v5 of Transformers. You can process your \"\n \"labels by using the argument `text` of the regular `__call__` method (either in the same call as \"\n \"your images inputs, or in a separate call.\"\n )\n self._in_target_context_manager = True\n self.current_processor = self.tokenizer\n yield\n self.current_processor = self.image_processor\n self._in_target_context_manager = False", "title": "" }, { "docid": "e22bf7261ccd74e8dcc395c62c5072b6", "score": "0.5284179", "text": "def handle(self, to_process: typing.List[Input]) -> typing.Iterator[Output]:\n to_process.extend(self.iterable)\n yield from ()", "title": "" }, { "docid": "1a3312952974c450b87f5b3890491a33", "score": "0.52665395", "text": "def flattener():\n for pg in list_all_process_groups():\n for proc in pg.nipyapi_extended.process_group_flow.flow.processors:\n yield proc", "title": "" }, { "docid": "db9015057450d6bef36c82324c64bff3", "score": "0.5254651", "text": "def add_iter_to_generator_class(self, fn_info: FuncInfo) -> None:\n self.enter(fn_info)\n self_target = self.add_self_to_env(fn_info.generator_class.ir)\n self.add(Return(self.read(self_target, fn_info.fitem.line)))\n blocks, env, _, fn_info = self.leave()\n\n # Next, add the actual function as a method of the generator class.\n sig = FuncSignature((RuntimeArg(SELF_NAME, object_rprimitive),), object_rprimitive)\n iter_fn_decl = FuncDecl('__iter__', fn_info.generator_class.ir.name, self.module_name, sig)\n iter_fn_ir = FuncIR(iter_fn_decl, blocks, env)\n fn_info.generator_class.ir.methods['__iter__'] = iter_fn_ir\n self.functions.append(iter_fn_ir)", "title": "" }, { "docid": "cbee7f3a7a629c488d6f48ddeaa869f1", "score": "0.5245541", "text": "def _run_cpu(self):\n if not self._built:\n raise RuntimeError(\"Pipeline must be built first.\")\n if not self._last_iter:\n self._pipe.RunCPU()\n self._cpu_batches_to_consume += 1", "title": "" }, { "docid": "ce77f82a0a3a018a85304dd2546da8b4", "score": "0.5187133", "text": "def dispatch_next_process(self):\n # ...\n nextProc = self.processList[len(self.processList)-1]\n nextProc.event.set()\n nextProc.start()\n\n if len(self.processList) > 2:\n for x in range(len(self.processList) - 2):\n self.processList[x].event.clear()\n else:\n for x in range(len(self.processList) - 1):\n self.processList[x].event.clear()", "title": "" }, { "docid": "9edee307a658d36c0ca027ec0efb35f0", "score": "0.51868993", "text": "def process(self):\n raise NotImplementedError()", "title": "" }, { "docid": "e4fc19f529a56c078db02279c2d2eaa0", "score": "0.51866376", "text": "def nextGenerator(self):\n \n pass", "title": "" }, { "docid": "f90708bc176b2ee75fe4abe523c89a90", "score": "0.5161887", "text": "def __next__(self):\n pass", "title": "" }, { "docid": "5253374ac0bb6b10e3b6abf5ea564032", "score": "0.5156609", "text": "def list_stream_processors(NextToken=None, MaxResults=None):\n pass", "title": "" }, { "docid": "c68bba88497111ee9a1806076bea665c", "score": "0.5156178", "text": "def _push_next(self):\n # Optimally one would want to task worker that have none depleted iterators,\n # however, this does not seem to be possible with a worker pool\n async_ret = self._worker_pool.apply_async(\n self._worker_fn,\n (\n self.batch_size,\n self._batchify_fn,\n self.dtype,\n self.is_train,\n self.shuffle,\n self.cyclic,\n self.cycle_num,\n ),\n )\n self._data_buffer[self._sent_idx] = async_ret\n self._sent_idx += 1", "title": "" }, { "docid": "df39f047f716d689bfb3fac618ca028c", "score": "0.51334417", "text": "def _process(self, packet):\n raise 
BirImplementationError(\"Processor %s: Super class %s \" %\n (self.name, str(type(self))) + \"of threaded_processor does \"\n \"not implement _process\")", "title": "" }, { "docid": "27a4507793db10e93bbe6d1ed15026cb", "score": "0.51197475", "text": "def __init__(self, *processors):\n super().__init__()\n self._processors = tuple(processors)\n self.running = False", "title": "" }, { "docid": "b02c3528de8cb6198569e0e0b2097783", "score": "0.51169735", "text": "def next(self):\n raise NotImplementedError()", "title": "" }, { "docid": "e21638d24cf393787c1583f8c8cc7553", "score": "0.51024467", "text": "def process(self, metric):\n if metric.source not in self.queues:\n self.queues[metric.source] = []\n self.queues[metric.source].append(metric)", "title": "" }, { "docid": "6656075a2a258e01d1ca0bdab63d822b", "score": "0.5098623", "text": "def __next__(self):\n while self.index >= len(self.features) and not self.finished:\n new_features = self._fetch_features()\n self.features.extend(new_features)\n\n if self.index < len(self.features):\n self.index += 1\n return self.features[self.index - 1]\n\n raise StopIteration", "title": "" }, { "docid": "4ddad9dd6cb545f6c96f60bb79e01012", "score": "0.5096611", "text": "def __next__(self):\n return self.__next__()", "title": "" }, { "docid": "18554901c829c0d18f3337c475c165cb", "score": "0.509258", "text": "def _map_to_processors(self, f, objective):\n\n P = self._random_subprocessors()\n best = f(next(P))\n for p in P:\n x = f(p)\n if objective(best, x):\n best = x\n return best[1]", "title": "" }, { "docid": "dee02fb0f9447eb7510047bf0f9916bd", "score": "0.5092287", "text": "def __next__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "7248b835650dd95ffc5403205a7de260", "score": "0.5092085", "text": "def next(self):\n raise NotImplementedError", "title": "" }, { "docid": "c29341d7ccd048f08528b6ac4b82b946", "score": "0.5076727", "text": "def get_processor():\n return _active_processor", "title": "" }, { "docid": "a97f426dd682bdc73efbeb92b37065fa", "score": "0.5054247", "text": "def _process(self):\n return", "title": "" }, { "docid": "c883b8b487e992db3f44bd71794739df", "score": "0.50466424", "text": "def _yield(self):\n raise NotImplementedError", "title": "" }, { "docid": "9f223ae7ab82d2e277ce4734f6d8ddf9", "score": "0.50385624", "text": "def __add__(self, *args):\n return _pcbnew.SwigPyIterator___add__(self, *args)", "title": "" }, { "docid": "46164425c4ddead95bd9f562600941d2", "score": "0.5033553", "text": "def __next__(self):\n raise NotImplementedError(\"implement in subclass\")", "title": "" }, { "docid": "79900874293231e17d57dabf9e18dc2e", "score": "0.50325733", "text": "def __next__(self):\n return next(self.generator)", "title": "" }, { "docid": "e802070faf045c07130bb93e1da25b7f", "score": "0.5029876", "text": "def __next__(self):\n return self._next()", "title": "" }, { "docid": "11732417c29c99cf69c090e516f9506e", "score": "0.5023839", "text": "def next(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "899c169adea68e34b2e644c155949d8a", "score": "0.5021767", "text": "def next(self):\n raise NotImplementedError(\"This is not implemented yet\")", "title": "" }, { "docid": "7a04d8cc4cc68220420f02314d9bb6f6", "score": "0.50122577", "text": "def _next():", "title": "" }, { "docid": "a911c079a27118d60b4b1b1694481230", "score": "0.50012296", "text": "def _push(self, item):\n if self._logging == 'output':\n self._write_log(item)\n\n # The _process attribute will be set to the appropriate callable\n # when initializing the pipeline. 
I do this because I want the\n # chaining to be as efficient as possible. If logging is not set,\n # I don't want to have to hit that logic every push, so I just\n # invoke a callable attribute at each process that has been set\n # to the appropriate callable.\n for downstream in self._downstream_nodes:\n downstream._process(item)", "title": "" }, { "docid": "3f972f6ed8a4ce9f3613298ef639e6b6", "score": "0.49975964", "text": "def next(self):\n pass", "title": "" }, { "docid": "3f972f6ed8a4ce9f3613298ef639e6b6", "score": "0.49975964", "text": "def next(self):\n pass", "title": "" }, { "docid": "3f972f6ed8a4ce9f3613298ef639e6b6", "score": "0.49975964", "text": "def next(self):\n pass", "title": "" }, { "docid": "3f972f6ed8a4ce9f3613298ef639e6b6", "score": "0.49975964", "text": "def next(self):\n pass", "title": "" }, { "docid": "7861390afaa38f6bfc4b2b87863de05e", "score": "0.49942434", "text": "def add(self, proc):\n self.set_update(self.wait_update)\n self.procs.append(proc)\n proc.cb_queue = self._cb_queue\n proc.done_queue = self._done_queue", "title": "" }, { "docid": "5609341e241d3626ae9985092813ef8e", "score": "0.49916098", "text": "def GetNextProcessor(self, name, attrs):\n\n if self._handled == HandledDepth.SINGLE_TAG:\n handled = HandledDepth.NOT_HANDLED\n else:\n handled = self._handled\n\n return BaseHandler(context=self.context, handled=handled)", "title": "" }, { "docid": "403ceeb5f448ee810a31fb3ae6706ad5", "score": "0.49886668", "text": "def next(self, *args):\n pass", "title": "" }, { "docid": "46f2f911de7f690e96ba88b6ff07dbd8", "score": "0.49844867", "text": "def next(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "46f2f911de7f690e96ba88b6ff07dbd8", "score": "0.49844867", "text": "def next(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "46f2f911de7f690e96ba88b6ff07dbd8", "score": "0.49844867", "text": "def next(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "70235512fb99c9b8d5d406f51ebcfd6f", "score": "0.49750322", "text": "def processor(forType):\n MILLI = 1000\n try:\n processor = _processors[forType]\n except KeyError:\n def __init__(self, *a, **kw):\n item.Item.__init__(self, *a, **kw)\n self.store.powerUp(self, iaxiom.IBatchProcessor)\n\n attrs = {\n '__name__': 'Batch_' + forType.__name__,\n\n '__module__': forType.__module__,\n\n '__init__': __init__,\n\n '__repr__': lambda self: '<Batch of %s #%d>' % (reflect.qual(self.workUnitType), self.storeID),\n\n 'schemaVersion': 2,\n\n 'workUnitType': forType,\n\n 'scheduled': attributes.timestamp(doc=\"\"\"\n The next time at which this processor is scheduled to run.\n \"\"\", default=None),\n\n # MAGIC NUMBERS AREN'T THEY WONDERFUL?\n 'busyInterval': attributes.integer(doc=\"\", default=MILLI // 10),\n }\n _processors[forType] = processor = item.MetaItem(\n attrs['__name__'],\n (item.Item, _BatchProcessorMixin),\n attrs)\n\n registerUpgrader(\n upgradeProcessor1to2,\n _processors[forType].typeName,\n 1, 2)\n\n return processor", "title": "" }, { "docid": "f7ce849aebf3a9a5ddcac26bf6964f0f", "score": "0.49653915", "text": "def next(self, next):\n\n self._next = next", "title": "" }, { "docid": "f7ce849aebf3a9a5ddcac26bf6964f0f", "score": "0.49653915", "text": "def next(self, next):\n\n self._next = next", "title": "" }, { "docid": "f7ce849aebf3a9a5ddcac26bf6964f0f", "score": "0.49653915", "text": "def next(self, next):\n\n self._next = next", "title": "" }, { "docid": 
"6e5a1291b3b9a76d9d313673009792f2", "score": "0.49625993", "text": "def _connect_producer_to_consumers(self, processor):\n if not hasattr(processor, 'consumers'):\n e_msg = 'consumer-less processor: ' + repr(processor)\n detail = 'Did you invoke base.Processor\\'s __init__?'\n raise exceptions.ConfigurationError(e_msg, detail)\n for consumer, edge_data in self.processor_graph.consumers[processor].items():\n if edge_data['is_error_consumer']:\n processor.error_consumers.append(consumer)\n else:\n processor.consumers.append(consumer)", "title": "" }, { "docid": "9ceb545b257c31a97f5e123348e1132e", "score": "0.4955837", "text": "def __next__(self):\n\t\treturn self.next()", "title": "" }, { "docid": "bd3b69fcb8daa45ee9ac809b7a9242ba", "score": "0.49549976", "text": "def next(self):\n\n next_prime = self.sieve.next()\n self.sieve = _Sieve(next_prime, self.sieve)\n self.primes.append(next_prime)\n return next_prime", "title": "" }, { "docid": "b8272f41509fc1d50feaa73dfa540538", "score": "0.49496502", "text": "def preprocessors(self):\n return super().preprocessors + [HugoPreprocessor]", "title": "" }, { "docid": "cf72ac8d7996981c80ae6f0bc05f5744", "score": "0.49449846", "text": "def getNext(proc):\n\t\tnode = ProcTree.NODES[proc]\n\t\treturn [nextn.proc for nextn in node.next]", "title": "" }, { "docid": "253bc642e3ccd27935c69463d98f41d4", "score": "0.49301922", "text": "def register_post_processor(self, post_processor: \"IPostProcessor\") -> None:\n ...", "title": "" }, { "docid": "aff6ccfe6ce5bdad7a13ebccf295fe3f", "score": "0.49210003", "text": "def next_performer(lineup):", "title": "" }, { "docid": "bf49c6f00d299737969112ac947a9f4f", "score": "0.49039873", "text": "def process(self):\n pass", "title": "" }, { "docid": "bf49c6f00d299737969112ac947a9f4f", "score": "0.49039873", "text": "def process(self):\n pass", "title": "" }, { "docid": "bf49c6f00d299737969112ac947a9f4f", "score": "0.49039873", "text": "def process(self):\n pass", "title": "" }, { "docid": "bf49c6f00d299737969112ac947a9f4f", "score": "0.49039873", "text": "def process(self):\n pass", "title": "" }, { "docid": "3901eddf869ccae23395af16dce91cd0", "score": "0.4898005", "text": "def setProcessorsToUse(self, processorlist):\n self._checkArgs( {\n 'processorlist' : types.ListType\n } )\n self.processorlisttouse = processorlist", "title": "" }, { "docid": "68fa1ac40f7ed4d3eaeaad6060c600d6", "score": "0.48902753", "text": "def lookupProcessor(self, msg):", "title": "" }, { "docid": "34228ecb534b829016125b768f48bd93", "score": "0.48899725", "text": "def process(self) -> None:\n pass", "title": "" }, { "docid": "a0882775b4c9cd4274c6433d5dca60dd", "score": "0.48893142", "text": "def processor(self, processor_type):\n cache_entry = self._processor_cache.get((processor_type, self.world))\n\n if cache_entry is None:\n proc = self.world.get_processor(processor_type)\n if proc is not None:\n self._processor_cache[(processor_type, self.world)] = proc\n return proc\n\n return None\n\n return cache_entry", "title": "" }, { "docid": "83da0dcae291e75b27d9e48ba09214a1", "score": "0.48833093", "text": "def _next_action_hook(self, a_next):\n return a_next", "title": "" }, { "docid": "ea74f8704880ec88b8d2b71152b35d3b", "score": "0.48823616", "text": "def _make_processor(self, processor_configuration):\n assert not 'inline-pipeline' in processor_configuration, \"Unflattened configuration provided\"\n\n if 'existing' in processor_configuration:\n # Placeholder we'll replace when all processors have been made.\n return 
processor_configuration['existing']\n\n # We're going to modify the configuration, so don't modify the original.\n # Deep-copying with all the consumers is an expensive operation, so temporarily\n # remove them from the configuration:\n consumers = processor_configuration.pop('consumers', None)\n error_consumers = processor_configuration.pop('error_consumers', None)\n\n copied_processor_configuration = deepcopy(processor_configuration)\n\n # add the consumers and error_consumers back into the original configuration:\n if consumers is not None:\n processor_configuration['consumers'] = consumers\n if error_consumers is not None:\n processor_configuration['error_consumers'] = error_consumers\n\n # We pop away stuff because we want the remaining dictionary to define the\n # configuration values.\n plugin_name = copied_processor_configuration.pop('__processor__')\n processor_id = copied_processor_configuration.pop('id', None)\n\n plugin_factory = self._get_plugin_factory_or_fail(plugin_name)\n try:\n processor = plugin_factory(**self._ensure_configuration_keys_are_ascii(copied_processor_configuration))\n except TypeError, e:\n # If creating the processor blows up, the stacktrace isn't\n # particularly helpful, so put some effort in making a\n # helpful exception.\n self._explain_instantiation_typeerror(e, plugin_name, plugin_factory, copied_processor_configuration)\n\n processor.processor_configuration = copied_processor_configuration\n processor.id = processor_id\n\n return processor", "title": "" }, { "docid": "c8cfaa210beaea19a0cb4a1d50b8de2a", "score": "0.48794234", "text": "def next(self):\n\t\treturn self.__next__()", "title": "" }, { "docid": "68812a120432e779c036d61d5b6cc525", "score": "0.48742604", "text": "def process(self):\n raise NotImplementedError(\"override in a subclass\")", "title": "" }, { "docid": "5b207e9e753d5f365733aa32ce9761a7", "score": "0.48692426", "text": "def add_next_to_generator_class(self,\n fn_info: FuncInfo,\n fn_decl: FuncDecl,\n sig: FuncSignature) -> None:\n self.enter(fn_info)\n self_reg = self.read(self.add_self_to_env(fn_info.generator_class.ir))\n none_reg = self.none_object()\n\n # Call the helper function with error flags set to Py_None, and return that result.\n result = self.add(Call(fn_decl, [self_reg, none_reg, none_reg, none_reg, none_reg],\n fn_info.fitem.line))\n self.add(Return(result))\n blocks, env, _, fn_info = self.leave()\n\n sig = FuncSignature((RuntimeArg(SELF_NAME, object_rprimitive),), sig.ret_type)\n next_fn_decl = FuncDecl('__next__', fn_info.generator_class.ir.name, self.module_name, sig)\n next_fn_ir = FuncIR(next_fn_decl, blocks, env)\n fn_info.generator_class.ir.methods['__next__'] = next_fn_ir\n self.functions.append(next_fn_ir)", "title": "" }, { "docid": "b6be7e9f178fe3ea825548a58570e399", "score": "0.4864786", "text": "def __next__(self):\n # load a new page if needed\n if self.page is None or self.index >= len(self.page):\n self.index = 0\n self.page = self.load_next_page()\n self.pagenum += 1\n\n # if we have run out of results...\n if self.page is None or len(self.page) == 0:\n raise StopIteration\n\n # pull the next item from the current page\n item = self.page[self.index]\n\n # setup for the next call\n self.index += 1\n\n return item", "title": "" }, { "docid": "10534975c837b867c47060c10db593d7", "score": "0.4862352", "text": "def test_get_processor_handler(self):\n pass", "title": "" }, { "docid": "62f3ada61acc64fc540ac59dede6b829", "score": "0.485444", "text": "def process(self):\n\n pass", "title": "" }, { "docid": 
"a071a60ce95b672dee1c7b45450525ff", "score": "0.4852628", "text": "def dispatch_next_process(self):\n # Starts the first two processes on the stack\n if len(self.run_stack) > 1:\n self.run_stack[-1].event.set()\n self.run_stack[-2].event.set()\n if len(self.run_stack) > 0:\n self.run_stack[-1].event.set()", "title": "" }, { "docid": "cafaf78eea6dd9055840e02de747af29", "score": "0.4846471", "text": "def _process(self):", "title": "" } ]
a5b7cd94ad797db3374c86c3d84dd89e
Loads the dbt manifest.
[ { "docid": "218f3c12ef21f4e5270383042bd1e288", "score": "0.6806921", "text": "def dbt_manifest(self):\n # Identity function used for macro hooks\n def identity(x):\n return x\n\n # Set dbt not to run tracking. We don't load\n # a dull project and so some tracking routines\n # may fail.\n from dbt.tracking import do_not_track\n\n do_not_track()\n\n if \"0.17\" in self.dbt_version:\n from dbt.parser.manifest import (\n load_internal_manifest as load_macro_manifest,\n load_manifest,\n )\n else:\n from dbt.parser.manifest import (\n load_macro_manifest,\n load_manifest,\n )\n\n load_macro_manifest = partial(load_macro_manifest, macro_hook=identity)\n\n dbt_macros_manifest = load_macro_manifest(self.dbt_config)\n self.dbt_manifest = load_manifest(\n self.dbt_config, dbt_macros_manifest, macro_hook=identity\n )\n return self.dbt_manifest", "title": "" } ]
[ { "docid": "f7d9e68a6289002b2b30bb5e1dce5a7e", "score": "0.6945915", "text": "def read_manifest(self): # -> None:\n ...", "title": "" }, { "docid": "99a973b3f287f2f28ff302065825d6f6", "score": "0.66541666", "text": "def load_manifest(path: Path):\n with open(path, \"rt\") as fin:\n data = json_load(fin)\n return Manifest.schema().load(data, many=True)", "title": "" }, { "docid": "a55e8b437fdeeaeca62d7c70fc12a444", "score": "0.6306055", "text": "def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:\n filename = filename or self.manifest_filename\n if not os.path.isfile(filename):\n self.log.debug(f\"Manifest file '{filename}' doesn't exist and will be created.\")\n return {}\n with open(filename, \"r\") as f:\n manifest: Dict[str, str] = json.load(f)\n self.log.debug(f\"Reading manifest '{manifest}' from file '{filename}'\")\n return manifest", "title": "" }, { "docid": "00a528a9695619693b05f27772004799", "score": "0.6271859", "text": "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "title": "" }, { "docid": "20ab3df3ae8f83c433e556ca8eb3c21f", "score": "0.61065125", "text": "def _loadManifest(self, pkg):\r\n if pkg in self._packages:\r\n return\r\n\r\n sys.path = self._generatePythonPath(pkg) + sys.path", "title": "" }, { "docid": "7106b78ba67fe84af9a50aa458a9b17d", "score": "0.6054963", "text": "def load_model_manifest(rel_path=\"model_manifest.json\"):\n manifest = []\n manifest_path = \"{}/{}\".format(Path(__file__).parents[1], rel_path)\n if path.exists(manifest_path):\n with open(manifest_path) as json_file:\n manifest = json.load(json_file)\n return manifest", "title": "" }, { "docid": "7b0837b21bdc482f2140082be43eb954", "score": "0.601994", "text": "def get_manifest(self):\r\n if os.path.exists(self.manifestfile):\r\n return Manifest(json.loads(file(self.manifestfile).read()))\r\n return Manifest({})", "title": "" }, { "docid": "0fe327ea9565fa552d000271058251f6", "score": "0.5973812", "text": "def manifest(ctx, config): # use as `schematic manifest ...`\n try:\n logger.debug(f\"Loading config file contents in '{config}'\")\n ctx.obj = CONFIG.load_config(config)\n except ValueError as e:\n logger.error(\"'--config' not provided or environment variable not set.\")\n logger.exception(e)\n sys.exit(1)", "title": "" }, { "docid": "9d91603a30630396d778931d512ab0b8", "score": "0.59669316", "text": "def manifest():\n return setup((), _manifest=1)", "title": "" }, { "docid": "604edbfa197a02bc70afdd197e5bb7d0", "score": "0.58998394", "text": "def fetch_manifest(self):\n manifest = self.open(self.urls['manifest'])\n return manifest.read()", "title": "" }, { "docid": "7d158e9ac3eab40a72870cb9bcd49d5c", "score": "0.5807419", "text": "def readManifestEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION)", "title": "" }, { "docid": "cf7e2d23e65e75443297860439f5b6bb", "score": "0.5764459", "text": "def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}", "title": "" }, { "docid": "2e3f5340a45f0b014482c07a83c3295c", "score": "0.57098615", "text": "def _load(self):\n with qisys.sh.TempDir() as 
work_dir:\n pkg = portage.xpak.tbz2(self.path)\n pkg.decompose(work_dir, cleanup=0)\n arch, arch_variant = _get_pkg_arch(work_dir)\n with open(os.path.join(work_dir, 'PF'), 'r') as fpf:\n pf = fpf.readline().strip()\n name, version, revision = portage.versions.pkgsplit(pf)\n dependency = dict()\n for dep, dep_filename in _DEPENDENCY.items():\n dep_path = os.path.join(work_dir, dep_filename)\n if not os.path.exists(dep_path):\n dependency[dep] = list()\n continue\n with open(dep_path, 'r') as fdep:\n dependency[dep] = fdep.read().strip().split()\n dependency['all'] = list()\n for dep_list in _DEPENDENCY:\n dependency['all'].extend(dependency[dep_list])\n for dep, dep_list in dependency.items():\n dependency[dep] = list(set(dep_list))\n metadata = {\n 'name': name,\n 'version': version,\n 'revision': revision,\n 'arch': arch,\n 'arch_variant': arch_variant,\n 'dependencies': dependency,\n }\n self.metadata = metadata", "title": "" }, { "docid": "6db07c5064499b68d5189f81035a1566", "score": "0.5703096", "text": "def plugin_manifest():\n\n # XXX: note, this doesn't get called. For an example of this working,\n # see the mockplugin unit test.\n\n filepath = importlib.resources.files(__package__) / \"plugin_manifest.json\"\n return manifest.manifest_from_string(\n filepath.read_text()\n )", "title": "" }, { "docid": "7e4533fc64105a820b862ea9cdb02fd6", "score": "0.56927377", "text": "def load_manifest(manifest_path, use_relative_manifest_path=False):\n if use_relative_manifest_path:\n extra_path = os.path.dirname(manifest_path)\n manifest = []\n sample_names = set()\n with open(manifest_path, \"rt\") as manifest_file:\n for line in manifest_file:\n if line.startswith('#'):\n continue\n line = line.rstrip()\n element = ManifestElement()\n element.load(line)\n\n if use_relative_manifest_path:\n element.append_to_path(extra_path)\n elif not os.path.exists(element.path):\n # check paths relative to manifest\n element.append_to_path(os.path.dirname(\n os.path.abspath(manifest_path)))\n\n # check if file exists\n if not os.path.exists(element.path):\n raise FileNotFoundError(\n errno.ENOENT, os.strerror(errno.ENOENT), element.path)\n\n # duplicate sample names are bad because we name folders after them\n if element.name in sample_names:\n raise Exception(\n \"Duplicate sample name in manifest: %s\" % element.name)\n sample_names.add(element.name)\n\n manifest.append(element)\n return manifest", "title": "" }, { "docid": "ae9cb021e1682f5997b4f51b3a1b1371", "score": "0.5653137", "text": "def _read_manifest_json(self):\n with open(os.path.join(self._crx_dir, \"manifest.json\")) as manifest:\n return json.load(manifest)", "title": "" }, { "docid": "1ccb117ec29f6283c5057d9bdf655118", "score": "0.5535802", "text": "def load_app_manifests(self):\n self.app_manifests = []\n apps_lib_path = os.path.join(self.apps_dir_path, \"lib\")\n for app_dir in os.listdir(apps_lib_path):\n if app_dir not in (\"__init__.py\", \"__init__.pyc\"):\n if app_dir.find(\"_v\") > 1:\n app_name = app_dir[:app_dir.find(\"_v\")]\n self.app_manifests.append(json.load(file(os.path.join(self.apps_dir_path, 'lib', app_dir, \"manifest.json\"))))\n log.info(\"Manifest for %s app was loaded\" % (app_dir))\n else:\n log.info(\"Directory %s will be skipped from app loader . 
Doesn't match naming convention .\" % app_dir)", "title": "" }, { "docid": "7d26c3828209e8535da49a32a2920b7e", "score": "0.5523236", "text": "def get_manifest(self):\n return self.manifest", "title": "" }, { "docid": "adfc7abb294e33ba47c8708ec3767959", "score": "0.5494579", "text": "def load_manifest(self, *, user: str, identity_file: str):\n if not self.master_ip:\n return\n\n master_ssh_client = get_ssh_client(\n user=user,\n host=self.master_ip,\n identity_file=identity_file,\n wait=True,\n print_status=False)\n\n with master_ssh_client:\n manifest_raw = ssh_check_output(\n client=master_ssh_client,\n command=\"\"\"\n cat \"$HOME/.flintrock-manifest.json\"\n \"\"\")\n # TODO: Would it be better if storage (ephemeral and otherwise) was\n # implemented as a Flintrock service and tracked in the manifest?\n ephemeral_dirs_raw = ssh_check_output(\n client=master_ssh_client,\n # It's generally safer to avoid using ls:\n # http://mywiki.wooledge.org/ParsingLs\n command=\"\"\"\n shopt -s nullglob\n for f in /media/ephemeral*; do\n echo \"$f\"\n done\n \"\"\")\n\n manifest = json.loads(manifest_raw)\n\n self.ssh_key_pair = SSHKeyPair(\n public=manifest['ssh_key_pair']['public'],\n private=manifest['ssh_key_pair']['private'])\n\n services = []\n for [service_name, manifest] in manifest['services']:\n # TODO: Expose the classes being used here.\n service = globals()[service_name](**manifest)\n services.append(service)\n self.services = services\n\n storage_dirs = StorageDirs(\n root='/media/root',\n ephemeral=sorted(ephemeral_dirs_raw.splitlines()),\n persistent=None)\n self.storage_dirs = storage_dirs", "title": "" }, { "docid": "7a6bd0e807055cc5264bd9bd0d41cead", "score": "0.5488276", "text": "def manifest_from_file(file_name):\n in_file = open(file_name, 'rb')\n try:\n return read_file_manifest(in_file)\n finally:\n in_file.close()", "title": "" }, { "docid": "50b1436d4dad2dae6c6bfcbad316a521", "score": "0.53888357", "text": "def parse_manifest(location):\n mode = 'r'\n with open(location, mode) as manifest:\n return parse_manifest_data(manifest.read())", "title": "" }, { "docid": "655118e9ba3dbba3270944ecf8a5725b", "score": "0.53654754", "text": "def read_manifest(manifest_fn):\n with open(manifest_fn, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=';')\n dicts = list(reader)\n return dicts", "title": "" }, { "docid": "1ce7638a5cd096b7433f1ade4ed970fa", "score": "0.5360083", "text": "def extract_manifest(path, resource_name):\n with LoadLibrary(path) as handle:\n try:\n return win32api.LoadResource(handle, RT_MANIFEST, resource_name)\n except pywintypes.error as error:\n if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:\n return None\n else:\n raise", "title": "" }, { "docid": "1b0d63051ae40298e14d59e8684bcb16", "score": "0.53290707", "text": "def _set_manifest(self, manifest: Dict) -> None:\n if \"metadata\" not in manifest:\n manifest[\"metadata\"] = {}\n\n if \"files\" not in manifest:\n manifest[\"files\"] = {\n \"includes\": [],\n \"excludes\": [],\n }\n\n with open(self._manifest_path, \"w\", encoding=\"utf-8\") as file:\n # TODO: Exception handling\n self._yaml.dump(manifest, file)", "title": "" }, { "docid": "c327e2f4872c12c438d696fd42567dbe", "score": "0.53205585", "text": "def _buildmanifest(self):\n\n man = self._parents[0].manifest().copy()\n copied = self._repo.dirstate.copies()\n is_exec = util.execfunc(self._repo.root,\n lambda p: man.execf(copied.get(p,p)))\n is_link = util.linkfunc(self._repo.root,\n lambda p: man.linkf(copied.get(p,p)))\n modified, 
added, removed, deleted, unknown = self._status[:5]\n for i, l in ((\"a\", added), (\"m\", modified), (\"u\", unknown)):\n for f in l:\n man[f] = man.get(copied.get(f, f), nullid) + i\n try:\n man.set(f, is_exec(f), is_link(f))\n except OSError:\n pass\n\n for f in deleted + removed:\n if f in man:\n del man[f]\n\n self._manifest = man", "title": "" }, { "docid": "779a95c5259a1319f645df743eaa83a2", "score": "0.53105235", "text": "def manifest_dict(self):\n return self._parsed", "title": "" }, { "docid": "779a95c5259a1319f645df743eaa83a2", "score": "0.53105235", "text": "def manifest_dict(self):\n return self._parsed", "title": "" }, { "docid": "01a74a8067cf96208e5a6f84699f39a2", "score": "0.53061396", "text": "def require_manifest(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # Assume the manifest is in the current directory\n try:\n # If we are in a repository, we want to look in\n # the root of that repository for the manifest\n current_repo = vcs_git.RepoTool(Path.cwd(), search_parent=True)\n root_path = current_repo.get_root_path()\n except vcs_git.InvalidRepository:\n # Since we are not in a repository we will look\n # for the manifest in the current directory\n root_path = Path.cwd()\n\n manifest_path = root_path / manifest.MANIFEST_NAME\n\n try:\n loaded_manifest = manifest.load_manifest(manifest_path)\n return func(loaded_manifest, root_path, *args, **kwargs)\n except manifest.NotFound:\n ui.error(f\"Unable to load manifest: Not found: {str(manifest_path)}\")\n sys.exit(1)\n except manifest.ValidationFailed as exc:\n ui.error(f\"Unable to load manifest: Validation failed\")\n ui.error(str(exc))\n sys.exit(1)\n\n return wrapper", "title": "" }, { "docid": "64e5d397d57a697e741ab40fd11081cc", "score": "0.52327967", "text": "def load(config, args):\n if not confirm(\"WARNING: This isn't considered production ready just yet. 
Continue?\"):\n return\n if not args.no_backup:\n timestamp = args.project.dump(args.node)\n print \"Pre-load backup: %s\" % args.project.dump_path(timestamp)\n args.project.load(args.node, args.timestamp)\n print \"Service data from %s loaded at %s\" % (args.timestamp, \n args.node.hostname)", "title": "" }, { "docid": "338e16703a7a9f83dfbb260699665fc1", "score": "0.52210414", "text": "def build_manifest(self):\n return self._build_manifest", "title": "" }, { "docid": "46ae0dc6c22bbc4256e6479eea8aee5a", "score": "0.51629394", "text": "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. 
Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "title": "" }, { "docid": "ce53a48bd5b93f86a7a69842682cb68d", "score": "0.5162862", "text": "def test_talbot_manifest_fetch():\n request, response = app.test_client.get(\"/iiif/manifest/452d6b51-949c-447d-9880-1108ffdfd96e.json\")\n assert response.status == 200", "title": "" }, { "docid": "9e902dad1ecdaea377fed483c2c9b74c", "score": "0.516229", "text": "def manifest(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"manifest\")", "title": "" }, { "docid": "3f078b6b178c5d0e5bc424b6ec886675", "score": "0.5146479", "text": "def load(filename: Path) -> Optional[List[\"Downloadable\"]]:\n try:\n manifest = Manifest.load_manifest(filename)\n return Manifest.create_object_list(manifest)\n except FileNotFoundError as ex:\n logger.critical(f\"Error file not found: {ex.filename}\")\n except JSONDecodeError as ex:\n logger.critical(f\"format of manifest file is valid JSON: {ex.msg}\")\n\n return None", "title": "" }, { "docid": "6c7e411c2c15bce45e103811676aa800", "score": "0.5138625", "text": "def print_manifest_contents(jar_file_path):\n print(get_manifest_contents(jar_file_path))", "title": "" }, { "docid": "539856f10ee7afec85f51eb6bb5cac7b", "score": "0.5104779", "text": "def load_manifest_config(self, repository, revision):\n commit_log = self.m.gitiles.commit_log(\n repository, revision, step_name='Fetch manifest config',\n attempts=self._GITILES_ATTEMPTS)\n result = self.m.step.active_result\n\n # Handle missing/invalid response.\n if not (commit_log and commit_log.get('message')):\n self.m.python.failing_step('Fetch manifest config failure',\n 'Failed to fetch manifest config.')\n\n build_id = None\n loaded = []\n for line in reversed(commit_log['message'].splitlines()):\n # Automatic command?\n match = self._MANIFEST_CMD_RE.match(line)\n if match:\n self.c.chromite_branch = match.group(2)\n loaded.append('Chromite branch: %s' % (self.c.chromite_branch,))\n continue\n\n # Build ID?\n match = self._BUILD_ID_RE.match(line)\n if match:\n self.c.cbb.build_id = match.group(1)\n continue\n if loaded:\n loaded.insert(0, '')\n result.presentation.step_text += '<br/>'.join(loaded)", "title": "" }, { "docid": "70ff872d42bb02f2d93a3b7e8712e6fc", "score": "0.5101483", "text": "def parse_data(self):\n\n try:\n if self.is_bytes:\n self.data = etree.XML(self.manifest)\n else:\n with open(self.manifest) as fh:\n self.data = etree.XML(fh.read().encode())\n except etree.XMLSyntaxError:\n raise InvalidManifest('Not an XML file')\n\n self.tree = etree.ElementTree(self.data)\n\n self.find_remotes()\n self.find_defaults()\n self.find_projects()\n\n return self.generate_manifest_dict()", "title": "" }, { "docid": "c662582eda06e2c813e146a76349d4e4", "score": "0.5074428", "text": "def get_manifest(self):\n url = f'samples/{self.uuid}/manifest'\n return self.knex.get(url)", "title": "" }, { "docid": "fa9f1c7ac1c1cf13b90bb3ca1efa6177", "score": "0.505936", "text": "def 
test_incorrect_dependency(self):\n load_manifest(StringIO(manifest_incorrect_dependency))", "title": "" }, { "docid": "90c562ccc1e580b741c20fdb9872524c", "score": "0.5049897", "text": "def get_manifestfile(self):\r\n if self.__manifest_file is None:\r\n return os.path.join(self.cloudletdir, \"manifest\")\r\n return self.__manifest_file", "title": "" }, { "docid": "179ceba762ccea795c47f14819cba941", "score": "0.50358003", "text": "def readManifest(fn):\n fp = open(fn)\n manifest = []\n for line in fp.readlines():\n work = line.strip()\n if work.startswith(\"#\"): continue\n if not work: continue\n if work.startswith('\"'):\n source = work.split('\"')[1]\n work = work[len(source)+2:].strip()\n else:\n source = work.split()[0]\n work = work[len(source):].strip()\n pass #end-else\n if not work or work.startswith(\"#\"):\n target = None\n elif work.startswith('\"'):\n target = work.split('\"')[1]\n work = work[len(target)+2:].strip()\n else:\n target = work.split()[0]\n work = work[len(target):].strip()\n pass #end-if\n if not work.startswith(\"#\") and work:\n raise Exception(\"Bad format line %s\" % line.strip())\n manifest.append((source,target))\n pass #end-for\n return manifest", "title": "" }, { "docid": "b3847de5ff492d4c3321c741f0ee3fd0", "score": "0.50304276", "text": "def manifest(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"manifest\")", "title": "" }, { "docid": "b528a893ada959204fd0cdc9a92c02b9", "score": "0.49886143", "text": "def load(description: Dict, index: Index) -> Manifest:\n\n sources = description.get(\"sources\", {})\n pipelines = description.get(\"pipelines\", [])\n\n manifest = Manifest()\n source_refs = set()\n\n # load the sources\n for name, desc in sources.items():\n info = index.get_module_info(\"Source\", name)\n items = desc.get(\"items\", {})\n options = desc.get(\"options\", {})\n manifest.add_source(info, items, options)\n source_refs.update(items.keys())\n\n for desc in pipelines:\n load_pipeline(desc, index, manifest, source_refs)\n\n # The \"runner\" property in the manifest format is the\n # runner to the run the pipeline with. In osbuild the\n # \"runner\" property belongs to the \"build\" pipeline,\n # i.e. is what runner to use for it. 
This we have to\n # go through the pipelines and fix things up\n pipelines = manifest.pipelines.values()\n\n host_runner = Runner(index.detect_host_runner())\n runners = {\n pl.id: pl.runner for pl in pipelines\n }\n\n for pipeline in pipelines:\n if not pipeline.build:\n pipeline.runner = host_runner\n continue\n\n runner = runners[pipeline.build]\n pipeline.runner = runner\n\n return manifest", "title": "" }, { "docid": "fd855874ea863136d944aa1c11523ccc", "score": "0.49822238", "text": "def manifest_file(self):\n return self._manifest_file", "title": "" }, { "docid": "af5fa5b4cc493e43c7266c31c836630f", "score": "0.4977194", "text": "def load_from_disk(cls, path: Path) -> DirManifest:\n package = DirManifest()\n\n package.path = path\n packagepathstr = str(path)\n paths: List[str] = []\n\n # Simply return empty manifests if the given path isn't a dir.\n # (the server may intend to create it and is just asking what's\n # there already)\n if path.is_dir():\n # Build the full list of package-relative paths.\n for basename, _dirnames, filenames in os.walk(path):\n for filename in filenames:\n fullname = os.path.join(basename, filename)\n assert fullname.startswith(packagepathstr)\n paths.append(fullname[len(packagepathstr) + 1:])\n\n import hashlib\n from concurrent.futures import ThreadPoolExecutor\n from multiprocessing import cpu_count\n\n def _get_file_info(filepath: str) -> Tuple[str, DirManifestFile]:\n sha = hashlib.sha256()\n fullfilepath = os.path.join(packagepathstr, filepath)\n if not os.path.isfile(fullfilepath):\n raise Exception(f'File not found: \"{fullfilepath}\"')\n with open(fullfilepath, 'rb') as infile:\n filebytes = infile.read()\n filesize = len(filebytes)\n sha.update(filebytes)\n return (filepath,\n DirManifestFile(filehash=sha.hexdigest(),\n filesize=filesize))\n\n # Now use all procs to hash the files efficiently.\n with ThreadPoolExecutor(max_workers=cpu_count()) as executor:\n package.files = dict(executor.map(_get_file_info, paths))\n\n return package", "title": "" }, { "docid": "ef1f539a2a3453b3a22fd53677e55abd", "score": "0.49754578", "text": "def read_manifest(manifest_path, max_duration=float('inf'), min_duration=0.0):\n manifest = []\n for json_line in codecs.open(manifest_path, 'r', 'utf-8'):\n try:\n json_data = json.loads(json_line)\n except Exception as e:\n raise IOError(\"Error reading manifest: %s\" % str(e))\n if (json_data[\"duration\"] <= max_duration and\n json_data[\"duration\"] >= min_duration):\n manifest.append(json_data)\n return manifest", "title": "" }, { "docid": "8f63ce71ff8f5d666f99eab60b12c48c", "score": "0.49737337", "text": "def description(path='election-manifest.json'):\n from electionguard.manifest import Manifest\n with open(path, 'r') as manifest:\n string_representation = manifest.read()\n return Manifest.from_json(string_representation)", "title": "" }, { "docid": "26d3f8774fc17313160112f9ca94151c", "score": "0.49673423", "text": "def fact():\n manifests = [x for x in os.walk(manifests_dir)]\n\n return { 'manifests': manifests }", "title": "" }, { "docid": "bf25d8a4d47663a062d11dd5bc932a57", "score": "0.4966431", "text": "def __init__(self, settings, load = True):\n\t\tself.version = 2.0\n\t\tself.data = {'@meta':{'version': 0}}# Default to no version, which will be converted.\n\t\tself.file = stringutil.normalize_file(settings.save_base() + '/Manifest.json.gz')\n\t\tself._completed = []\n\t\tself._failed = []\n\t\tif load and os.path.isfile(self.file): #!cover\n\t\t\ttry:\n\t\t\t\twith gzip.GzipFile(self.file, 'rb') as 
data_file:\n\t\t\t\t\tself.data = json.loads(data_file.read().decode('utf8'))\n\t\t\texcept:\n\t\t\t\tstringutil.error('Failed to load Manifest at [%s]. Probably corrupt. Try removing the file.' % self.file)\n\t\t\t\traise\n\t\tchange, self.data = self._adapt(self.data)\n\t\twhile change:\n\t\t\tchange, self.data = self._adapt(self.data)\n\t\t#\n\t\tassert 'elements' in self.data\n\t\tassert 'completed' in self.data['elements']\n\t\tassert 'failed' in self.data['elements']\n\t\tself.og_count = len(self.data['elements']['completed']+ self.data['elements']['failed'])", "title": "" }, { "docid": "39b9c8fa3c899df31acd93c613f2433b", "score": "0.4961703", "text": "def get_manifest_and_flags(self, manifest_id):\n if manifest_id == mercurial.node.nullid:\n return {}, {}\n revid = self._lookup_revision_by_manifest_id(manifest_id)\n return self.get_manifest_and_flags_by_revid(revid)", "title": "" }, { "docid": "15d3fc219880ae9aab1996153f0188ac", "score": "0.49540278", "text": "def get_manifest(path: str):\n base_url = urlparse(path.strip(\"/\"))\n if base_url.scheme != \"s3\":\n raise click.UsageError(\n f\"URL scheme should be s3, but received {base_url.geturl()}\"\n )\n\n s3 = boto3.resource(\"s3\")\n manifest_filenames = [\"lecida__manifest.yml\", \"manifest.yml\"]\n\n def read_s3(base_url: ParseResult, filename: str) -> Optional[bytes]:\n try:\n obj = s3.Object(\n bucket_name=base_url.netloc,\n key=base_url.path.strip(\"/\") + f\"/{filename}\"\n )\n return obj.get()['Body'].read()\n except ClientError as e:\n # Only allow NoSuchKey errors, blow up on any other errors\n if e.response['Error']['Code'] == 'NoSuchKey':\n return None\n raise e\n\n body: Optional[bytes] = None\n for mf in manifest_filenames:\n body = read_s3(base_url, mf)\n if body is not None:\n break\n if body is None:\n raise click.ClickException(\n f\"Can't find any manifest files ({manifest_filenames}) in {path}\"\n )\n\n click.secho(\n f\"Found manifest in {base_url.geturl()}/{mf}\", fg='green', err=True\n )\n click.echo(body.decode(\"utf-8\"))", "title": "" }, { "docid": "0c7388d807a35e9ab26a1facbde162d1", "score": "0.488468", "text": "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "title": "" }, { "docid": "ef160d4aa653d8ff7d32935e7e93e39f", "score": "0.48777252", "text": "def testGetManifest(self):\n manifest = self.dl_object._GetManifest()\n self.assertEqual(manifest.get('mediaType'),\n 'application/vnd.docker.distribution.manifest.v2+json')\n self.assertIn('layers', manifest)", "title": "" }, { "docid": "521b011e65109d6e46679e3ecc7be141", "score": "0.48673728", "text": "def read_manifest_xml(cls, document):\n manifest = []\n with zipfile.ZipFile(document, 'a') as open_document:\n for line in open_document.open(DOCUMENT_MANIFEST_PATH):\n manifest.append(line.decode('utf-8'))\n return manifest", "title": "" }, { "docid": "cda8f03505a34525b3b7f5fd2172ee2d", "score": "0.4864306", "text": "def get_manifest_info(manifest_xml):\n\n manifest_info = mf_parse.Manifest(manifest_xml, is_bytes=True)\n manifest_data = manifest_info.parse_data()\n\n return mf_info.ManifestInfo(manifest_data)", "title": "" }, { "docid": "3eff92cb969b3e149b8780d060a16d8d", "score": "0.48551023", "text": "def log_manifest(self):\n self._audit_log.log_json(\"manifest\", self.get_manifest_json())", "title": "" }, { "docid": "679bdaadf36b807251d2a887342ce2d1", "score": "0.4849527", "text": "def parse_manifest_data(manifest):\n # normalize line endings then split each section: they are separated by two LF\n lines = 
'\\n'.join(manifest.splitlines(False))\n sections = re.split('\\n\\n+', lines)\n return [parse_section(s) for s in sections]", "title": "" }, { "docid": "74f60b5a07651f651a7c49ce63faffcd", "score": "0.48401114", "text": "def manifestContent( self, pars, directory ):\n\n return None", "title": "" }, { "docid": "5d70564a1b2f0bb0c5e47a1113cfb48d", "score": "0.48191658", "text": "def extract_storage_manifest(storage, manifest_path='manifest.xml'):\n\n try:\n raw_manifest = storage.file(manifest_path)\n except PathNotFoundError:\n raise ValueError\n return parse_manifest(raw_manifest)", "title": "" }, { "docid": "b08d3e7f70d1c6ada6ad899818ab8518", "score": "0.4815947", "text": "def get_manifest_raw(args):\n # If we're given a path to a manifest file, use it\n if os.path.exists(args.manifest_location):\n manifest_fn = args.manifest_location\n log(\"INFO\", \"Using manifest file at location: {}\".format(manifest_fn))\n with open(manifest_fn, 'r') as manifest_file:\n manifest_raw = manifest_file.read()\n # Otherwise, use the CMake Magic manifest\n else:\n manifest_raw = _MANIFEST_CONTENTS\n log(\"TRACE\", \"Raw manifest contents: {}\".format(manifest_raw))\n return manifest_raw", "title": "" }, { "docid": "66ad0744c0442b2c711141e707bca14b", "score": "0.48058933", "text": "def get_manifest_contents(jar_file_path):\n _is_valid_jar_file(jar_file_path)\n manifest_file_contents = _get_manifest_file_contents(jar_file_path)\n return _format_attributes(manifest_file_contents)", "title": "" }, { "docid": "afb6a033b15a1e2f52a9d2f4201e2415", "score": "0.47987786", "text": "def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:\n filename = filename or self.manifest_filename\n manifest = manifest or {}\n self.log.debug(f\"Updating manifest '{manifest}' to file '{filename}'\")\n with open(filename, \"w\") as f:\n json.dump(manifest, f, indent=2)", "title": "" }, { "docid": "cdfb81321b5baba6eb9068205bd7d81f", "score": "0.47932842", "text": "def test_load_manifest_no_inheritance(self):\n temp_directory = tempfile.mkdtemp()\n parent_file_path = os.path.join(temp_directory, \"parent.cfg\")\n child_file_path = os.path.join(temp_directory, \"child.cfg\")\n with open(parent_file_path, \"w\") as fh:\n fh.write(parent_manifest)\n\n with open(child_file_path, \"w\") as fh:\n fh.write(child_manifest.format(parent_file_path))\n\n manifest = load_manifest(child_file_path, do_inherit=False)\n\n assert not manifest.has_option(\"config\", \"namespace\")", "title": "" }, { "docid": "f5beb29111b6e52c53c5cb75911a78db", "score": "0.47867966", "text": "def _ProcessManifest(manifest_path):\n doc = minidom.parse(manifest_path)\n manifests = doc.getElementsByTagName('manifest')\n assert len(manifests) == 1\n manifest = manifests[0]\n package = manifest.getAttribute('package')\n\n manifest.setAttribute('xmlns:%s' % _TOOLS_NAMESPACE_PREFIX, _TOOLS_NAMESPACE)\n\n tmp_prefix = os.path.basename(manifest_path)\n with tempfile.NamedTemporaryFile(prefix=tmp_prefix) as patched_manifest:\n doc.writexml(patched_manifest)\n patched_manifest.flush()\n yield patched_manifest.name, package", "title": "" }, { "docid": "ee93a15988741f0c421c04a837f4eb6e", "score": "0.47565392", "text": "def test_CreatePackageManifest(tempdir: pathlib.Path):\n (tempdir / \"a\").touch()\n m = dpack.CreatePackageManifest(tempdir, [pathlib.Path(\"a\")])\n assert len(m.file) == 1\n assert m.file[0].comment == \"\"\n assert not m.file[0].size_in_bytes\n assert m.file[0].checksum_hash == dpack_pb2.SHA256\n assert 
m.file[0].checksum == SHA256_EMPTY_FILE", "title": "" }, { "docid": "ed1efd6ba0fa7925a19d5452c9ede0ea", "score": "0.47510195", "text": "def supports_manifest(manifest):\n pass", "title": "" }, { "docid": "f2bca57cfcc9985abe0749bffe5e91e3", "score": "0.47421065", "text": "def add_runtime(tm_env, manifest):\n _transform_services(manifest)\n\n app_manifest.add_linux_system_services(tm_env, manifest)\n app_manifest.add_linux_services(manifest)", "title": "" }, { "docid": "1e23484f53d0f76ba1b209efa2a9bfe3", "score": "0.4688811", "text": "def load_yaml(self):\n env = self.state.document.settings.env\n relpath, abspath = env.relfn2path(directives.path(self.arguments[0]))\n\n env.note_dependency(relpath)\n\n encoding = self.options.get('encoding', env.config.source_encoding)\n with io.open(abspath, 'rt', encoding=encoding) as stream:\n spec = yaml.load(stream, _YamlOrderedLoader) # nosec\n self.spec = spec\n self.paths = spec[self.path_path]\n self.definitions = spec[self.models_path]\n self.openapi_version = spec.get('swagger', None) or spec['openapi']\n self.options.setdefault('uri', 'file://%s' % abspath)", "title": "" }, { "docid": "4dca52d23b03dd673dd1c153329be492", "score": "0.46530834", "text": "def test_valid_and_empty_manifest(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(None, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 1\n }", "title": "" }, { "docid": "1666b2b76538b5c07055126fdea7a473", "score": "0.46519795", "text": "def parse_manifest(self):\n import json\n import struct\n\n num = self.selection\n try:\n manifest = json.loads(self.cryptomattes[num].get(\"manifest\", \"{}\"))\n except:\n manifest = {}\n from_names = {}\n from_ids = {}\n\n unpacker = struct.Struct('=f')\n packer = struct.Struct(\"=I\")\n for name, value in manifest.iteritems():\n packed = packer.pack(int(value,16))\n packed = packed = '\\0' * (4 - len(packed)) + packed\n id_float = unpacker.unpack( packed )[0]\n name_str = str(name)\n from_names[name_str] = id_float\n from_ids[id_float] = name_str\n\n self.cryptomattes[num][\"names_to_IDs\"] = from_names\n self.cryptomattes[num][\"ids_to_names\"] = from_ids\n\n global g_cryptomatte_manf_from_names\n global g_cryptomatte_manf_from_IDs\n g_cryptomatte_manf_from_names = from_names\n g_cryptomatte_manf_from_IDs = from_ids\n\n return from_names", "title": "" }, { "docid": "24e6739b94123b601a11685c9bda0545", "score": "0.46446547", "text": "def getAssemblies(pth):\n if pth.lower().endswith(\".manifest\"):\n return []\n # check for manifest file\n manifestnm = pth + \".manifest\"\n if os.path.isfile(manifestnm):\n with open(manifestnm, \"rb\") as fd:\n res = {RT_MANIFEST: {1: {0: fd.read()}}}\n else:\n # check the binary for embedded manifest\n try:\n res = GetManifestResources(pth)\n except winresource.pywintypes.error as exc:\n if exc.args[0] == winresource.ERROR_BAD_EXE_FORMAT:\n logger.info('Cannot get manifest resource from non-PE '\n 'file %s', pth)\n return []\n raise\n rv = []\n if RT_MANIFEST in res and len(res[RT_MANIFEST]):\n for name in res[RT_MANIFEST]:\n for language in res[RT_MANIFEST][name]:\n # check the manifest for dependent assemblies\n try:\n manifest = Manifest()\n manifest.filename = \":\".join([pth, str(RT_MANIFEST),\n str(name), str(language)])\n manifest.parse_string(res[RT_MANIFEST][name][language],\n False)\n except Exception as exc:\n logger.error(\"Can not parse manifest resource %s, %s\"\n \" from %s\", name, 
language, pth, exc_info=1)\n else:\n if manifest.dependentAssemblies:\n logger.debug(\"Dependent assemblies of %s:\", pth)\n logger.debug(\", \".join([assembly.getid()\n for assembly in\n manifest.dependentAssemblies]))\n rv.extend(manifest.dependentAssemblies)\n return rv", "title": "" }, { "docid": "85b8cdd00a5eaecf3f96345d4373c615", "score": "0.46384037", "text": "def initFromFile(self):\n\n bootFilename = os.path.join(os.environ['CRAB3_BOOTSTRAP_DIR'], BOOTSTRAP_ENVFILE)\n if not os.path.isfile(bootFilename):\n msg = \"The CRAB3_BOOTSTRAP_DIR environment variable is set, but I could not find %s\" % bootFilename\n raise EnvironmentException(msg)\n else:\n with open(bootFilename) as fd:\n self.update(json.load(fd))", "title": "" }, { "docid": "742677b54bdac63a9c37d33e91add012", "score": "0.46214262", "text": "def load_from_module_name(\n cls, module_name: str\n ) -> \"ThreatExchangeExtensionManifest\":\n try:\n module = importlib.import_module(module_name)\n except (ImportError, ValueError):\n raise ValueError(f\"No such module '{module_name}'\")\n\n try:\n manifest = module.TX_MANIFEST\n except AttributeError:\n raise ValueError(f\"Module is missing TX_MANIFEST\")\n\n if not isinstance(manifest, cls):\n raise ValueError(f\"TX_MANIFEST is not a {cls.__name__}!\")\n return manifest", "title": "" }, { "docid": "7b95b1c76b42009ef2744f464bef6b1b", "score": "0.46139222", "text": "def test_load_manifest_inheritance(self):\n temp_directory = tempfile.mkdtemp()\n parent_file_path = os.path.join(temp_directory, \"parent.cfg\")\n child_file_path = os.path.join(temp_directory, \"child.cfg\")\n with open(parent_file_path, \"w\") as fh:\n fh.write(parent_manifest)\n\n with open(child_file_path, \"w\") as fh:\n fh.write(child_manifest.format(parent_file_path))\n\n manifest = load_manifest(child_file_path)\n\n assert (\n manifest.get(\"config\", \"namespace\") == \"inheritance\"\n ), \"Value not present in child should be pulled from parent!\"\n\n assert (\n manifest.get(\"parent_section\", \"parent\") == \"not me\"\n ), \"child value should override parent value!\"", "title": "" }, { "docid": "e604535e64fb03b9bddd475f8ce90622", "score": "0.46073496", "text": "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "title": "" }, { "docid": "2efb06224e8d79a25e522cd0d1afac2c", "score": "0.46070045", "text": "def get_manifests(arcroot):\n manifests = []\n for root, dirs, files in os.walk(arcroot):\n if 'manifest.json' in files:\n manifests.append(os.path.join(root, 'manifest.json'))\n \n return manifests", "title": "" }, { "docid": "70c274157e08161aa8e10e323cba88c8", "score": "0.45951656", "text": "def test_MergeManifests_file_attributes():\n d1 = dpack_pb2.DataPackage()\n f1 = d1.file.add()\n f1.relative_path = \"a\"\n f1.size_in_bytes = 1\n f1.checksum_hash = dpack_pb2.SHA1\n f1.checksum = \"abc\"\n d2 = dpack_pb2.DataPackage()\n f2 = d2.file.add()\n f2.relative_path = \"a\"\n f2.size_in_bytes = 2\n f2.checksum_hash = dpack_pb2.MD5\n f2.checksum = \"def\"\n dpack.MergeManifests(d1, d2)\n assert d1.file[0].size_in_bytes == 1\n assert d1.file[0].checksum_hash == dpack_pb2.SHA1\n assert d1.file[0].checksum == \"abc\"", "title": "" }, { "docid": "3fa5d962e6a31bb79ca12b590566f748", "score": "0.4583587", "text": "def 
parse_manifest(manifest_contents):\n manifest = {}\n for line in manifest_contents.split('\\n'):\n line_unpacked = line.split()\n try:\n # Check that the line isn't empty or a comment\n if not line_unpacked or line.strip().startswith('#'):\n continue\n\n target, repo_hash, url, sha256_hash = line_unpacked\n manifest[target] = {\"repo_hash\": repo_hash,\n \"url\": url,\n \"sha256_hash\": sha256_hash,\n }\n except ValueError:\n log(\"WARN\", \"Warning: Invalid line in manifest file:\\n\"\n \" {}\".format(line))\n continue\n return manifest", "title": "" }, { "docid": "10c9dab093ad302cb3acc204147d8723", "score": "0.4579398", "text": "def update_manifest(self, dst):\n # Read the current manifest into memory\n mpath = os.path.join(os.path.dirname(dst), \"manifest.json\")\n try:\n with open(mpath, 'r') as f:\n manifest = json.load(f)\n except IOError:\n manifest = {}\n\n name, _ = os.path.splitext(os.path.basename(dst))\n # Update the manifest record\n manifest[name] = {\n \"url\": os.path.basename(dst),\n \"signature\": sha256sum(dst),\n }\n\n # Write the manifest back to disk\n with open(mpath, 'w') as f:\n json.dump(manifest, f, indent=2)", "title": "" }, { "docid": "22d1b2fceaeb43d570fdab95aaa9530b", "score": "0.4572856", "text": "async def resolve_manifest(\n self, resolve_manifest_request_body: ResolveManifestRequestBody = Body(None, description=\"\")\n ) -> ResolveManifest:\n try:\n return ResolveManifest(\n manifest=ManifestDeclarativeSource(\n resolve_manifest_request_body.manifest, construct_using_pydantic_models=True\n ).resolved_manifest\n )\n except Exception as error:\n self.logger.error(f\"Could not resolve manifest with error: {error.args[0]} - {self._get_stacktrace_as_string(error)}\")\n raise HTTPException(\n status_code=400,\n detail=f\"Could not resolve manifest with error: {error.args[0]}\",\n )", "title": "" }, { "docid": "2acfb925f2be2f192c547a86e3bbfbf4", "score": "0.45651814", "text": "def build_manifest(self, build_manifest):\n\n self._build_manifest = build_manifest", "title": "" }, { "docid": "df1b33c46b8108c0b33f40bb8ef8e9f2", "score": "0.4555126", "text": "def _load(self, directory):\n pass", "title": "" }, { "docid": "2c6cfbc1ed390cefcd3c6552a3713be2", "score": "0.45462036", "text": "def update_manifest(explicit=False):\n if not os.path.exists(MANIFEST_FILENAME):\n return\n\n manifest_file = open(MANIFEST_FILENAME, 'r')\n parts = manifest_file.read().partition('\\n' + AUTOGEN_LINE)\n manifest_file.close()\n if parts[1] == '':\n if explicit:\n print \"%s has no AUTOGENERATE section\" % MANIFEST_FILENAME\n return\n\n commands = [line for line in parts[2].split('\\n') if line.startswith('#!')]\n excludes = []\n for command in commands:\n match = re.match(r'#!\\s*EXCLUDE:\\s*(.*)\\s*$', command)\n if options.verbose:\n print \"Excluding paths beginning with '%s'\" % match.group(1)\n if match:\n excludes.extend(re.split(r\",\\s*\", match.group(1)))\n\n cached_files = []\n hash_lines = []\n\n paths = options.local_listing.keys()\n paths.sort()\n size = 0\n for path in paths:\n info = options.local_listing[path]\n if path == MANIFEST_FILENAME or path == META_FILENAME or \\\n info['size'] > MAX_FILE_SIZE or \\\n is_data_path(path) or \\\n prefix_match(excludes, path):\n continue\n cached_files.append(path)\n hash_lines.append(\"%s=%s\" % (path, info['sha1']))\n size += info['size']\n\n manifest_lines = [parts[0], AUTOGEN_LINE, AUTOGEN_EXPLAIN]\n manifest_lines.extend(commands)\n manifest_lines.extend((\n \"# TOTAL FILES: %s (%s bytes)\" % (intcomma(len(cached_files)), 
intcomma(size)),\n \"# SIGNATURE: %s\" % hashlib.sha1('\\n'.join(hash_lines)).hexdigest(),\n \"CACHE:\",\n ))\n manifest_lines.extend(cached_files)\n\n manifest_file = open(MANIFEST_FILENAME, 'w')\n manifest_file.write('\\n'.join(manifest_lines) + '\\n')\n manifest_file.close()\n\n # Make sure the listing for the manifest file is up to date\n # so it will be uploaded if changed.\n update_local_listing(MANIFEST_FILENAME)", "title": "" }, { "docid": "7348c687a4460382d2328a8765e7d7cd", "score": "0.45457402", "text": "def read_file_manifest(in_stream):\n count = struct.unpack(COUNT_FMT, checked_read(in_stream, COUNT_LEN))[0]\n name_map = {}\n for dummy in range(0, count):\n length, file_sha, history_sha = \\\n struct.unpack(MANIFEST_ENTRY_HDR_FMT,\n checked_read(in_stream,\n MANIFEST_ENTRY_HDR_LEN))\n\n length -= MANIFEST_ENTRY_HDR_LEN\n name = checked_read(in_stream, length)\n\n assert not name in name_map\n name_map[name] = (file_sha, history_sha)\n return name_map", "title": "" }, { "docid": "fa27f7a4fe2ea88ba4b8f45fad5c15cf", "score": "0.4543508", "text": "def load_recipe(root: Path) -> None:\n module_name = 'dotops.recipes.' + root.name\n fpath = find_recipe(root)\n\n try:\n _ast = import_file_to_ast(fpath, module_name)\n mod = imp.new_module(module_name)\n\n mod.__file__ = fpath\n eval(ast_compile(_ast, fpath, \"exec\"), _globals(root, mod))\n except (HyTypeError, LexException) as e:\n if e.source is None:\n with open(fpath, 'rt') as fp:\n e.source = fp.read()\n e.filename = fpath\n raise\n except Exception:\n sys.modules.pop(module_name, None)\n raise", "title": "" }, { "docid": "81a19b00fd269cb677b96fbe59973f31", "score": "0.4528928", "text": "def _load(self, funcdesc):\n # Get the module and function names\n funcdesc_elts = funcdesc.split(\".\")\n module_name = \".\".join(funcdesc_elts[:-1])\n func_name = funcdesc_elts[-1]\n\n # Get the absolute path the the xml file description\n # COMPATIBILITY: module not defined in python 2.6\n python_version = sys.version_info\n if python_version[:2] <= (2, 6):\n __import__(module_name)\n else:\n importlib.import_module(module_name)\n module = sys.modules[module_name]\n\n return getattr(module, func_name)", "title": "" }, { "docid": "c6ec686d7331119f3062dc5d783e2ea3", "score": "0.45259383", "text": "def load(app, verbose, replay, exp_config=None):\n if replay:\n exp_config = exp_config or {}\n exp_config[\"replay\"] = True\n log(header, chevrons=False)\n loader = LoaderDeployment(app, Output(), verbose, exp_config)\n loader.run()", "title": "" }, { "docid": "78d87336f2396342357242477fcc5652", "score": "0.45210376", "text": "def generateManifest(syn, allFiles, filename):\n keys, data = _extract_file_entity_metadata(syn, allFiles)\n _write_manifest_data(filename, keys, data)", "title": "" }, { "docid": "3bbf55af9fb240f03d336162e60eb3ab", "score": "0.45149973", "text": "def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))", "title": "" }, { "docid": "0e10fc44bf01aabbf4cff169e4240589", "score": "0.45127457", "text": "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n 
self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "title": "" }, { "docid": "a913d923b861db64f34f4f861537a01e", "score": "0.45092314", "text": "def update_manifest(builder):\r\n\r\n manifest_path = join(builder.Config.SourceRootPath, builder.Config.WMAppManifest)\r\n dom = parse(manifest_path)\r\n\r\n #import pdb;pdb.set_trace()\r\n #version = make_version_string(builder)\r\n version = builder.AppVersion\r\n\r\n update_manifest_with_values(dom,\r\n Title = builder.CustomCfg.Title,\r\n #ProductID = builder.CustomCfg.ProductID,\r\n #PublisherID = builder.Config.PublisherID,\r\n Version = version,\r\n Languages = getattr(builder.CustomCfg, \"Languages\", None ) )\r\n\r\n with open(manifest_path, 'wb') as f:\r\n data = dom.toprettyxml(indent = \" \")\r\n # toprettyxml adds extra new lines\r\n lines = [ x for x in data.split(\"\\n\") if len(x.strip()) > 0]\r\n data = \"\\n\".join(lines)\r\n f.write(data)\r\n\r\n return True", "title": "" }, { "docid": "db592431959e59a04623ec15dd0fb0cc", "score": "0.45089307", "text": "def load():\n out = load_as_root_module()\n parser = create_parser(os.path.basename(sys.argv[0]))\n opts = parser.parse_args(sys.argv[1:])\n load_env(opts, out.opt)\n\n return out", "title": "" }, { "docid": "f6fc0df767ed0b990c78dd9a3d94f731", "score": "0.45082822", "text": "def load_descriptor(descriptor):\n\n try:\n data = yaml.safe_load(descriptor)\n except Exception as ex:\n raise CekitError('Cannot load descriptor', ex)\n\n if isinstance(data, basestring):\n LOGGER.debug(\"Reading descriptor from '{}' file...\".format(descriptor))\n\n if os.path.exists(descriptor):\n with open(descriptor, 'r') as fh:\n return yaml.safe_load(fh)\n\n raise CekitError(\n \"Descriptor could not be found on the '{}' path, please check your arguments!\".format(descriptor))\n\n LOGGER.debug(\"Reading descriptor directly...\")\n\n return data", "title": "" }, { "docid": "1b20e6798345aea70ba11175f9875564", "score": "0.45015812", "text": "def parse_manifest_xml(manifest_path):\n dir_project_dict = {}\n parsed_xml = xml.dom.minidom.parse(manifest_path)\n projects = parsed_xml.getElementsByTagName('project')\n for project in projects:\n name = project.getAttribute('name')\n path = project.getAttribute('path')\n if path:\n dir_project_dict[path] = name\n else:\n dir_project_dict[name] = name\n return dir_project_dict", "title": "" }, { "docid": "f0c83fd40ec198a1cd63f07acc61258d", "score": "0.4491093", "text": "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "title": "" }, { "docid": "a6216f842ecc246653b25523d3e82310", "score": "0.44865447", "text": "def load():\n global tinyConfig\n if not tinyConfig:\n tinyConfig = CmdArgs()\n return tinyConfig", "title": "" }, { "docid": "1abb28a2e46d4f7b412c56d5c01d386e", "score": "0.4468919", "text": "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. 
Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))", "title": "" }, { "docid": "9dcb1e24d1e730d74793d0c10f533def", "score": "0.4461114", "text": "def load(path):\n pass", "title": "" }, { "docid": "1b1b1add2dfd5fe7880d517df9599e9c", "score": "0.44596192", "text": "def load(config, contents, dynamic_templates):\n # recipe should be foo.bar.baz, not .foo.bar.baz or ..foo.bar.baz or foo/bar/baz\n path = config[\"lambdas_path\"] + os.sep\n modules = [imp.load_source(module_name, path + module_name + \".py\") for module_name in get_module_names(config)]\n for module in modules:\n module.config = config\n module.contents = contents\n module.dynamic_templates = dynamic_templates\n return {name: getattr(mod, name) for mod in modules for name in dir(mod)\n if not name.startswith(\"__\") and type(getattr(mod, name)) == types.FunctionType}", "title": "" }, { "docid": "db59caa69c354306e4d49cb173a6dd5d", "score": "0.4454742", "text": "def get_manifest(\n ctx,\n title,\n data_type,\n jsonld,\n dataset_id,\n sheet_url,\n output_csv,\n use_annotations,\n oauth,\n json_schema,\n):\n # optional parameters that need to be passed to ManifestGenerator()\n # can be read from config.yml as well\n data_type = fill_in_from_config(\"data_type\", data_type, (\"manifest\", \"data_type\"))\n jsonld = fill_in_from_config(\"jsonld\", jsonld, (\"model\", \"input\", \"location\"))\n title = fill_in_from_config(\"title\", title, (\"manifest\", \"title\"), allow_none=True)\n json_schema = fill_in_from_config(\n \"json_schema\",\n json_schema,\n (\"model\", \"input\", \"validation_schema\"),\n allow_none=True,\n )\n\n # create object of type ManifestGenerator\n manifest_generator = ManifestGenerator(\n path_to_json_ld=jsonld,\n title=title,\n root=data_type,\n oauth=oauth,\n use_annotations=use_annotations,\n )\n\n # call get_manifest() on manifest_generator\n result = manifest_generator.get_manifest(\n dataset_id=dataset_id,\n sheet_url=sheet_url,\n json_schema=json_schema,\n )\n\n if sheet_url:\n logger.info(\"Find the manifest template using this Google Sheet URL:\")\n click.echo(result)\n\n elif isinstance(result, pd.DataFrame):\n if output_csv is None:\n prefix, _ = os.path.splitext(jsonld)\n prefix_root, prefix_ext = os.path.splitext(prefix)\n if prefix_ext == \".model\":\n prefix = prefix_root\n output_csv = f\"{prefix}.{data_type}.manifest.csv\"\n\n logger.info(\n f\"Find the manifest template using this CSV file path: {output_csv}\"\n )\n\n result.to_csv(output_csv, index=False)\n\n return result", "title": "" } ]
2c4f9f20da031ff4db9ea3dc8043a8b7
Perform the forward pass of the encoder. Returns the mean with shape [batch_size, 784].
[ { "docid": "528fe2d56683e6b95bdf138bf0067021", "score": "0.57269955", "text": "def forward(self, input):\n hidden = self.z_hidden_vector(input)\n self.transformed_input = self.relu(hidden) \n self.transformed_input = self.hidden_linear_mean(self.transformed_input)\n\n mean = torch.sigmoid(self.transformed_input)\n return mean", "title": "" } ]
[ { "docid": "a65da797b7c1b5d048f0d5f35949b5c1", "score": "0.67233974", "text": "def forward(self, input):\n mean = self.decoder(input)\n\n return mean", "title": "" }, { "docid": "b5e10e0285e3058556788eb624c3f824", "score": "0.6601578", "text": "def forward(self, inputs):\n return torch.mean(inputs, dim=self.dim)", "title": "" }, { "docid": "489c5236c2b0b1c52fbb38e6a310bdb6", "score": "0.65683913", "text": "def forward(self, input_feat, wav):\n # normalize input features\n input_feat = self.normalize_input(input_feat)\n # compute cross-entropy using wavenet\n return self.m_wavenet.forward(input_feat, wav)", "title": "" }, { "docid": "4226731b65df08486bea31c7e9dba1f4", "score": "0.6552867", "text": "def forward(self, batch: dict) -> dict:\n sequence = pad_sequence(batch['y'], batch_first=True)\n sequence_lengths = batch['num_samples']\n if not torch.is_tensor(sequence_lengths):\n sequence_lengths = torch.tensor(sequence_lengths)\n\n # Call encoder\n encoded_raw, encoded_sequence_lengths = self.encoder(\n sequence, sequence_lengths)\n\n # Apply layer norm to the encoded signal\n encoded = rearrange(encoded_raw, 'b n l -> b l n')\n encoded = apply_examplewise(\n self.encoded_input_norm, encoded, encoded_sequence_lengths)\n\n # Apply convolutional layer if set\n if self.input_proj:\n encoded = rearrange(encoded, 'b l n -> b n l')\n encoded = self.input_proj(encoded)\n encoded = rearrange(encoded, 'b n l -> b l n')\n\n # Call DPRNN. Needs shape BxLxN\n processed = self.dprnn(encoded, encoded_sequence_lengths)\n processed = rearrange(processed, 'b l n -> b n l')\n\n processed = self.output_proj(self.output_prelu(processed))\n\n # Split a part of the output for an additional output\n if self.additional_out_size > 0:\n processed, additional_out = (\n processed[..., self.additional_out_size:, :],\n processed[..., :self.additional_out_size, :]\n )\n\n # Shape KxBxNxL\n processed = torch.stack(\n torch.chunk(processed, self.num_speakers, dim=1))\n processed = self.output_nonlinearity(processed)\n\n # The estimation can be a little longer than the input signal.\n # Shorten the estimation to match the input signal\n processed = processed[..., :encoded_raw.shape[-1]]\n assert encoded_raw.shape == processed.shape[1:], (\n processed.shape, encoded_raw.shape)\n\n if self.mask:\n # Mask if set\n processed = encoded_raw.unsqueeze(0) * processed\n\n # Decode stream for each speaker\n decoded = rearrange(\n self.decoder(rearrange(processed, 'k b n l -> (k b) n l')),\n '(k b) t -> k b t',\n k=processed.shape[0], b=processed.shape[1])\n\n # The length can be slightly longer than the input length if it is not\n # a multiple of a segment length.\n decoded = decoded[..., :sequence.shape[-1]]\n\n # This is necessary if an offset-invariant loss fn (e.g.,\n # SI-SNR from the TasNet paper) but an offset-variant evaluation metric\n # (e.g., SI-SDR) is used.\n # TODO: Fix the loss fn and remove this\n decoded = decoded - torch.mean(decoded, dim=-1, keepdim=True)\n\n out = {\n 'out': rearrange(decoded, 'k b t -> b k t'),\n 'encoded': rearrange(encoded_raw, 'b n l -> b l n'),\n 'encoded_out': rearrange(processed, 'k b n l -> b k l n'),\n 'encoded_sequence_lengths': encoded_sequence_lengths,\n }\n\n if self.additional_out_size > 0:\n additional_out = additional_out[..., :processed.shape[-1]]\n out['additional_out'] = additional_out\n\n return out", "title": "" }, { "docid": "7b6d24404f9bc48cff04e105375fee3f", "score": "0.6550777", "text": "def forward(self,X):\n batch_size = X.shape[0]\n \n z = self.encode(X)\n z_flat = 
z.view(batch_size,-1)\n out = self.fc_out(z_flat) # no activation for the last layer\n \n return out", "title": "" }, { "docid": "5feb7b5bfb9c0fd2138825c773878e76", "score": "0.6544755", "text": "def pretrain_forward(self, inp):\n return self.pre_fc(self.encoder(inp))", "title": "" }, { "docid": "be2b7000e8f2612b2341f540d7cb74ea", "score": "0.6498183", "text": "def forward(self, batch: torch.Tensor) -> torch.Tensor:\n return self.assignment(self.encoder(batch))", "title": "" }, { "docid": "be2b7000e8f2612b2341f540d7cb74ea", "score": "0.6498183", "text": "def forward(self, batch: torch.Tensor) -> torch.Tensor:\n return self.assignment(self.encoder(batch))", "title": "" }, { "docid": "1112a73ca0b4296e927f8d3b55c35cea", "score": "0.6480676", "text": "def forward_pass(self):\n \n # picks a random image \n #print(self.train_images.shape[0]) # this is 60,0000\n #print(self.batch_size) # this is 1\n # Generates a random array of len 1 (batch size) with the values being from 0 to 60k\n indices = np.random.choice(self.train_images.shape[0], self.batch_size)\n #print(indices)\n self.x = self.train_images[indices].astype(np.float32, copy=False)\n self.y = np.array(self.train_labels[indices])\n\n # Forward Propagation\n # print(self.x.shape) #1 x 784 \n # print(self.W.shape) #784 x 10 \n # print(self.b.shape) # 1 x 10 \n self.logits = np.dot(self.x, self.W) + self.b\n # print(self.logits.shape) # 1 x 10\n su = np.sum(np.exp(self.logits), axis=1).reshape((1, -1))\n self.softmax = np.exp(self.logits) / np.sum(np.exp(self.logits), axis=1).reshape((-1, 1))\n #print(self.softmax.shape) # 1 x 10\n\n return", "title": "" }, { "docid": "3c871ef9fbd17ca83fdac1668022ff3a", "score": "0.6218319", "text": "def forward(self, x, alpha=1e-8):\r\n batch_size, _, height, width = x.shape\r\n\r\n # [B x C x H x W] Subtract mean over batch.\r\n y = x - x.mean(dim=0, keepdim=True)\r\n\r\n # [1 x C x H x W] Calc standard deviation over batch\r\n y = th.sqrt(y.pow(2.).mean(dim=0, keepdim=False) + alpha)\r\n\r\n # [1] Take average over feature_maps and pixels.\r\n y = y.mean().view(1, 1, 1, 1)\r\n\r\n # [B x 1 x H x W] Replicate over group and pixels.\r\n y = y.repeat(batch_size, 1, height, width)\r\n\r\n # [B x C x H x W] Append as new feature_map.\r\n y = th.cat([x, y], 1)\r\n\r\n # return the computed values:\r\n return y", "title": "" }, { "docid": "01c71d00012035433673ac47feb6e397", "score": "0.61901253", "text": "def forward(self, inputs):\r\n inputs = inputs.reshape(inputs.size(0),1,inputs.size(1))\r\n x = self.tcn(inputs) # input should have dimension (N, C, L)\r\n x = F.avg_pool1d(x,kernel_size=self.step,stride=self.step)\r\n# x = self.conv1(x)\r\n x = self.dropout(x)\r\n x = self.conv1(x)\r\n x= x.reshape(x.size(0),-1)\r\n return x", "title": "" }, { "docid": "97df321beca738814c41d8fbde74dff7", "score": "0.615823", "text": "def forward(self, embedding):\n\n\t\t# Encoder\n\t\tembedding = self.relu(self.fc1(embedding))\n\t\tembedding = self.sigmoid(self.fc2(embedding))\n\n\t\treturn embedding", "title": "" }, { "docid": "7d2774ea9853e68259a61413fbb9d20e", "score": "0.6126581", "text": "def forward(self, frames) -> torch.Tensor:\n if len(frames.shape) == 4:\n frames = frames.unsqueeze(0)\n\n batch_sz = frames.shape[0]\n out = torch.mean(torch.abs(frames[:, 1:].reshape(batch_sz, -1) - frames[:, :-1].reshape(batch_sz, -1)), dim=1)\n\n return out", "title": "" }, { "docid": "d71b708b77d20c3916ad0ba0f786b911", "score": "0.6118297", "text": "def forward(self, input):\n\n \n 
#############################################################################\n # TODO: Implement the forward pass of the encoder. #\n # Apply the dropout to the embedding layer before you apply the #\n # recurrent layer #\n # Apply tanh activation to the hidden tensor before returning it #\n #############################################################################\n # input is (batch size, seq_len)\n # output is (batch size, seq len, embedding dim)\n embedding = self.embedding(input)\n # dropout certain signals. still output is (batch_size, seq len , embedding dim)\n embedding = self.dropout(embedding)\n # batch size dim is first.\n if self.model_type == \"RNN\":\n output,hidden= self.rnn(embedding)\n else:\n output, (hidden, cell) = self.rnn(embedding)\n # pass in hidden thru linear layers.\n hidden = self.linear1(hidden)\n hidden = self.activation(hidden)\n hidden = self.linear2(hidden)\n # apply final tanh activation\n hidden = self.activation_final(hidden)\n\n # hidden state will be used for decoder's first step. return the tuple for LSTM case.\n if self.model_type == \"LSTM\":\n hidden = (hidden, cell)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return output, hidden", "title": "" }, { "docid": "5fa3a23d0d3c3877b1e706ef9ae0a497", "score": "0.61058897", "text": "def forward(self, prev_y_batch, prev_h_batch, encoder_outputs_batch):\n # Calculate attention from current RNN state and all encoder outputs;\n attn_weights = self.attn_module(prev_h_batch, encoder_outputs_batch) # B x SL\n\n # Apply attention weights to encoder outputs to get weighted average\n context = torch.bmm(attn_weights.unsqueeze(1), encoder_outputs_batch.transpose(0, 1)) # B x 1 x (num_directions * encoder_hidden_dim)\n\n # Combine embedded input word and attended context, run through RNN\n # 1 x B x dec_output_dim, 1 x B x dec_output_dim\n dec_rnn_output, dec_hidden = self.combine_context_run_rnn_step(prev_y_batch,\n prev_h_batch,\n context,\n )\n\n # Return final output, hidden state, and attention weights (for visualization)\n return dec_rnn_output, dec_hidden, attn_weights", "title": "" }, { "docid": "dddff7b6cf4765e6b2676a7c112cf94e", "score": "0.60984564", "text": "def forward(self, img):\n x = self.conv1(img)\n x = self.batchNorm1(x)\n x = F.relu(x)\n\n x = self.conv2(x)\n x = self.batchNorm2(x)\n x = F.relu(x)\n\n x = self.conv3(x)\n x = self.batchNorm3(x)\n x = F.relu(x)\n\n x = self.conv4(x)\n x = self.batchNorm4(x)\n x = F.relu(x)\n\n return x", "title": "" }, { "docid": "a3f0ae393da23ed37fc4905959a026d7", "score": "0.60959506", "text": "def forward(self, x: torch.Tensor):\n\n encoder_output = self.encoder(x)\n mean = self.fc_mean(encoder_output)\n log_variance = self.fc_variance(encoder_output)\n\n return mean, log_variance", "title": "" }, { "docid": "493bdc3f74c22c16bd08c4369de5026b", "score": "0.6085028", "text": "def forward(self, x):\n\n # flattening the input matrix\n batch = x.shape[0]\n shape = x.shape[1:]\n x = x.view([batch, -1])\n\n # forward through encoder\n for layer in self.encoder:\n x = layer(x)\n code = x\n\n # normalization if necessary\n if(self.norm==True):\n code = torch.sigmoid(code)\n x = code.clone()\n\n # forward through decoder\n for layer in self.decoder:\n x = layer(x)\n\n # reshaping the output\n y = x.view([batch, *shape])\n\n return y", "title": "" }, { "docid": "4b3bb5add5f082c947039e2ed1bd769d", "score": "0.6065772", "text": "def 
forward(self,*inputs):\n return self.loss(*self.embedding(*inputs))", "title": "" }, { "docid": "2fb185350dff426e2f9dbdd32c23e5f8", "score": "0.60473067", "text": "def forward(self, batch_data):\n encoder_outputs = self.manager.get_encoder_output(batch_data, self.get_device())\n loss, logprobs = self.compute_detection_loss(encoder_outputs, batch_data)\n return loss, logprobs", "title": "" }, { "docid": "198b64e2038ffbd05aac719e90a6f5f9", "score": "0.6046837", "text": "def forward(self, x, alpha=1e-8):\n batch_size, _, height, width = x.shape\n # [B x C x H x W] Subtract mean over batch.\n y = x - x.mean(dim=0, keepdim=True)\n # [1 x C x H x W] Calc standard deviation over batch\n y = torch.sqrt(y.pow(2.).mean(dim=0, keepdim=False) + alpha)\n\n # [1] Take average over feature_maps and pixels.\n y = y.mean().view(1, 1, 1, 1)\n\n # [B x 1 x H x W] Replicate over group and pixels.\n y = y.repeat(batch_size,1, height, width)\n\n # [B x C x H x W] Append as new feature_map.\n y = torch.cat([x, y], 1)\n # return the computed values:\n return y", "title": "" }, { "docid": "3d962ca0eaabec06179e7c62ae27aebc", "score": "0.60288894", "text": "def forward(self, X):\n N = X.size()[0]\n if self._is_all:\n assert X.size() == (N, 3, 448, 448)\n X = self.features(X)\n assert X.size() == (N, 512, 28, 28)\n X = self.relu5_3(X)\n assert X.size() == (N, 512, 28, 28)\n X = torch.reshape(X, (N, 512, 28 * 28))\n X = torch.bmm(X, torch.transpose(X, 1, 2)) / (28 * 28)\n assert X.size() == (N, 512, 512)\n X = torch.reshape(X, (N, 512 * 512))\n X = torch.sqrt(X + 1e-05)\n X = torch.nn.functional.normalize(X)\n X = self.fc(X)\n return X", "title": "" }, { "docid": "2b695531a6ec38cd5e21cda2085c8013", "score": "0.60206145", "text": "def forward(self, batch):\n batch = batch[0]\n batch = batch_to_ids(batch)\n\n # TODO: truncate before converting to char ids!\n sent_len = batch.size()[1]\n # print(sent_len)\n if sent_len > self.maxSentLen:\n batch = batch[:, :self.maxSentLen, :]\n # print(batch.shape)\n\n # logger.debug(\"forward called with batch of size %s: %s\" % (batch.size(), batch,))\n if self.on_cuda():\n batch = batch.type(torch.cuda.LongTensor)\n batch.cuda()\n # print(len(batch))\n # print(len(batch[0]))\n elmo_embed = self.elmo(batch)['elmo_representations']\n\n # print(len(elmo_embed))\n # print(elmo_embed[0].shape)\n # elmo_embed = torch.Tensor(elmo_embed)\n # avg_embed = elmo_embed[0]\n # embed = self.elmo.embed_sentences(batch[0])\n # avg_embd = [torch.mean(x, axis=0) for x in elmo_embed]\n avg_embd = torch.cat(elmo_embed, 2)\n # print(avg_embd.shape)\n # avg_embed = torch.FloatTensor(avg_embd)\n # if self.on_cuda():\n # avg_embed.cuda()\n \n # if self.on_cuda():\n # embed.cuda()\n out = self.layers(avg_embd)\n\n # logger.debug(\"output tensor is if size %s: %s\" % (out.size(), out, ))\n return out", "title": "" }, { "docid": "57863834f9a4875088dce9fe36d94fd2", "score": "0.6010376", "text": "def forward(self, x, alpha=1e-8):\r\n batch_size, _, height, width = x.shape\r\n # [B x C x H x W] Subtract mean over batch.\r\n y = x - x.mean(dim=0, keepdim=True)\r\n # [1 x C x H x W] Calc standard deviation over batch\r\n y = torch.sqrt(y.pow(2.).mean(dim=0, keepdim=False) + alpha)\r\n\r\n # [1] Take average over feature_maps and pixels.\r\n y = y.mean().view(1, 1, 1, 1)\r\n\r\n # [B x 1 x H x W] Replicate over group and pixels.\r\n y = y.repeat(batch_size, 1, height, width)\r\n\r\n # [B x C x H x W] Append as new feature_map.\r\n y = torch.cat([x, y], 1)\r\n # return the computed values:\r\n return y", "title": "" 
}, { "docid": "0e93f5b3cbab12b6bf19be8aac09b9c6", "score": "0.59976274", "text": "def forward(self, x):\n batch_size = x.size()[0]\n\n # Encode image to hidden features\n features = self.img_to_features(x)\n hidden = self.features_to_hidden(features.view(batch_size, -1))\n\n mean = self.fc_mean(hidden)\n log_var = self.fc_log_var(hidden)\n latent_dist = Normal(mean, F.softplus(log_var))\n latent_sample = self.sample_normal(mean, log_var)\n return self.decode(latent_sample), (mean, log_var)", "title": "" }, { "docid": "69b843d23f93d539c5d7299a970abbf7", "score": "0.59945554", "text": "def forward(self, input):\n mean = None\n x = self.relu(self.fc3(input))\n x = self.fc4(x)\n mean = self.sigmoid(x)\n\n return mean", "title": "" }, { "docid": "1a68f11a296c46bf872006650c94d52b", "score": "0.5989687", "text": "def forward(self, x: Tensor) -> Tensor:\n x = self.features(x)\n x = self.avgpool(x) # 512×14×14\n x = x.view(x.shape[0], x.shape[1], -1) # 512×196\n x = x.permute(0, 2, 1) # 196×512\n return x", "title": "" }, { "docid": "3ca35346008012ff0bc16be81df7ffdd", "score": "0.59789175", "text": "def forward(self, x, eps=1e-8):\n bs, nc, h, w = x.shape\n x_flat = x.view(bs, nc, -1)\n mean = x_flat.mean(-1).unsqueeze(-1).unsqueeze(-1) # take mean across width and height, bs x nc\n std = x_flat.std(-1).unsqueeze(-1).unsqueeze(-1) # bs x nc\n return (x - mean) / (std + eps) # normalize across each channel separately", "title": "" }, { "docid": "c352f62c5f1592ab9cb61b3e44499c34", "score": "0.5976637", "text": "def forward(self, inputs):\n bs = inputs.size(0)\n # Convolution layers\n x = self.extract_features(inputs)\n\n # Pooling and final linear layer\n x = self._avg_pooling(x)\n x = x.view(bs, -1)\n x = self._dropout(x)\n x = self.fc(x)\n return x", "title": "" }, { "docid": "019ed7375603240dcc400e26b8267772", "score": "0.59725493", "text": "def forward(self, input):\n\n #Encoding step\n mean, std_log_var = self.encoder(input)\n epsilon = torch.randn_like(mean)\n z = mean + torch.exp(0.5 * std_log_var) * epsilon\n \n #Decoding step\n encoded_sample = self.decoder(z)\n\n #Reconstruction term\n l_recon = nn.functional.binary_cross_entropy(encoded_sample, input,reduction='sum')/input.shape[0]\n \n #Regularised term \n #This is different than my own derivation but is similar and its the same as the original paper, where there the use the '-' \n l_reg_KL = -0.5 * torch.sum(1 + std_log_var - mean.pow(2) - std_log_var.exp()) /input.shape[0]\n\n average_negative_elbo = (l_recon + l_reg_KL) \n\n return average_negative_elbo", "title": "" }, { "docid": "ee5132aca7f3d54ca574bfd64ae09380", "score": "0.59516835", "text": "def forward(self, imgs):\n\n # Encoder Stage - 1\n img1, img2, img3, img4, img5, img6 = imgs[:, 0, :, :, :], imgs[:, 1, :, :, :], imgs[:, 2, :, :, :],\\\n imgs[:, 3, :, :, :], imgs[:, 4, :, :,:], imgs[:, 5, :, :, :]\n\n encodings = []\n\n for input_img in (img1, img2, img3, img4, img5, img6):\n dim_0 = input_img.size()\n x_00 = F.relu(self.encoder_conv_00(input_img))\n x_01 = F.relu(self.encoder_conv_01(x_00))\n x_0, indices_0 = F.max_pool2d(x_01, kernel_size=2, stride=2, return_indices=True)\n\n # Encoder Stage - 2\n dim_1 = x_0.size()\n x_10 = F.relu(self.encoder_conv_10(x_0))\n x_11 = F.relu(self.encoder_conv_11(x_10))\n x_1, indices_1 = F.max_pool2d(x_11, kernel_size=2, stride=2, return_indices=True)\n\n # Encoder Stage - 3\n dim_2 = x_1.size()\n x_20 = F.relu(self.encoder_conv_20(x_1))\n x_21 = F.relu(self.encoder_conv_21(x_20))\n x_22 = F.relu(self.encoder_conv_22(x_21))\n x_2, 
indices_2 = F.max_pool2d(x_22, kernel_size=2, stride=2, return_indices=True)\n\n # Encoder Stage - 4\n dim_3 = x_2.size()\n x_30 = F.relu(self.encoder_conv_30(x_2))\n x_31 = F.relu(self.encoder_conv_31(x_30))\n x_32 = F.relu(self.encoder_conv_32(x_31))\n x_3, indices_3 = F.max_pool2d(x_32, kernel_size=2, stride=2, return_indices=True)\n\n # Encoder Stage - 5\n dim_4 = x_3.size()\n x_40 = F.relu(self.encoder_conv_40(x_3))\n x_41 = F.relu(self.encoder_conv_41(x_40))\n x_42 = F.relu(self.encoder_conv_42(x_41))\n x_4, indices_4 = F.max_pool2d(x_42, kernel_size=2, stride=2, return_indices=True)\n\n # Decoder\n\n dim_d = x_4.size()\n\n # Decoder Stage - 5\n x_4d = F.max_unpool2d(x_4, indices_4, kernel_size=2, stride=2, output_size=dim_4)\n x_42d = F.relu(self.decoder_convtr_42(x_4d))\n x_41d = F.relu(self.decoder_convtr_41(x_42d))\n x_40d = F.relu(self.decoder_convtr_40(x_41d))\n dim_4d = x_40d.size()\n\n # Decoder Stage - 4\n x_3d = F.max_unpool2d(x_40d, indices_3, kernel_size=2, stride=2, output_size=dim_3)\n x_32d = F.relu(self.decoder_convtr_32(x_3d))\n x_31d = F.relu(self.decoder_convtr_31(x_32d))\n x_30d = F.relu(self.decoder_convtr_30(x_31d))\n dim_3d = x_30d.size()\n\n # Decoder Stage - 3\n x_2d = F.max_unpool2d(x_30d, indices_2, kernel_size=2, stride=2, output_size=dim_2)\n x_22d = F.relu(self.decoder_convtr_22(x_2d))\n x_21d = F.relu(self.decoder_convtr_21(x_22d))\n x_20d = F.relu(self.decoder_convtr_20(x_21d))\n dim_2d = x_20d.size()\n\n # Decoder Stage - 2\n x_1d = F.max_unpool2d(x_20d, indices_1, kernel_size=2, stride=2, output_size=dim_1)\n x_11d = F.relu(self.decoder_convtr_11(x_1d))\n x_10d = F.relu(self.decoder_convtr_10(x_11d))\n dim_1d = x_10d.size()\n\n # Decoder Stage - 1\n x_0d = F.max_unpool2d(x_10d, indices_0, kernel_size=2, stride=2, output_size=dim_0)\n x_01d = F.relu(self.decoder_convtr_01(x_0d))\n x_00d = F.relu(self.decoder_convtr_00(x_01d))\n dim_0d = x_00d.size()\n encodings.append(x_00d)\n\n feature_map = torch.cat(encodings, dim=1)\n\n\n\n\n logits = self.decoder(x_00d)\n logits = torch.squeeze(logits, 1)\n # x_softmax = F.softmax(x_00d, dim=1\n probs = torch.sigmoid(logits)\n\n\n return logits, probs", "title": "" }, { "docid": "fca8bf37721a26953d4440af80346aab", "score": "0.59406424", "text": "def forward(self, X):\n\n batch_size = X.shape[0]\n\n A = torch.ones_like(X).bool()\n A = A[...,None] | A[...,None,:]\n\n X_emb = self.embedder(X)\n \n flow = self.latent_encoder(X_emb, A)\n \n c = self.z_to_c(flow.z_k).view(batch_size, 155, -1)\n\n X_dec = self.decoder(torch.ones_like(X_emb), A, c)\n X_hat = self.logits(X_dec)\n\n return {\n 'X_hat': X_hat,\n 'flow': flow,\n }", "title": "" }, { "docid": "6278f8349b3da1c54dd5712fabebdc56", "score": "0.59364593", "text": "def forward(self, batch: torch.Tensor) -> torch.Tensor:\n output = self.embedding(batch) * math.sqrt(self.sz_emb)\n return self.positional_encoding(output)", "title": "" }, { "docid": "1c5dd38c1d7a45f21930a43f7482224c", "score": "0.59343445", "text": "def forward(self, x_batch: np.ndarray) -> np.ndarray:\n x_out = x_batch\n for layer in self.layers:\n x_out = layer.forward(x_out)\n\n return x_out", "title": "" }, { "docid": "fb2d787ca623d657787a21b7357b5a7e", "score": "0.59291375", "text": "def forward(self, x):\n out = x\n out = self.layer_dict['input_conv'].forward(out)\n for i in range(self.num_stages): # for number of layers times\n for j in range(self.num_blocks_per_stage):\n out = self.layer_dict['block_{}_{}'.format(i, j)].forward(out)\n out = 
self.layer_dict['reduction_block_{}'.format(i)].forward(out)\n\n out = F.avg_pool2d(out, out.shape[-1])\n out = out.view(out.shape[0], -1) # flatten outputs from (b, c, h, w) to (b, c*h*w)\n out = self.logit_linear_layer(out) # pass through a linear layer to get logits/preds\n return out", "title": "" }, { "docid": "6ff49d73a64754c8c72f344d4e5ac449", "score": "0.59207886", "text": "def forward(self, batch_data):\n features = batch_data['X'].to(self.device)\n weights = batch_data['X_w'].to(self.device)\n out = F.embedding(\n features, self.weight,\n self.padding_idx, self.max_norm, self.norm_type,\n self.scale_grad_by_freq, self.sparse)\n out = weights.unsqueeze(1).bmm(out).squeeze()\n return out", "title": "" }, { "docid": "70de154619f4d5b5a7034655b3c10c6a", "score": "0.59085536", "text": "def forward(self, images):\n # train the model\n features = self.model(images) # (batch_size, 1024, 7, 7)\n features = self.adaptive_pool(features) # (batch_size, 1024, encoded_image_size, encoded_image_size)\n features = features.permute(0, 2, 3, 1) # (batch_size, encoded_image_size, encoded_image_size, 1024)\n\n return features", "title": "" }, { "docid": "025c3341c64f9247b5de827ba63ac0e8", "score": "0.5892703", "text": "def forward(self,x,train=False):\n return", "title": "" }, { "docid": "e64753b9dcf0c28d4cff8b694a74bfc8", "score": "0.5880984", "text": "def forward(self, x):\n shape = list(x.size())\n target_shape = copy.deepcopy(shape)\n\n # compute the std's over the minibatch\n vals = self.adjusted_std(x, dim=0, keepdim=True)\n\n # perform averaging\n if self.averaging == 'all':\n target_shape[1] = 1\n vals = th.mean(vals, dim=1, keepdim=True)\n\n elif self.averaging == 'spatial':\n if len(shape) == 4:\n vals = th.mean(th.mean(vals, 2, keepdim=True), 3, keepdim=True)\n\n elif self.averaging == 'none':\n target_shape = [target_shape[0]] + [s for s in target_shape[1:]]\n\n elif self.averaging == 'gpool':\n if len(shape) == 4:\n vals = th.mean(th.mean(th.mean(x, 2, keepdim=True),\n 3, keepdim=True), 0, keepdim=True)\n elif self.averaging == 'flat':\n target_shape[1] = 1\n vals = th.FloatTensor([self.adjusted_std(x)])\n\n else: # self.averaging == 'group'\n target_shape[1] = self.n\n vals = vals.view(self.n, self.shape[1] /\n self.n, self.shape[2], self.shape[3])\n vals = th.mean(vals, 0, keepdim=True).view(1, self.n, 1, 1)\n\n # spatial replication of the computed statistic\n vals = vals.expand(*target_shape)\n\n # concatenate the constant feature map to the input\n y = th.cat([x, vals], 1)\n\n # return the computed value\n return y", "title": "" }, { "docid": "cb5da1e18290b49cb03ae8fd9372d57c", "score": "0.58769625", "text": "def encoder(self, batch):\n unused_outputs, last_states = tf.nn.bidirectional_dynamic_rnn(\n self.enc_cell_fw,\n self.enc_cell_bw,\n batch,\n time_major=False,\n swap_memory=True,\n dtype=tf.float32,\n scope='ENC_RNN')\n\n last_state_fw, last_state_bw = last_states\n last_h_fw = self.enc_cell_fw.get_output(last_state_fw)\n last_h_bw = self.enc_cell_bw.get_output(last_state_bw)\n last_h = tf.concat([last_h_fw, last_h_bw], 1)\n mu = rnn.super_linear(\n last_h,\n self.hps.z_size,\n input_size=self.hps.enc_rnn_size * 2, # bi-dir, so x2\n scope='ENC_RNN_mu',\n init_w='gaussian',\n weight_start=0.001)\n # presig = rnn.super_linear(\n # last_h,\n # self.hps.z_size,\n # input_size=self.hps.enc_rnn_size * 2, # bi-dir, so x2\n # scope='ENC_RNN_sigma',\n # init_w='gaussian',\n # weight_start=0.001)\n return mu", "title": "" }, { "docid": "ed639de13eb024f5a50ee3de64209115", "score": 
"0.5876039", "text": "def train_encoder(self):\n if self.opt.language != '':\n self._build_ud_dataset()\n else:\n self._build_wsj_dataset()\n self.encoder = self.create_encoder()\n self.optim = self.optimizers[self.opt.optimizer](\n [{'params': [param for param in self.encoder.parameters() if param.requires_grad]}])\n self.encoder.train()\n\n self.len_train = len(self.dataset.train)\n self.len_real_train = 0\n for i in range(1, self.len_train + 1):\n sample = self.train_sampler.next()\n if sample.word[1].item() <= self.opt.train_max_length:\n self.len_real_train += 1\n total_loss_act = 0.\n for epoch in range(1, self.opt.epochs + 1):\n cur_time = time.time()\n cur_sample = 0\n i = 0\n for _ in range(1, self.len_train + 1):\n self.optim.zero_grad()\n sample = self.train_sampler.next()\n if sample.word[1].item() > self.opt.train_max_length:\n continue\n i += 1\n loss_act = self.encoder.train_parser(words=sample.word[0], pos_tags=sample.pos_tag, oracle_actions=sample.action)\n if loss_act is not None:\n total_loss_act += loss_act.data.item()\n loss_act.backward()\n self.optim.step()\n if i % self.opt.print_every == 0 or i == self.len_real_train:\n elapsed_time = time.time() - cur_time\n cur_time = time.time()\n elapsed_sample = i - cur_sample\n cur_sample = i\n self.logger.info('epoch {:3d} | {:5d}/{:5d} | avg loss act {:5.2f} | time {:5.2f}s'. \\\n format(epoch, i, self.len_real_train,\n total_loss_act / elapsed_sample, elapsed_time))\n total_loss_act = 0.\n self.logger.info('=' * 80)\n valid_dda = self.parse(self.valid_sampler)\n self.valid_sampler = self.valid_iter.__iter__() # renew the iterator\n self.logger.info('epoch {:3d} | valid dda {:5.2f}'.format(epoch, valid_dda))\n test_dda = self.parse(self.test_sampler)\n self.test_sampler = self.test_iter.__iter__() # renew the iterator\n self.logger.info('epoch {:3d} | test dda {:5.2f}'.format(epoch, test_dda))\n self.logger.info('=' * 80)", "title": "" }, { "docid": "8af306d2b84b2846daf9775e22ac3478", "score": "0.587294", "text": "def forward(self, x, train=False):\n pass", "title": "" }, { "docid": "beecb2697a078bb7b36d98552428e830", "score": "0.5869555", "text": "def forward(self, batch=None):\n emb = self.embedding.weight\n if self.num_node_feats:\n node_feat_transform = self.lin_node(self.node_feats)\n master_emb = torch.cat([emb, node_feat_transform], 1)\n else:\n master_emb = emb\n return master_emb if batch is None else master_emb[batch]", "title": "" }, { "docid": "f470219c2d836e10f69c5d9dfd877029", "score": "0.58635026", "text": "def forward(self, X: np.ndarray) -> np.ndarray:\n if self.n_in is None:\n self._init_parameters(X.shape)\n\n W = self.parameters[\"W\"]\n b = self.parameters[\"b\"]\n\n kernel_height, kernel_width, in_channels, out_channels = W.shape\n n_examples, in_rows, in_cols, in_channels = X.shape\n kernel_shape = (kernel_height, kernel_width)\n\n ### BEGIN YOUR CODE ###\n padded_x, p = pad2d(X, self.pad, kernel_shape, stride=self.stride)\n _, padH, padW, _ = p\n\n # implement a convolutional forward pass\n Hout = int(1 + (in_rows + 2*padH - kernel_height) / self.stride)\n Wout = int(1 + (in_cols + 2*padW - kernel_width) / self.stride)\n Z = np.empty((n_examples,Hout,Wout,out_channels))\n \n for h in range(Hout):\n for wi in range(Wout):\n toConvolute = padded_x[:, h*self.stride : h*self.stride+kernel_height, wi*self.stride : wi*self.stride+kernel_width, :]\n for f in range(out_channels):\n Z[:, h, wi, f] = np.sum(toConvolute*W[:, :, :, f], axis=(1,2,3)) + b[0, f]\n \n out = self.activation(Z)\n\n # cache any values 
required for backprop\n self.cache[\"X\"] = X\n self.cache[\"Z\"] = Z\n ### END YOUR CODE ###\n\n return out", "title": "" }, { "docid": "124aecfd7ea602924a7e45c41ee15956", "score": "0.58433264", "text": "def forward(self, w, data, training_mask=None):\n\n raise NotImplementedError()", "title": "" }, { "docid": "0b781a92d852268030561e9ec2025471", "score": "0.5841712", "text": "def forward(self, batch):\n # Ensure contiguous memory. Necessary when running multi-GPU.\n self.encoder.rnn.flatten_parameters()\n self.decoder.rnn.flatten_parameters()\n batch.to(DEVICE)\n batch_size = len(batch)\n vocab = batch.vocab\n vocab_size = len(vocab)\n\n # Get word embeddings for encoder\n embedded = self.embedding(vocab.filter_oov(batch.inputs))\n # Run embeddings through encoder.\n enc_outputs, enc_state = self.encoder(embedded, batch.input_lengths)\n # Calculate encoder attention features (relevant only for some attn_models)\n enc_features = self.encoder_projection(enc_outputs)\n # Prepare input for decoder\n dec_input = torch.LongTensor([vocab.SOS] * batch_size).to(DEVICE)\n # Use last (forward) hidden state from encoder\n dec_state = self.reduce_state(enc_state)\n dec_context = torch.zeros(batch_size, self.hidden_size * 2, device=DEVICE)\n\n # Prepare tensor to store outputs\n max_target_length = batch.target_lengths.max()\n outputs = torch.zeros(batch_size, vocab_size, max_target_length, device=DEVICE)\n # Prepare tensors to store stepwise coverage loss\n step_cov_losses = torch.zeros(batch_size, max_target_length, device=DEVICE)\n\n coverage = (\n torch.zeros(batch_size, max(batch.input_lengths), device=DEVICE)\n if self.coverage\n else None\n )\n\n # Run through decoder one time step at a time\n for t in range(max_target_length):\n embedded = self.embedding(dec_input)\n dec_output, dec_state, dec_context, attn, new_coverage = self.decoder(\n embedded,\n dec_state,\n dec_context,\n enc_outputs,\n batch.input_mask,\n batch.inputs,\n vocab_size,\n coverage,\n enc_features,\n )\n\n if self.coverage:\n step_cov_loss = torch.sum(torch.min(coverage, attn), dim=1)\n step_cov_losses[:, t] = step_cov_loss\n coverage = new_coverage\n\n outputs[:, :, t] = dec_output\n\n # Next input is current target (teacher forcing)\n dec_input = batch.targets[:, t].clone()\n\n if self.sample_when_unknown:\n # sub UNKs in teacher forced input, if didn't predict OOV\n for i in range(batch_size):\n if dec_input[i].item() == vocab.UNK:\n pred = dec_output[i].argmax()\n dec_input[i] = pred\n\n # Note that we do in place filter since we already cloned\n vocab.filter_oov_(dec_input)\n\n loss, cov_loss = self._calc_loss(outputs, batch, step_cov_losses)\n return (loss, cov_loss, outputs)", "title": "" }, { "docid": "75896fab610a4f51ffae6459ac9f9b6a", "score": "0.5832709", "text": "def forward(self, x):\n\n # flattening the input matrix\n batch = x.shape[0]\n shape = x.shape[1:]\n indices = []\n sizes = []\n\n # forward through encoder\n for layer in self.encoder:\n if(isinstance(layer, nn.MaxPool2d)):\n sizes.append(x.size())\n x, idx = layer(x)\n indices.append(idx)\n else:\n x = layer(x)\n\n # forward through decoder\n i = 1\n for layer in self.decoder:\n if(isinstance(layer, nn.MaxUnpool2d)):\n x = layer(x, indices[-i], output_size=sizes[-i])\n i = i + 1\n else:\n x = layer(x)\n\n return x", "title": "" }, { "docid": "4dd17bb1eed771d5688b2b773c16c4f2", "score": "0.5826671", "text": "def forward(self, x):\n \n batch_size = x.shape[0]\n \n out = self.feature_extractor.forward(x)\n \n out = self.classifier(out)\n \n expected_shape 
= (batch_size, self.num_classes)\n assert out.shape == (batch_size, self.num_classes),\\\n f\"Expected output of forward pass to be: {expected_shape}, but got: {out.shape}\"\n return out", "title": "" }, { "docid": "a5eddfe46525e9c36fa05d0cdc9f1b84", "score": "0.58261585", "text": "def forward(self, inputs, mode = \"analytic\"):\n\n # # TODO: add padding to the inputs\n # assert input_height % self.kernel_height == 0 and input_width % self.kernel_width == 0, \\\n # \"The input height and/or width is not divisible by kernel_size.\"\n\n if mode == \"analytic\":\n outputs = F.avg_pool2d(inputs, (self.kernel_height, self.kernel_width))\n\n else: # if mode in [\"MAP\", \"sampling\"]:\n\n # 4th-order: batch_size(0) x channels(1) x input_height(2) x input_width(3)\n (batch_size, channels, input_height, input_width) = inputs.size()\n\n # compute height and width of the output feature maps\n output_height = input_height // self.kernel_height\n output_width = input_width // self.kernel_width\n\n kernel_area = self.kernel_height * self.kernel_width\n output_area = output_height * output_width\n\n # fold the inputs into [batch_size, channels * kernel_area, output_area]\n inputs = F.unfold(inputs, kernel_size = (self.kernel_height, self.kernel_width),\n stride = (self.kernel_height, self.kernel_width))\n\n # reshape the inputs into [batch_size, channels, kernel_area, output_area]\n inputs = inputs.view(batch_size, channels, kernel_area, output_area)\n\n # permute the inputs into [batch_size, channels, output_area, kernel_area]\n inputs = inputs.permute(0, 1, 3, 2).contiguous()\n\n # reshape the inputs into [batch_size * channels * output_area, kernel_area]\n num_patches = batch_size * channels * output_area\n inputs = inputs.view(num_patches, kernel_area)\n\n # sample uniformly from the inputs, returning a vector of length [batch_size * channels * output_area]\n outputs = inputs[range(num_patches), torch.randint(0, kernel_area, (num_patches,))]\n\n # reshape the outputs into [batch_size, channels, output_height, output_width]\n outputs = outputs.view(batch_size, channels, output_height, output_width)\n \n return outputs", "title": "" }, { "docid": "3d4f974b7fc528fd224cdfab4ed79db5", "score": "0.58249176", "text": "def __feature_encoder(self, image):\n return self.fnet(image)", "title": "" }, { "docid": "15ae8ba61e72d92af0e7f2527f13db07", "score": "0.5817203", "text": "def forward_step(self, encoder_output, prev_word_embeddings, states):\n decoder_hidden_state, decoder_cell_state = states\n\n attention_weighted_encoding, alpha = self.attention(\n encoder_output, decoder_hidden_state\n )\n gating_scalars = self.sigmoid(self.f_beta(decoder_hidden_state))\n attention_weighted_encoding = gating_scalars * attention_weighted_encoding\n\n decoder_input = torch.cat(\n (prev_word_embeddings, attention_weighted_encoding), dim=1\n )\n decoder_hidden_state, decoder_cell_state = self.decode_step(\n decoder_input, (decoder_hidden_state, decoder_cell_state)\n )\n\n decoder_hidden_state_embedded = self.linear_h(decoder_hidden_state)\n attention_weighted_encoding_embedded = self.linear_z(\n attention_weighted_encoding\n )\n scores = self.linear_o(\n self.dropout(\n prev_word_embeddings\n + decoder_hidden_state_embedded\n + attention_weighted_encoding_embedded\n )\n )\n\n states = [decoder_hidden_state, decoder_cell_state]\n return scores, states, alpha", "title": "" }, { "docid": "c3fdf37e57d05f4cf1c3e88b954f85cb", "score": "0.5809043", "text": "def forward(self, inputs, valid_length=None): # pylint: 
disable=arguments-differ\n x = self.embed(inputs)\n x = x.transpose(axes=(1, 0, 2))\n x, additional_outputs = self.encoder(x, states=None, valid_length=valid_length)\n x = x.transpose(axes=(1, 0, 2))\n\n return x", "title": "" }, { "docid": "33be6a4bc7538c8b2d47b251459fe787", "score": "0.58085245", "text": "def training_step(self, batch, batch_idx):\n image, _ = batch\n out = self.forward(image, only_encoder=False)\n loss = self.criterion(out, image)\n self.log('train_loss', loss, prog_bar=True)\n return loss", "title": "" }, { "docid": "3207453331e1321dba281df927740485", "score": "0.5807778", "text": "def forward(self, x: Tensor) -> int:\n out = self.enc(x)\n \n \"\"\"\n Classifier Section\n \"\"\" \n out = self.MaxPool(out)\n out = self.batchnorm(out)\n out = self.relu(out)\n out = self.conv2dense(out)\n out = self.GlobalAvgPool2d(out)\n out = self.flatten(out)\n out = self.dense_classifier(out)\n #out = self.output_activation(out)\n return out", "title": "" }, { "docid": "6ec5f3931b4c8841a9b90bf5df28e7ef", "score": "0.5800814", "text": "def normal_ae_forward(self,x):\n if hasattr(self,'share_head'):\n if self.share_head is not None:\n x = self.share_head(x) \n latent_rep = self.encoder_mean(x)\n x_rec = self.decoder(latent_rep)\n\n return x_rec,latent_rep", "title": "" }, { "docid": "e1e1e2b7c3429fe8101b85bd1323f1ea", "score": "0.5792708", "text": "def mean_calc(self):\n for sample in self._train_samples:\n input_image_full_path = sample.input_image_full_path\n transform = sample.transform\n _ = sample.mode\n image = cv2.imread(input_image_full_path, cv2.IMREAD_GRAYSCALE)\n image = cv2.resize(image, (self._img_width, self._img_height))\n\n # 'raw', 'cw_90', 'cw_180', 'cw_270', 'h_mirror', 'v_mirror'\n if transform == 'raw':\n pass\n elif transform == 'cw_90':\n image = np.rot90(image)\n elif transform == 'cw_180':\n image = np.rot90(np.rot90(image))\n elif transform == 'cw_270':\n image = np.rot90(np.rot90(np.rot90(image)))\n elif transform == 'h_mirror':\n image = cv2.flip(image, 1)\n elif transform == 'v_mirror':\n image = cv2.flip(image, 0)\n else:\n print 'Error: do not support other transformation!'\n break\n\n image = np.expand_dims(image, axis=-1)\n self._average_pixels += image\n self._average_pixels = self._average_pixels / len(self._train_samples)\n mean_dump_wf = open(self._dump_norm_full_path, 'wb')\n pickle.dump(self._average_pixels, mean_dump_wf)\n print 'Dump mean of images to {}...'.format(self._dump_norm_full_path)", "title": "" }, { "docid": "e13fc42d594e97551f23e82d33295af3", "score": "0.5787525", "text": "def forward(self, x, mask):\n # encoder process\n b, n, h, w = x.size()\n if h != 512 or w != 512: # resize the image to fixed size for inpainting\n x = nn.functional.interpolate(x, (512, 512))\n mask = torch.nn.functional.interpolate(mask, (512, 512))\n features = self.net_E(x, mask=mask)\n # decoder process\n f_in, f_e, scale_mask = self.get_G_inputs(features, mask)\n g_img, attn_f = self.net_G(f_in, f_e, scale_mask)\n if h != 512 or w != 512:\n for i in range(len(g_img)):\n g_img[i] = nn.functional.interpolate(g_img[i], (h, w))\n return g_img", "title": "" }, { "docid": "b4e087d762314467187eab95f7a56be4", "score": "0.57858396", "text": "def forward(self, x: torch.Tensor, encoder_features: List[torch.Tensor, ...]) -> torch.Tensor:\n for i in range(len(self.channels)-1):\n x = self.ups[i](x)\n encoder_feature = self.crop(encoder_features[i], x)\n x = torch.cat([x, encoder_feature], dim=1)\n x = self.decode_UNET[i](x)\n return x", "title": "" }, { "docid": 
"d00984a2bacda08d81e48369aa9e8c22", "score": "0.57835585", "text": "def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.model(x)\n # Average pooling and flatten\n\n output = torch.nn.functional.avg_pool2d(x, x.size()[2:]).view(x.size()[0], -1)\n return torch.sigmoid(output) if self.use_sigmoid else output", "title": "" }, { "docid": "5808b042fc01640e6154f44cf121f330", "score": "0.57824564", "text": "def forward(self, img, return_loss=True, **kwargs):\n return self.model(img, return_loss=return_loss, **kwargs)", "title": "" }, { "docid": "f480524509a8171352a7f280757376fa", "score": "0.5778512", "text": "def forward(self, x):\n # 299 x 299 x 3\n x = self.Conv2d_1a_3x3(x)\n # 149 x 149 x 32\n x = self.Conv2d_2a_3x3(x)\n # 147 x 147 x 32\n x = self.Conv2d_2b_3x3(x)\n # 147 x 147 x 64\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 73 x 73 x 64\n x = self.Conv2d_3b_1x1(x)\n # 73 x 73 x 80\n x = self.Conv2d_4a_3x3(x)\n # 71 x 71 x 192\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 35 x 35 x 192\n x = self.Mixed_5b(x)\n # 35 x 35 x 256\n x = self.Mixed_5c(x)\n # 35 x 35 x 288\n x = self.Mixed_5d(x)\n # 35 x 35 x 288\n x = self.Mixed_6a(x)\n # 17 x 17 x 768\n x = self.Mixed_6b(x)\n # 17 x 17 x 768\n x = self.Mixed_6c(x)\n # 17 x 17 x 768\n x = self.Mixed_6d(x)\n # 17 x 17 x 768\n x = self.Mixed_6e(x)\n # 17 x 17 x 768\n if self.training and self.aux_logits:\n aux = self.AuxLogits(x)\n # 17 x 17 x 768\n x = self.Mixed_7a(x)\n # 8 x 8 x 1280\n x = self.Mixed_7b(x)\n # 8 x 8 x 2048\n x = self.Mixed_7c(x)\n # 8 x 8 x 2048\n #x = F.avg_pool2d(x, kernel_size=x.size()[2:])\n\n # 1 x 1 x 2048\n x = F.dropout(x, training=self.training)\n # 1 x 1 x 2048\n # x = x.view(x.size(0), -1)\n # 2048\n\n return x", "title": "" }, { "docid": "dcb4986c934dedb834035974c31e1341", "score": "0.5770534", "text": "def forward(self, x):\n # Get encoder activations\n x_enc = [x]\n for l in self.enc:\n x_enc.append(l(x_enc[-1]))\n\n # Three conv + upsample + concatenate series\n y = x_enc[-1]\n for i in range(3):\n y = self.dec[i](y)\n y = self.upsample(y)\n y = torch.cat([y, x_enc[-(i + 2)]], dim=1)\n\n # Two convs at full_size/2 res\n y = self.dec[3](y)\n y = self.dec[4](y)\n\n # Upsample to full res, concatenate and conv\n if self.full_size:\n y = self.upsample(y)\n y = torch.cat([y, x_enc[0]], dim=1)\n y = self.dec[5](y)\n\n # Extra conv for vm2\n if self.vm2:\n y = self.vm2_conv(y)\n\n return y", "title": "" }, { "docid": "e0e402a46551a9e525f95e1abe9261f9", "score": "0.57681346", "text": "def forward(self, data, target, device, teach_rate=0):\n # check shape\n batch_size, n_frames, dim_data = data.shape\n assert(n_frames == self.n_frames)\n assert(dim_data == self.dim_data)\n assert(target.shape[0] == batch_size) # data, target batch_size mismatch\n assert(target.shape[1] == self.encode_len)\n # check end\n # predict start\n encode_out, encode_hid = self.encoder(data)\n pre_prob, pre_sent = self.decoder(encode_out, encode_hid, target, device, teach_rate)\n return pre_prob, pre_sent", "title": "" }, { "docid": "ff041d238704c1b64c14c737d6f9c7de", "score": "0.5765087", "text": "def forward(self, src, src_mask=None):\n memory = self.encoder(src, src_mask=src_mask)\n return memory", "title": "" }, { "docid": "77a901f08e0e1088b3865ef6ed58f317", "score": "0.5753515", "text": "def forward(self, editor_input, draw_samples, draw_p=False):\n encoder_output = self.encoder(editor_input.encoder_input, draw_samples, draw_p)\n total_loss = self.train_decoder.loss(encoder_output, editor_input.train_decoder_input)\n 
return total_loss", "title": "" }, { "docid": "cd3c6f3791c7faee8506a828683e004c", "score": "0.5747975", "text": "def forward(self):\n values = [x.value for x in self.inbound_neurons]\n self.value = math.fsum(values)", "title": "" }, { "docid": "23a6dde99c865a06a1b4911d28082051", "score": "0.57357466", "text": "def forward(self, batch: torch.Tensor) -> torch.Tensor:\n batch_pos_enc = self.pos_enc[: batch.size(1)]\n batch_pos_enc = batch_pos_enc.unsqueeze(0).repeat(batch.size(0), 1, 1)\n return self.dropout(batch + batch_pos_enc)", "title": "" }, { "docid": "a5659fab69935d436d48b99f820df59d", "score": "0.57312185", "text": "def forward(self, input_batch, ):\n # flatten neurons, time dimension\n x = torch.flatten(input_batch, start_dim=2, end_dim=-1)\n # x shape: (nr_batches, len_sequence, nr_neurons*time)\n\n # Encoding\n x = self.encoder_layer(x)\n x = F.relu(x)\n # x shape: (nr_batches, len_sequence, x_feat_length)\n\n # Recurrent network\n x, h_n = self.rnn(x, ) # h_0 can still be given\n # x shape: (nr_batches, len_sequence, gru_hidden_size)\n\n # Decoding\n y_logit = self.decoder_layer(x)\n # y_logit shape: (nr_batches, len_sequence, n_images)\n\n return y_logit", "title": "" }, { "docid": "93e6f66faafd1cccb8c97f3d74abdcc2", "score": "0.57253635", "text": "def forward(self, input_data, name):\n with tf.variable_scope(name_or_scope=name):\n # centerlized data\n if self._summary:\n tf.summary.image('Encoder_Inputs_With_Normal', input_data[:1], max_outputs=1)\n if config.USE_STN:\n input_data = self._stn.forward(input_data)\n input_data = tf.subtract(tf.divide(input_data, 127.5), 1.0)\n\n # first apply the cnn feature extraction stage\n cnn_out = self._cnn_extraction(\n inputdata=input_data, name='feature_extraction_module'\n )\n\n # second apply the map to sequence stage\n sequence = self._map_to_sequence(\n inputdata=cnn_out, name='map_to_sequence_module'\n )\n\n # third apply the sequence lstm stage\n net_out = self._sequence_lstm(\n inputdata=sequence, name='sequence_rnn_module'\n )\n\n return net_out", "title": "" }, { "docid": "7fe891c79fe557eb7d04e6ad82125dec", "score": "0.57235926", "text": "def preval_forward(self, data_shot, label_shot, data_query):\n embedding_query = self.encoder(data_query)\n embedding_shot = self.encoder(data_shot)\n logits = self.base_learner(embedding_shot)\n loss = F.cross_entropy(logits, label_shot)\n grad = torch.autograd.grad(loss, self.base_learner.parameters())\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, self.base_learner.parameters())))\n logits_q = self.base_learner(embedding_query, fast_weights)\n\n for _ in range(1, 100):\n logits = self.base_learner(embedding_shot, fast_weights)\n loss = F.cross_entropy(logits, label_shot)\n grad = torch.autograd.grad(loss, fast_weights)\n fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_weights)))\n logits_q = self.base_learner(embedding_query, fast_weights)\n return logits_q", "title": "" }, { "docid": "f82d23310153239fa889f7966575c0e2", "score": "0.5714538", "text": "def forward(self, input):\n x = input\n x_shape = x.shape\n if self.att:\n x = self.conv1(x)\n # x = x.reshape(x_shape[0], -1)\n if self.act():\n x = self.act(x)\n else:\n if self.norm == 'bn':\n x = self.act(self.bn1(self.conv1(x)))\n else:\n x = self.act(self.conv1(x))\n\n d = x.dim()\n if d == 4:\n batch, freq, height, width = x.shape\n if height == 1:\n x = x.reshape(batch, freq, width)\n elif width == 1:\n x = x.reshape(batch, freq, height)\n elif d == 3:\n batch, freq, width = x.shape\n if width == 
1:\n x = x.reshape(batch, -1)\n\n return x", "title": "" }, { "docid": "d8661598e0319f2f89f82f540c0070b3", "score": "0.5687416", "text": "def forward(self, loss_input_dict):\n predict = loss_input_dict['prediction']\n\n if(isinstance(predict, (list, tuple))):\n predict = predict[0]\n if(self.softmax):\n predict = nn.Softmax(dim = 1)(predict)\n\n # for numeric stability\n predict = predict * 0.999 + 5e-4\n C = list(predict.shape)[1]\n entropy = torch.sum(-predict*torch.log(predict), dim=1) / np.log(C)\n avg_ent = torch.mean(entropy)\n return avg_ent", "title": "" }, { "docid": "be6a9f071c7f905df42caf54c744cf4a", "score": "0.5672226", "text": "def epoch_iter(model, data, optimizer):\n\n #Measurement: bits per dimension\n bpds = 0.0\n\n for i, (imgs, _) in enumerate (data):\n\n #forward pass\n imgs.to(device)\n log_px = model.forward(imgs)\n\n #compute loss\n loss = -1*torch.mean(log_px)\n bpds += loss.item()\n\n #backward pass only when in training mode\n if model.training:\n optimizer.zero_grad()\n loss.backward()\n\n #clip gradient to avoid exploding grads\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)\n\n optimizer.step()\n\n #for readibility\n n_batches = i + 1\n img_shape = imgs.shape[1] # 28 x 28\n #compute average bit per dimension for one epoch\n avg_bpd = bpds / (n_batches * (28**2) * np.log(2))\n\n return avg_bpd", "title": "" }, { "docid": "554cdb3b180c36d8ce83b1ae26f8eed1", "score": "0.56697124", "text": "def forward(self, inp):\n encoder_out = self.forward_encoder(inp)\n prev_output_tokens = self.prepare_prev_toks(inp)\n # prev_output_tokens = inp2[\"prev_out_toks\"]\n\n decoder_out = self.forward_decoder(prev_output_tokens, encoder_out)\n lprobs = F.log_softmax(decoder_out[0], dim=-1)\n\n return {\"net_out\": decoder_out, \"lprobs\": lprobs}", "title": "" }, { "docid": "f772e3466b5b7d5bff62c32c31b6ea4a", "score": "0.5667259", "text": "def forward(self, *args, **kwargs):\n return self.criterion(*args, **kwargs).mean()", "title": "" }, { "docid": "9bec058999372bb981381a72bf71b4e9", "score": "0.56665564", "text": "def forward(self, x):\n x = self.features(x.view(-1, 1, 120, 120)) # # of timestep, 3, 3, 512\n\n x = x.view(x.size(0), -1) # flatten to (_, 4608)\n x = self.classifier(x) # (_, 512) mapping of frames to classes\n\n return x", "title": "" }, { "docid": "ff4014704a5a34b21f70391a7ecce0bc", "score": "0.56665415", "text": "def forward(self, x):\n batch_size = x.shape[0]\n x = self.feature_extractor(x)\n x = x.view(-1, self.num_output_features)\n x = self.classifier(x)\n out = x\n expected_shape = (batch_size, self.num_classes)\n assert out.shape == (batch_size, self.num_classes),\\\n f\"Expected output of forward pass to be: {expected_shape}, but got: {out.shape}\"\n return out", "title": "" }, { "docid": "d02cd4e8f80be3f6d3a8bfa175ecacbd", "score": "0.56618315", "text": "def forward(self, code, args):\n if args.split:\n # splitting the code\n code = code[:,:args.nb_channels_split,:,:]\n \n # applying the convolution layers\n x1 = self.c1(self.sc1(code))\n x2 = self.c2(self.sc2(x1))\n x3 = self.c3(self.sc3(x2))\n \n # performing a mean pool\n m1 = torch.mean(x3, dim=-1)\n m2 = torch.mean(m1, dim=-1)\n \n # FC layers\n x6 = self.lin2(self.lin(m2))\n \n # sigmoid activation function\n out = self.softm(x6)\n \n return out", "title": "" }, { "docid": "44e060e4f0f9cb08a8e60ee8998d854b", "score": "0.5658986", "text": "def forward(self, x):\n\t\tfor affine in self.affine_layers:\n\t\t\tx = F.tanh(affine(x))\n\n\t\taction_mean = 
self.action_mean(x)\n\n\n\t\treturn action_mean", "title": "" }, { "docid": "7c0ccf205ccb6a50bfdece72f3b5f54c", "score": "0.5654338", "text": "def forward(self, x):\n #######################################################################\n # YOUR CODE #\n #######################################################################\n output_size = x.size()[2:4] # Get spatial dimensions (i.e HxW for the input images)\n \n # Sets of layers in the network\n features = self.aug_vgg16.features\n avgpool = self.aug_vgg16.avgpool\n classifier = self.aug_vgg16.classifier\n upsample = nn.Upsample(size=output_size, mode='bilinear')\n\n # for layer in features:\n # x = layer(x)\n # print(x.size())\n \n # x = avgpool(x)\n # print(x.size())\n\n # for layer in classifier:\n # x = layer(x)\n # print(x.size())\n\n # Upsample the output by a factor of 32 with bilinear interpolation (TODO: For testing purposes, comment later)\n #layer = nn.Upsample(size=output_size, mode='bilinear')\n x = features(x)\n x = avgpool(x)\n x = classifier(x)\n x = upsample(x)\n # Output shape is: B x (num_classes x H x W) -> num_classes-many heat maps per image\n # PyTorch is able to calculate the loss over different class predictions/heat maps with cross entropy loss automatically.\n # Therefore, a reduction over the dim-1 (i.e. num_classes) is not needed.\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n return x", "title": "" }, { "docid": "95ae09b863a51591ae17195621cc89cc", "score": "0.5653306", "text": "def forward(self, x):\n x = torch.nn.functional.relu(self.conv0(x))\n x = self.resblocks(x)\n x = torch.nn.functional.relu(self.avg(x))\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x", "title": "" }, { "docid": "e60dceb3fd97887898dee96955bf3148", "score": "0.5651908", "text": "def forward(self, xs):\r\n N, T = np.shape(xs) \r\n _, D = np.shape(self.W)\r\n\r\n out = np.empty((N, T, D))\r\n self.layers = []\r\n\r\n for t in range(T): \r\n layer = Embedding(self.W)\r\n out[:, t, :] = layer.forward(xs[:, t])\r\n self.layers.append(layer)\r\n\r\n return out", "title": "" }, { "docid": "27f6b30bfaa06ec8278fb2791d3897b4", "score": "0.5648777", "text": "def forward(self, encoded):\n\n # Decoder Stage - 5\n #x_4d = F.max_unpool2d(x_4, indices_4, kernel_size=2, stride=2)\n x_42d = F.relu(self.decoder_convtr_42(encoded))\n x_41d = F.relu(self.decoder_convtr_41(x_42d))\n x_40d = F.relu(self.decoder_convtr_40(x_41d))\n dim_4d = x_40d.size()\n\n # Decoder Stage - 4\n #x_3d = F.max_unpool2d(x_40d, indices_3, kernel_size=2, stride=2)\n x_32d = F.relu(self.decoder_convtr_32(x_40d))\n x_31d = F.relu(self.decoder_convtr_31(x_32d))\n x_30d = F.relu(self.decoder_convtr_30(x_31d))\n dim_3d = x_30d.size()\n\n # Decoder Stage - 3\n #x_2d = F.max_unpool2d(x_30d, indices_2, kernel_size=2, stride=2)\n x_22d = F.relu(self.decoder_convtr_22(x_30d))\n x_21d = F.relu(self.decoder_convtr_21(x_22d))\n x_20d = F.relu(self.decoder_convtr_20(x_21d))\n dim_2d = x_20d.size()\n\n # Decoder Stage - 2\n #x_1d = F.max_unpool2d(x_20d, indices_1, kernel_size=2, stride=2)\n x_11d = F.relu(self.decoder_convtr_11(x_20d))\n x_10d = F.relu(self.decoder_convtr_10(x_11d))\n dim_1d = x_10d.size()\n # Decoder Stage - 1\n #x_0d = F.max_unpool2d(x_10d, indices_0, kernel_size=2, stride=2)\n x_01d = F.relu(self.decoder_convtr_01(x_10d))\n x_00d = self.decoder_convtr_00(x_01d)\n x = F.upsample(x_00d, size=(800, 800), mode='bilinear')\n x = 
torch.sigmoid(x).squeeze(1)\n\n if DEBUG:\n print(\"dim_0: {}\".format(dim_0))\n print(\"dim_1: {}\".format(dim_1))\n print(\"dim_2: {}\".format(dim_2))\n print(\"dim_3: {}\".format(dim_3))\n print(\"dim_4: {}\".format(dim_4))\n\n print(\"dim_d: {}\".format(dim_d))\n print(\"dim_4d: {}\".format(dim_4d))\n print(\"dim_3d: {}\".format(dim_3d))\n print(\"dim_2d: {}\".format(dim_2d))\n print(\"dim_1d: {}\".format(dim_1d))\n print(\"dim_0d: {}\".format(dim_0d))\n\n\n return x", "title": "" }, { "docid": "2d35e292efac196adce172106e0187e7", "score": "0.5645064", "text": "def forward(self, images):\n with torch.no_grad():\n features = self.model(images)\n\n #####MARKER\n if not self.attention:\n features = features.view(features.size(0), -1)\n if self.architecture == \"alexnet\":\n features = self.classifier(features)\n features = self.embed(features)\n features = self.bn(features)\n else:\n features = self.adaptive_pool(features)\n\n return features", "title": "" }, { "docid": "f56f26992b798911f087db1dcb7d806e", "score": "0.56448406", "text": "def forward(self, batched_inputs, batched_states=None):\n pass", "title": "" }, { "docid": "fd25f5e1e9f398c97bfe5ce3efce684e", "score": "0.56433296", "text": "def forward(self, images):\n\t\tbatch_size = images.size(0)\n\t\tresult = self.resnet(images)#se pasa por resnet y se recibe \n\t\t#(batch_size,2048,7,7)\n\n\t\tresult = self.adaptive_pool(result)\n\n\t\tresult = result.permute(0,2,3,1)#en vez de tener tensor en (batch_size,2048,14,14)\n\t\t#se pasa a tener shape (batch_size, 14, 14, 2048)\n\n\n\t\t#se devuelven los resultados del encoding en shape\n\t\t#(batch_size,196,2048)\n\t\treturn result", "title": "" }, { "docid": "6ace17a8acc937bc6c3a03e95b129d4c", "score": "0.5640279", "text": "def forward(self, batch):\n # assuming that the precomputed features are already l2-normalized\n\n images = batch['image'].to(self.device).to(self.device)\n\n images = self.pool(images.permute(0, 2, 1)) # Global pooling\n images = images.permute(0, 2, 1)\n features = self.fc(images)\n # normalize in the joint embedding space\n if not self.no_imgnorm:\n features = l2norm(features, dim=-1)\n\n return features", "title": "" }, { "docid": "9f51e0d1f2415a1e11fe9fb552765bdd", "score": "0.5637839", "text": "def forward(self, inputs):\n conv_outputs = inputs\n up_output = [center]\n for i in range(self.feature_level):\n b = self.feature_level - i - 1\n x = self.up_concat[i](conv_outputs[b], up_output[i])\n up_output.append(x)\n\n final = self.final(up_output[-1])\n\n # print(final.device)\n # print(self.final_gn.weight.device)\n # print(self.final_gn.bias.device)\n # final_gn = self.final_gn(final)\n # print(final_gn.device)\n # return final_gn\n return final", "title": "" }, { "docid": "9b418bf744bd6835432e844e6f7c2ada", "score": "0.563591", "text": "def forward(self,images):\n #Run the image through the ResNet\n encoded_image = self.resnet(images) # (batch_size,2048,7,7)\n batch_size = encoded_image.shape[0]\n features = encoded_image.shape[1]\n num_pixels = encoded_image.shape[2] * encoded_image.shape[3]\n # Get the global features of the image\n global_features = self.avgpool(encoded_image).view(batch_size, -1) # (batch_size, 2048)\n enc_image = encoded_image.permute(0, 2, 3, 1) # (batch_size,7,7,2048)\n enc_image = enc_image.view(batch_size,num_pixels,features) # (batch_size,num_pixels,2048)\n return enc_image, global_features", "title": "" }, { "docid": "4ce6830576138ec5d4fe7cb66eacd528", "score": "0.56343335", "text": "def forward(self, x):\n x = self.conv_layers(x)\n 
x = self.avg_pool(x)\n x = torch.flatten(x, 1)\n x = self.linear_layers(x)\n return x", "title": "" }, { "docid": "d4ee31aa058059c58ea1de76f5bad8a4", "score": "0.5632121", "text": "def encoder(self):\n\n\t\tdims = self.opts.dims\n\t\tcode_len = self.opts.encoder_vec_size\n\t\tif self.opts.dataset == \"CIFAR\":\n\t\t\twith tf.variable_scope(\"encoder\"):\n\t\t\t\tconv1 = model.conv2d(self.images, [3, 3, self.c, dims], 2, \"conv1\", alpha=0.01) # 16x16x64\n\t\t\t\tconv2 = model.conv2d(conv1, [3, 3, dims, dims*2], 2, \"conv2\", alpha=0.01) # 8x8x128\n\t\t\t\tconv3 = model.conv2d(conv2, [3, 3, dims * 2, dims * 4], 2, \"conv3\", alpha=0.01) # 4x4x256\n\t\t\t\tconv4 = model.conv2d(conv3, [3, 3, dims * 4, dims * 8], 2, \"conv4\", alpha=0.01) # 2x2x512\n\t\t\t\tself.conv3_flat_len = int(np.prod(conv4.get_shape()[1:]))\n\t\t\t\tconv3_flat = tf.reshape(conv4, [-1, self.conv3_flat_len])\n\t\t\t\tmean = model.fully_connected(conv3_flat, code_len, self.is_training, None, \"full3_mean\", use_leak=True, bias_constant=0.01) # 40\n\t\t\t\tstds = model.fully_connected(conv3_flat, code_len, self.is_training, None, \"full3_stds\", use_leak=True, bias_constant=0.01) # 40\n\t\telse:\n\t\t\twith tf.variable_scope(\"encoder\"):\n\t\t\t\tdims = 16\n\t\t\t\tconv1 = model.conv2d(self.images, [3, 3, self.c, dims], 2, \"conv1\", alpha=0.2, use_leak=True, bias_constant=0.01) # 14x14x16\n\t\t\t\tconv2 = model.conv2d(conv1, [3, 3, dims, dims * 2], 2, \"conv2\", alpha=0.2, use_leak=True, bias_constant=0.01) # 7x7x32\n\t\t\t\tconv2d_flat = tf.reshape(conv2, [-1, 7*7*32])\n\t\t\t\tmean = model.fully_connected(conv2d_flat, code_len, self.is_training, None, \"full3_mean\", use_leak=True, bias_constant=0.01) # 40\n\t\t\t\tstds = model.fully_connected(conv2d_flat, code_len, self.is_training, None, \"full3_stds\", use_leak=True, bias_constant=0.01) # 40\n\n\t\treturn mean, stds", "title": "" }, { "docid": "b731ebd5edb0248fed6c1264974ce496", "score": "0.5628335", "text": "def _forward(self, *x):\n # Average\n results = [estimator(*x) for estimator in self.estimators_]\n output = op.average(results)\n\n return output", "title": "" }, { "docid": "5c8bb2f68ef41f5bdb207d8f466208d4", "score": "0.5622046", "text": "def forward(self, x, is_training):\n raise NotImplementedError", "title": "" }, { "docid": "937867771e44eba45ec3173609767461", "score": "0.5619656", "text": "def forward(self, batch):\n adj = [b[0] for b in batch]\n x = torch.cat([b[1] for b in batch], 0)\n # b[2] is stats\n\n x = self.preproc(x)\n x, adj = self.hops((x, adj))\n # For BN, trim after computation instead of before\n x = self.model_out(x)\n r = []\n i = 0\n for a in adj:\n j = i + a.size(0)\n r.append(x[i])\n i = j\n assert i == x.size(0), f'{i} == {x.size(0)}'\n\n r = torch.stack(r)\n assert len(adj) == r.size(0), r.size()\n assert 1 == r.size(1), r.size()\n return r", "title": "" }, { "docid": "43b9bd908e0143ed509345ae6ac288a7", "score": "0.5612291", "text": "def _forward(\n batch: dataset.Batch,\n is_training: bool,\n) -> jnp.ndarray:\n net = hk.nets.ResNet50(1000,\n resnet_v2=FLAGS.model_resnet_v2,\n bn_config={'decay_rate': FLAGS.model_bn_decay})\n return net(batch['images'], is_training=is_training)", "title": "" }, { "docid": "5de94dadb545b5c2f1775c46b03a1a2a", "score": "0.5609254", "text": "def forward(self, input):\n # input is shape: [seq,batch,feature]\n gru_out, self.hidden1 = self.gru1(input, self.hidden1)\n gru_out, self.hidden2 = self.gru2(gru_out, self.hidden2)\n keyword_space = self.hidden2keyword(gru_out)\n result = 
F.log_softmax(keyword_space, dim=2)\n # return the mean across the sequence length to produce the\n # best prediction of which word exists in that sequence.\n # we can do that because we know each window_size sequence in\n # the training dataset contains at most one word.\n result = result.mean(dim=0)\n return result", "title": "" }, { "docid": "8df8fcdd83ed0f228efa2761d185a844", "score": "0.56032103", "text": "def forward(self, input_):\n\t\tif not self.training:\n\t\t\treturn F.softmax(input_, dim=-1) \n\t\telse: \n\t\t\t#return input_\n\t\t\treturn F.log_softmax(input_, dim=-1)", "title": "" }, { "docid": "83c736e0f44c1dd166c573766b72c464", "score": "0.55989426", "text": "def feed_forward(self, arr):\n for layer in self.layers:\n arr = layer.feed_forward(arr)\n return arr", "title": "" }, { "docid": "113e8dfb826a0fb65277b82ca00f3cec", "score": "0.5595157", "text": "def forward(self, input):\n ### YOUR CODE HERE for part 1h\n\n X_words_emb = []\n for X_padded in input:\n X_emb = self.char_embedding(X_padded) # batch_size x max_word_length x char_embed_size\n X_reshaped = torch.transpose(X_emb, dim0=1, dim1=2)\n\n X_conv_out = self.cnn(X_reshaped)\n X_highway = self.highway(X_conv_out)\n X_word_emb = self.dropout(X_highway)\n X_words_emb.append(X_word_emb)\n X_words_emb = torch.stack(X_words_emb)\n return X_words_emb\n\n ### END YOUR CODE", "title": "" }, { "docid": "980a85df4df2dec220abd2c144c4d1e8", "score": "0.55903023", "text": "def exec_train():\n # Ensure dropout layers are in train mode\n # Zero gradients\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n # Forward batch of sequences through decoder one time step at a time\n for step in range(max_target_len):\n forward_decoder(step)\n # Perform backpropagation\n loss.backward()\n # Clip gradients: gradients are modified in place\n _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)\n _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)\n # Adjust model weights\n encoder_optimizer.step()\n decoder_optimizer.step()", "title": "" } ]
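Each record in this dump pairs a docstring-style query with one positive code passage and a list of score-ranked mined negatives, as in the rows above and below. A minimal sketch of streaming such rows into (query, passage, label) training pairs — assuming the dump is serialized as JSON Lines with field names matching the row layout shown here; the file name retrieval_rows.jsonl is a placeholder, not from the source:

import json

def iter_training_pairs(path="retrieval_rows.jsonl"):
    # Stream one JSON object per line; each object is one dataset row.
    with open(path, encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            query = row["query"]
            for passage in row["positive_passages"]:
                yield query, passage["text"], 1   # relevant code snippet
            for passage in row["negative_passages"]:
                yield query, passage["text"], 0   # mined hard negative

# Usage: count pairs without materialising the whole dataset in memory.
# n_pairs = sum(1 for _ in iter_training_pairs())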
0e08c2053cefb4745ed8ee41b1a1530f
Gets the content of a resource
[ { "docid": "8dccfbc4eec6b6eb13db2655e677441e", "score": "0.0", "text": "def load_resource(resource_path):\n resource_content = pkg_resources.resource_string(__name__, resource_path)\n return unicode(resource_content)", "title": "" } ]
[ { "docid": "4734bb5b047d6b7b31a78cee1d6808e6", "score": "0.8737112", "text": "def _get_content(self, resource):", "title": "" }, { "docid": "0f26c74e63b2210f70c0d0c161313c0e", "score": "0.80320984", "text": "def get_resource(self):\n try:\n print('fetching:', self.path)\n r = requests.get(self.path)\n if r.status_code != 200:\n print('raising exception...')\n raise Exception()\n except Exception as e:\n print('ERROR: getting URL resource failed')\n\n return r.content", "title": "" }, { "docid": "febfa7da464c5d4a6368f8681538aae1", "score": "0.7396146", "text": "def get(self, path=None):\n resource = self._retrieve_resource_entity()\n\n if isinstance(resource, str):\n self.set_header('Content-Type', 'image/vnd.microsoft.icon')\n return self.finish(resource)\n elif resource.exists and resource.cacheable:\n self._cache_objects[self.request.uri] = resource\n elif not resource.exists:\n # Return HTTP 404 if the content is not found.\n self._logger.error('%s could not be found.' % resource.path)\n\n raise HTTPError(404)\n\n # Get the content type.\n self.set_header(\"Content-Type\", resource.kind or 'text/plain')\n\n # Retrieve the plugins if registered.\n if not ResourceService._plugins and not ResourceService._plugins_registered:\n ResourceService._plugins = services.find_by_tag(\n ResourceService._plugins_tag_name\n )\n\n # Apply the plugin.\n for plugin in ResourceService._plugins:\n if plugin.expect(resource):\n resource = plugin.execute(resource)\n # End the iteration\n\n # Return the content.\n try:\n self.finish(resource.content)\n except Exception as e:\n raise HTTPError(500)", "title": "" }, { "docid": "70e5bb8a70a1c58a421626262f57de76", "score": "0.7164376", "text": "def resource(self):\n return self._res", "title": "" }, { "docid": "3cd1cd4368d8009ba02b7d49a6293c8e", "score": "0.70911694", "text": "def resource(self):\n return self.api.get_resource(self.resource_name)", "title": "" }, { "docid": "9ccf0015fd643a1820a4b86be19a9bd5", "score": "0.7008779", "text": "def get(self):\n with open(self.path, 'r') as f:\n content = f.read()\n return content", "title": "" }, { "docid": "dfc4b8298a49b97de11ca3b3e074f750", "score": "0.6995272", "text": "def get(self, resource, **kwargs):\n url = self.construct_url(self.base_url, resource, **kwargs)\n LOG.debug(\"%s\", url)\n response = self.session.get(url, auth=('admin','admin'), timeout=Client.REQUEST_TIMEOUT)\n LOG.debug(\"result: [%s]\", response)\n self._handle_response(response, resource)\n\n return response.text", "title": "" }, { "docid": "e0a78c12012aba69185c48813a66ab9a", "score": "0.6842633", "text": "def resource(self):\n return self.get_resource()", "title": "" }, { "docid": "81bae5115d3af88a46c38ade822dc3d7", "score": "0.6836169", "text": "def get_resource(self, res: PudlResourceKey) -> bytes:\n desc = self.get_descriptor(res.dataset)\n url = desc.get_resource_path(res.name)\n content = self._fetch_from_url(url).content\n desc.validate_checksum(res.name, content)\n return content", "title": "" }, { "docid": "22e04e733076922f68345b580a7ec1d7", "score": "0.6818987", "text": "def contents(self):\n response = request(self._api_session.get, \"%s/contents\" % self.detail_path(),\n configuration=self._configuration, stream=True)\n return response", "title": "" }, { "docid": "a70f1e93011a0ebf8fad9be257a82453", "score": "0.67865133", "text": "def get_content(self):", "title": "" }, { "docid": "5e28af4a11f50842fd0b8ff0b2c470bc", "score": "0.6775583", "text": "def read(url, **args):\n with FileResource(url, **args) as resource:\n return 
resource.read()", "title": "" }, { "docid": "645a9584685ef654bd6454d2b6eb903f", "score": "0.6755838", "text": "def _get(resource):\n url = '{0}{1}'.format(base_url, resource)\n res = requests.get(url)\n if res.status_code == 200:\n return res.json()['message']\n else:\n return res", "title": "" }, { "docid": "cc5bd8008ada1b1c9abf6d29b5280ace", "score": "0.6712248", "text": "def _get_resource(self, resource, obj, params=None, **kwargs):\n r = self._http_resource('GET', resource, params=params)\n item = self._resource_deserialize(r.content)\n\n return obj.new_from_dict(item, h=self, **kwargs)", "title": "" }, { "docid": "a90b4c25d4f0c4edb2d73e604f1dab48", "score": "0.66995305", "text": "def get(self, resource, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "6ea1f53f3bde1c1d032afce8107552af", "score": "0.6683851", "text": "def GetContent(self):\n pass", "title": "" }, { "docid": "ab0524c576403c8e44efc83740588ded", "score": "0.66831344", "text": "def loadResource(self):\n pass", "title": "" }, { "docid": "c255069f86cd64245775619d0c81b92e", "score": "0.6666054", "text": "def get_content(self):\n return self.content", "title": "" }, { "docid": "21016191817d8ea7663dc2a4713bff61", "score": "0.66653436", "text": "def get_content(self):\n raise NotImplementedError", "title": "" }, { "docid": "150eeabca26efb5771a591ed7274fd95", "score": "0.666468", "text": "def resource(self):\n return self._resource", "title": "" }, { "docid": "002d793d75c88d45364a136e8115e4f9", "score": "0.66343087", "text": "def _get_resource(filename, data=None):\n resource = open(_get_path(filename))\n output = _apply_data(resource.read(), filename, data)\n\n resource.close()\n return output.strip()", "title": "" }, { "docid": "d0738bfca99322d8c01e7b29a120ed2f", "score": "0.6575553", "text": "def get_contents(self, path, raw=False):\n contents = self.call(A_GETCONTENTS_NAME, path)\n\n if not raw and \"content\" in contents:\n return contents[\"content\"]\n else:\n return contents", "title": "" }, { "docid": "e6b547b12eb1ed7ab213a0b3b40d8330", "score": "0.6564535", "text": "def __get_content(self):\n with open(self.file, 'rb') as handler:\n return handler.read()", "title": "" }, { "docid": "1c045432b8f74ad9a661ec285df2ed46", "score": "0.6561423", "text": "def resource(filename, encoding=None):\n content = resource_string(MODULE, filename)\n if encoding:\n return content.decode(encoding=encoding)\n return content", "title": "" }, { "docid": "0305bf00f828c2facdce51d66522a5b8", "score": "0.6550542", "text": "def get(self, path, content=None):\n return self.raw_call(\"get\", path, content)", "title": "" }, { "docid": "55f28576ae3788a369a0e84b76cbaa24", "score": "0.654843", "text": "def get_resource(rel_path, 
pkg_name='onedrive_client', is_text=True):\n content = pkgutil.get_data(pkg_name, rel_path)\n if is_text:\n content = content.decode('utf-8')\n return content", "title": "" }, { "docid": "a5c1e57d98fbd20086d6bcded914b11b", "score": "0.65454644", "text": "def get_resource(url):\n resp = requests.get(url)\n resp_obj = {\n 'resp_obj': resp,\n 'resp_data': json.loads(resp.text)\n }\n return resp_obj", "title": "" }, { "docid": "1ffebe2d967db0d9445964654e89253d", "score": "0.653753", "text": "def _read_resource(path):\n if os.path.exists(path):\n with open(args.filename) as f:\n return f.read()\n\n return urllib.urlopen(path).read()", "title": "" }, { "docid": "125a54bec88582794aa9a2e02f078ac6", "score": "0.6524705", "text": "def get_content(self):\n\n return self.content", "title": "" }, { "docid": "20b786dd1e233e6e6267e8f9d148540e", "score": "0.6521623", "text": "def get(self, ressource):\n url = self._makeUrlFromRessource(ressource)\n r = requests.get(url)\n\n if r.status_code == 200:\n ressource.load_data(r.json())\n return r\n else:\n raise RequestAPIException(\"Can't get the ressource on the API\", response=r)", "title": "" }, { "docid": "bd1271bc517a4d02fe096b3125f62364", "score": "0.6514214", "text": "def get_content(self):\n return self._content", "title": "" }, { "docid": "dcde58ae51961152a8bd381f7158f127", "score": "0.6451402", "text": "def get_resource_content(\n frame_id: FrameId,\n url: str,\n) -> typing.Generator[T_JSON_DICT, T_JSON_DICT, typing.Tuple[str, bool]]:\n params: T_JSON_DICT = {}\n params[\"frameId\"] = frame_id.to_json()\n params[\"url\"] = url\n cmd_dict: T_JSON_DICT = {\n \"method\": \"Page.getResourceContent\",\n \"params\": params,\n }\n json = yield cmd_dict\n return (\n str(json[\"content\"]),\n bool(json[\"base64Encoded\"]),\n )", "title": "" }, { "docid": "9fa0e32f944fae12ac03183cc30cb478", "score": "0.64382815", "text": "def get_content(self):\n with self.open() as handle: # pylint:disable=no-value-for-parameter\n return handle.read().decode('UTF-8')", "title": "" }, { "docid": "3979ff6b1288d2f440d8e1d74985dfa4", "score": "0.643633", "text": "def read_resource(token: str, resource_id: str) -> tuple:\r\n response = requests.get(f'{url_items}/{resource_id}', headers={'Authorization': f'Bearer {token}'})\r\n try:\r\n resource_content = response.json()['items']\r\n except KeyError:\r\n return None, response\r\n else:\r\n return resource_content, response", "title": "" }, { "docid": "58aef7d45b19f3574731537d3c764931", "score": "0.6349977", "text": "def getContent(self):\n return self.content", "title": "" }, { "docid": "9f942ba409683f5cca6b7254f9a095ab", "score": "0.6336912", "text": "def get_content(src):\n with open(src, 'r+') as file:\n content = file.read()\n return content", "title": "" }, { "docid": "1604d04c603eb3960b62bc3c6bbd715d", "score": "0.6325941", "text": "def get_resource(self, urlToFetch ):\n r = requests.get(urlToFetch, headers=self.__header, verify=True)\n logger.debug(\"URL: %s, status:%i\", urlToFetch, r.status_code)\n logger.debug(r.headers.get('content-type'))\n\n resp = \"\"\n if r.status_code == 200:\n logger.info(\"successful request... 
\")\n content_type = r.headers.get('content-type')\n if content_type == \"text/plain\":\n try:\n resp = r.text\n except:\n resp = \"couldn't load remote page content\"\n elif \"application/json\" in content_type:\n try:\n resp = r.json()\n except json.JSONDecodeError:\n resp = \"couldn't load json\"\n elif r.status_code == 400:\n resp = fourHundredString\n elif r.status_code == 403:\n resp = fourOThreeString\n elif r.status_code == 404:\n resp = fourOFourString\n elif r.status_code == 429:\n resp = fourTwentyNineString\n elif r.status_code >= 500:\n resp = fiveHundredString\n logger.debug(\"response: %s\", resp[:50])\n\n # return the response, whatever it is\n return resp", "title": "" }, { "docid": "5ba1319718b1efebdae7b0333746e58c", "score": "0.6317176", "text": "def get_resource_stream(manager, resource_name):", "title": "" }, { "docid": "5ba1319718b1efebdae7b0333746e58c", "score": "0.6317176", "text": "def get_resource_stream(manager, resource_name):", "title": "" }, { "docid": "03ed3c394adf5207d631d3d25684b9df", "score": "0.6316075", "text": "def get_resource(restApiId=None, resourceId=None, embed=None):\n pass", "title": "" }, { "docid": "b089e04df630ebba60905d3e2be9f18b", "score": "0.63140666", "text": "def content(self) -> str:\n return pulumi.get(self, \"content\")", "title": "" }, { "docid": "b089e04df630ebba60905d3e2be9f18b", "score": "0.63140666", "text": "def content(self) -> str:\n return pulumi.get(self, \"content\")", "title": "" }, { "docid": "b089e04df630ebba60905d3e2be9f18b", "score": "0.63140666", "text": "def content(self) -> str:\n return pulumi.get(self, \"content\")", "title": "" }, { "docid": "b089e04df630ebba60905d3e2be9f18b", "score": "0.63140666", "text": "def content(self) -> str:\n return pulumi.get(self, \"content\")", "title": "" }, { "docid": "b089e04df630ebba60905d3e2be9f18b", "score": "0.63140666", "text": "def content(self) -> str:\n return pulumi.get(self, \"content\")", "title": "" }, { "docid": "b089e04df630ebba60905d3e2be9f18b", "score": "0.63140666", "text": "def content(self) -> str:\n return pulumi.get(self, \"content\")", "title": "" }, { "docid": "b089e04df630ebba60905d3e2be9f18b", "score": "0.63140666", "text": "def content(self) -> str:\n return pulumi.get(self, \"content\")", "title": "" }, { "docid": "b089e04df630ebba60905d3e2be9f18b", "score": "0.63140666", "text": "def content(self) -> str:\n return pulumi.get(self, \"content\")", "title": "" }, { "docid": "ab41d99aea9766baeced46772346fcf0", "score": "0.62920386", "text": "def getContent(url):\n content = loadLocal(url)\n if content is None:\n response = getWithTOR(url)\n content = response\n storeLocal(url, content)\n return content", "title": "" }, { "docid": "0219ac3229c573be7d9cc5df8ed1232e", "score": "0.6281783", "text": "def get(self, data, check, mutator):\n content_id = data.kwargs.get('content_id')\n if not content_id:\n raise exception.NotFound(message=DEF_CONTENT_NOT_FOUND)\n\n q = static_content.StaticContent.all()\n q.ancestor(data.program)\n q.filter('content_id', content_id)\n entity = q.get()\n if not entity:\n raise exception.NotFound(message=DEF_CONTENT_NOT_FOUND)\n\n return bs_helper.sendBlob(entity.content)", "title": "" }, { "docid": "c54ba609755a34ba284913dcb1c92f1e", "score": "0.62728494", "text": "def getContent():", "title": "" }, { "docid": "9e9560813a2ed16ef9d81d003aeae7bf", "score": "0.6265667", "text": "def _get_resource(context, data_dict):\n if 'resource' in data_dict:\n return data_dict['resource']\n return toolkit.get_action('resource_show')(context, {'id': 
data_dict['id']})", "title": "" }, { "docid": "fecf0e18d2ef157c3f6dfa377f3e1a10", "score": "0.62633616", "text": "def get_content(self):\r\n tag = self.get_content_type()\r\n return getattr(self, tag)", "title": "" }, { "docid": "2ebd9d895fe415d577470d3e45448698", "score": "0.62462574", "text": "def get_resource(request):\n path = request.GET.get('path')\n if not path:\n return HttpResponseBadRequest('\"path\" parameter is missing')\n \n try:\n with open(join_fb_root(path)) as f:\n content = f.read()\n return JsonResponse({'content': content})\n except Exception as e: # pragma: no cover\n msg = \"Impossible to open '\" + path + \"' : \" + htmlprint.code(str(type(e)) + ' - ' + str(e))\n if settings.DEBUG:\n messages.error(request, \"DEBUG set to True: \" + htmlprint.html_exc())\n return HttpResponseNotFound(msg)", "title": "" }, { "docid": "fe5cca5b78760fa90f89c3e545cc39f1", "score": "0.62255716", "text": "def resource(self) -> Path:", "title": "" }, { "docid": "baa54442518483d020e87fabe16306d7", "score": "0.62140626", "text": "def get(self, request, pk, pathname):\n try:\n resource_file = hydroshare.get_resource_file(pk, pathname)\n logical_file = resource_file.logical_file\n metadata = resource_file.metadata\n except ObjectDoesNotExist:\n # Backwards compatibility for file_id\n try:\n resource_file = ResourceFile.objects.get(id=pathname)\n logical_file = resource_file.logical_file\n metadata = resource_file.metadata\n except Exception:\n # is it a folder?\n resource = hydroshare.get_resource_by_shortkey(pk)\n dir_path = pk + os.path.join(\"/data/contents/\", pathname)\n logical_file = resource.get_folder_aggregation_object(dir_path)\n metadata = None\n\n title = logical_file.dataset_name \\\n if logical_file else \"\"\n keywords = metadata.keywords \\\n if metadata else []\n spatial_coverage = metadata.spatial_coverage.value \\\n if metadata and metadata.spatial_coverage else {}\n extra_metadata = metadata.extra_metadata \\\n if metadata else {}\n temporal_coverage = metadata.temporal_coverage.value if \\\n metadata and metadata.temporal_coverage else {}\n extra_data = logical_file.metadata.dict() \\\n if logical_file else {}\n\n # TODO: How to leverage serializer for this?\n return Response({\n \"title\": title,\n \"keywords\": keywords,\n \"spatial_coverage\": spatial_coverage,\n \"extra_metadata\": extra_metadata,\n \"temporal_coverage\": temporal_coverage,\n \"logical_file\": extra_data\n })", "title": "" }, { "docid": "f704050aa62863c6299ffc6faf3b4e33", "score": "0.6213257", "text": "def get(self, resource_id):\n self.get_resource_id(\n self.manager.country_code,\n resource_id\n )\n url = '%s/%s' % (self.resource_url, resource_id)\n data = self.manager.request(url, 'GET')\n return self.resource_class(**data)", "title": "" }, { "docid": "f34d73de50ea6d13c57baae8cdb35924", "score": "0.6193711", "text": "def render_GET(self, _):\n return self.content", "title": "" }, { "docid": "c9ca40b4743c47327c84ffaeebddca59", "score": "0.61862534", "text": "def get_resource(self, resource, region, query_params={}):\n url = self._format_api_url(resource, region)\n return self._request_handler(url, region, query_params)", "title": "" }, { "docid": "856f35ee4b4ac2297498f525aed88785", "score": "0.6183947", "text": "def content(self):\n return self.response.content", "title": "" }, { "docid": "62eb2fd06310e91d295be2de217f87a2", "score": "0.61785364", "text": "def resource(self) -> str:\n return pulumi.get(self, \"resource\")", "title": "" }, { "docid": "8fd4f8d129b515b928358fc2cbf964d6", "score": 
"0.61712843", "text": "def get_content(self):\n logger.info(\"Get %s from %s.\", self.name, self.server_relative_url)\n\n get = self._send_get_request(\n \"_api/web/GetFolderByServerRelativeUrl('{}')/Files('{}')/$value\",\n self.folder_relative_url,\n self.name,\n headers=headers[\"GET\"],\n exception=ListingException\n )\n\n return get.content", "title": "" }, { "docid": "cb7920a5899c62556440139e822b4160", "score": "0.6153767", "text": "def contents(self):\n self._result.get()\n return self._contents", "title": "" }, { "docid": "feb5f821ffe08661996601ddc7189666", "score": "0.6147069", "text": "def get(self, resource, **params):\n if type(resource) == int:\n endpoint = '%s%s/' % (self.endpoint, resource)\n else:\n if resource.startswith('http://'):\n endpoint = resource\n else:\n endpoint = '%s%s' % (self.api_host, resource)\n \n return self._make_request(endpoint, params)", "title": "" }, { "docid": "5e169eca65e9fd34d044cf7e13d2e74c", "score": "0.61340004", "text": "def get_raw_content(self):\n return self.raw_content", "title": "" }, { "docid": "961696853821f6cdf0b01d7c90e8fcab", "score": "0.6124972", "text": "def do_GET(s):\n print \"Requesting content...\"\n response = api.api_handler_json(s.path, HTTPHandler.api_info)\n s.send_response(200)\n s.send_header('Content-type', 'text/json')\n s.end_headers()\n s.wfile.write(response)", "title": "" }, { "docid": "b3292ee54b40ef55cb47a50ca05075d5", "score": "0.6116768", "text": "def get_content(self, url):\r\n cache_path = self._url_to_path(url)\r\n try:\r\n with open(cache_path, 'rb') as f:\r\n return f.read()\r\n except IOError:\r\n return None", "title": "" }, { "docid": "8f8e512e09bf26a0f1ce91d6e7ca81d6", "score": "0.6107339", "text": "def get(self):\n\t\treturn self.connection.get(self.base_uri)", "title": "" }, { "docid": "8f8e512e09bf26a0f1ce91d6e7ca81d6", "score": "0.6107339", "text": "def get(self):\n\t\treturn self.connection.get(self.base_uri)", "title": "" }, { "docid": "860241ebc2e7d8206ba67f66de0eae4c", "score": "0.6106095", "text": "def detail(self, data, resource):\n try:\n return resource.get(**data)\n except HTTP_ERRORS as err:\n self.handle_error('detail', data, err)", "title": "" }, { "docid": "09876fd7500353f3ad1c49711e0c1e5a", "score": "0.6089593", "text": "def get_text_contents(self):\r\n return self.get_contents()", "title": "" }, { "docid": "3e15f2a8a5b9a4ba4a6a639db354cc72", "score": "0.6085173", "text": "def get_file_content(self):\n f = open(self.path, 'rb')\n content = f.read()\n f.close()\n return content", "title": "" }, { "docid": "24ec0ddd04fb1066325e78af06d2a3c0", "score": "0.6062087", "text": "def get(self):\n resources = ResourceModel.query.all()\n resources = resources_schema.dump(resources)\n return {'status': 'success', 'data': resources}, 200", "title": "" }, { "docid": "1f33772007b24391114fa532c71ad2b6", "score": "0.6055666", "text": "def get_content(self, icnname: Name) -> Content:", "title": "" }, { "docid": "fa5786ca52758b02ebc4bd660b0489e7", "score": "0.6036996", "text": "def get_data(self):\n if self.is_file():\n # To read file, it should exist\n self.__validate_file_path(check_file_exists=True)\n return self.__get_file_data()\n elif self.is_web():\n return self.get_web_data()\n else:\n raise Exception(\n \"Invalid resource type. 
Should be either file or web\"\n )", "title": "" }, { "docid": "120653e95a0f81b6f8713d8d53a8640a", "score": "0.60327876", "text": "def fetch(url, resource_type):\n return urlopen(Request(url, headers=HTTP_HEADERS)).read()", "title": "" }, { "docid": "994271099dd5cf2ffeb51120b998f768", "score": "0.6019335", "text": "def get(self, resource, keys):\n return self.service.get(\n resource, keys, self.url_prefix, self.auth, self.session,\n self.session_send_opts)", "title": "" }, { "docid": "a602c79822719e501066570cff88ae40", "score": "0.60168195", "text": "def read(self):\n if not self.is_open:\n # pylint: disable=broad-except\n raise Exception(\"VisaResource not open\")\n rst = self.resource.read(delay=self.delay)\n if self.verbose:\n logger.debug('%s:READ %s', self.name, rst)", "title": "" }, { "docid": "27a5fcd4042789f0a27d613e08a88e76", "score": "0.60075504", "text": "def getContent(self):\n return self._content", "title": "" }, { "docid": "6568d3e2d8067b61e4659172a09d322b", "score": "0.5994887", "text": "def get_resource(app,resource,key,args):\n cfg = get_cfg()\n return ResourceClient('%s/api/resource' % cfg.get('Connection','endpoint'),app,get_active_api_version(args),resource,auth_user=key)", "title": "" }, { "docid": "fa6f140c376d6ab0b3b01f9796bcaafe", "score": "0.5982428", "text": "def get_content(ref, file_path):", "title": "" }, { "docid": "7b00601f15299a13761701ad314ef99c", "score": "0.5978908", "text": "def content(self):\n with open(self.fixture) as handle:\n return handle.read()", "title": "" }, { "docid": "9d8bef31eb83acc36ba267299ad2a94f", "score": "0.59762824", "text": "def getContents(self):\n target = open(self.file)\n self.contents = target.read()\n target.close()", "title": "" }, { "docid": "3a860e4c4af4e4d4f5ede7f32a72c198", "score": "0.59746635", "text": "def resource(self, name):\n if name in self.resources:\n res = self.resources[name]\n else:\n url = self.url + '/' + name.strip('/') + '/'\n res = RestResource(url)\n self.resources[name] = res\n return res", "title": "" }, { "docid": "ea895abe161337b6540cab195e67623c", "score": "0.5970174", "text": "async def load_resource(self) -> typing.Union[str, web.Response]:\n try:\n res = await self.SESSION.get(\n f\"{self.BASE_URL}{self.request.path}\",\n headers={\n \"User-Agent\": self.USER_AGENT,\n \"Accept\": \"text/html\",\n \"Accept-Language\": \"en,ru;q=0.9,cs;q=0.8,la;q=0.7\",\n \"Accept-Encoding\": \"gzip, deflate\",\n },\n verify_ssl=False,\n )\n res.raise_for_status()\n except Exception as e:\n logger.exception(\"Error while loading page (path=%s, error=%s)\", self.request.path, e)\n return web.Response(text=f\"<b>Error</b>: {e}\", status=500)\n else:\n if res.content_type != \"text/html\":\n raw = await res.read()\n headers = res.headers.copy()\n headers.pop(\"Transfer-Encoding\", None)\n headers.pop(\"Content-Encoding\", None)\n return web.Response(body=raw, status=res.status, headers=headers)\n html = await res.text(encoding=\"utf-8\")\n return html", "title": "" }, { "docid": "4635a276466be4b9a0bb27277a4ac639", "score": "0.5952686", "text": "def content(self):\n return self._content", "title": "" }, { "docid": "d03e697e7c427c59a168013e9632c808", "score": "0.5952632", "text": "def get_resource(self, resource_request):\n if not self.session:\n self.session = IpSession(self.pairing_data)\n url = '/resource'\n body = _dump_json(resource_request).encode()\n\n try:\n response = self.session.post(url, body)\n content_type = 'application/octet-stream'\n for header in response.headers:\n if header[0] == 'Content-Type':\n content_type = header[1]\n return (content_type, response.read())\n except (AccessoryDisconnectedError, EncryptionError):\n self.session.close()\n self.session = None\n raise", "title": "" }, { "docid": "80da6e52b8f924cb33804904d06347cc", "score": "0.5952029", "text": "def get_content(self, uri, co3_context_token=None, timeout=None):\n url = u\"{0}/rest/orgs/{1}{2}\".format(self.base_url, self.org_id, ensure_unicode(uri))\n response = self._execute_request(self.session.get,\n url,\n proxies=self.proxies,\n cookies=self.cookies,\n headers=self.make_headers(co3_context_token),\n verify=self.verify,\n timeout=timeout)\n BasicHTTPException.raise_if_error(response)\n return response.content", "title": "" }, { "docid": "74b62d30074a21c6862021ba20b60839", "score": "0.59480613", "text": "def getContent(url):\n \"\"\"\n # if you want cashe, here you go\n content = loadLocal(url)\n if content is None:\n response = getWithTOR(url)\n content = response\n storeLocal(url, content)\n return content\n \"\"\"\n response = getWithTOR(url)\n content = response\n return content", "title": "" }, { "docid": "98fba893dc39a8c8d7d90578e37e2356", "score": "0.59426194", "text": "def get_content(self) -> dict:\n pass", "title": "" }, { "docid": "d7bd5cb73833ae783db07f911ad9d138", "score": "0.5935362", "text": "def _retrieve_resource(self, class_name: str, id: str) -> Any:\n url = self._instance_url(class_name, id)\n\n response = Requestor(self._client).request(method=RequestMethod.GET, url=url)\n\n return convert_to_easypost_object(response=response)", "title": "" } ]
4a9881689bc78af4cfb52834600bea05
Gets a set of slides (.PDL1.mrxs) and divides each one of them into patches. The final patches are stored in a folder with the same name as the slide.
[ { "docid": "64780dffa2afde7f412f5be70ad82c01", "score": "0.6368157", "text": "def patch_division(slides, outpath, level, tile_size=224, tissue_ratio=0.50, jobs=1):\n\n # Creates directory outpath if doesn't exist yet\n try:\n os.mkdir(outpath)\n print(\"Directory\", outpath, \"created\")\n print()\n except FileExistsError:\n print(\"Directory\", outpath, \"already exists\")\n print()\n\n # Collects all files in folder slide with the format *.PDL1.mrxs\n slides = os.path.join(slides, '*.PDL1.mrxs')\n slide_list = []\n start = time.time()\n n = 0\n n = Parallel(n_jobs=jobs)(delayed(get_patches)(\n s, outpath, level, tissue_ratio, tile_size) for s in glob.glob(slides))\n end = time.time()\n print('Total time patch extraction: {:.4f} s'.format(end - start))\n classifier = []\n for s in glob.glob(slides):\n slidename = os.path.basename(s)\n outpath_slide = os.path.join(outpath, slidename)\n image_path = os.path.join(outpath_slide, \"*.jpg\")\n patches = glob.glob(image_path)\n c = numpy.zeros((len(patches), 4))\n c = c.astype(int)\n for p in patches:\n name = os.path.basename(p)\n name = os.path.splitext(p)[0]\n number = name.split('#')[1]\n number = number.split('-')\n slide_number = int(number[0])\n x = int(number[2])\n y = int(number[3])\n # Loads data in classifier matrix\n c[slide_number][0] = slide_number\n c[slide_number][1] = x\n c[slide_number][2] = y\n # Positive column = 1 if patch is in list_positive\n c[slide_number][3] = 1\n classifier.append((s, c))\n\n pickle_save(classifier, outpath, 'class_{}_{}.p'.format(level, tile_size))\n\n n = sum(n)\n if not n == 0:\n print('Total number of patches extracted {}'.format(n))\n print()\n return classifier", "title": "" } ]
[ { "docid": "119b53bb19bd42ac90c7ad21c6d8612e", "score": "0.60560036", "text": "def get_patches(slidepath, outpath, level=10, tissue_ratio=0.25, size=256):\n\n # Opens the slide with OpenSlide\n slide = OpenSlide(slidepath)\n\n # Gets deepzoom tile division\n slide_dz = deepzoom.DeepZoomGenerator(\n slide, tile_size=(size - 2), overlap=1)\n\n # Gets the name and number of the slide\n slidename = os.path.basename(slidepath)\n\n # Saves a preview of the slide under 'slidename.png'\n slide_preview(slide, slidename, outpath)\n\n # Asures that the chosen level is valid\n if level < slide_dz.level_count:\n tiles = slide_dz.level_tiles[level]\n print('Level {} contains {} tiles (empty tiles included)'.format(\n level, slide_dz.level_tiles[level][0] * slide_dz.level_tiles[level][1]))\n else:\n print('Invalid level')\n return 0\n\n # Creates new directory - where patches will be stored\n outpath = os.path.join(outpath, slidename)\n try:\n os.mkdir(outpath)\n print(\"Directory\", outpath, \"created\")\n except FileExistsError:\n print(\"Directory\", outpath, \"already exists\")\n print(\"Patches already extracted\")\n return 0\n\n # Saves tiles if detects tissue presence higher than tissue_ratio\n n = 0\n print(\"Saving tiles image \" + slidepath + \"...\")\n for i in tqdm(range(tiles[0])):\n for j in range(tiles[1]):\n # Gets the tile in position (i, j)\n tile = slide_dz.get_tile(level, (i, j))\n image = numpy.array(tile)[..., :3]\n mask = tissue.get_tissue_from_rgb(image, blacktol=10, whitetol=240)\n # Saves tile in outpath only if tissue ratio is higher than threshold\n if mask.sum() > tissue_ratio * tile.size[0] * tile.size[1]:\n tile_path = os.path.join(\n outpath, '{}#{}-level{}-{}-{}.jpg'.format(slidename, n, level, i, j))\n tile.save(tile_path)\n n = n + 1\n print('Total of {} tiles with tissue ratio >{} in slide {}'.format(\n n, tissue_ratio, slidepath))\n print()\n\n return n", "title": "" }, { "docid": "09d5d6c7291388f01caf0f6f7809f850", "score": "0.5605953", "text": "def images_to_patches(self, patch_size=(80,96), max_patches=30):\n print 'Extracting patches from all images in the pipeline.'\n for img_list in self.img_lst2:\n for img_arr in img_list:\n patches = self.extract_patch(img_arr, patch_size=patch_size, max_patches=max_patches)\n self.patches_lst2.append(patches)", "title": "" }, { "docid": "fca5356cf75785caacbfa5f20f15939d", "score": "0.5595178", "text": "def pickles_to_midi(self):\n for file_name in os.listdir(PREPROCESS_PICKLE_DIR):\n if file_name[-7:] == \".pickle\":\n # print(PICKLE_PATH+file_name)\n with open(PREPROCESS_PICKLE_DIR + file_name, \"rb\") as f:\n roll = pickle.load(f)\n midi = self.piano_roll_to_pretty_midi(\n roll, fs=400, program=self.program)\n midi.write(OUTPUT_PATH + file_name[:-7] + \".mid\")", "title": "" }, { "docid": "d1dceef48aef01f002df60b5816b84e8", "score": "0.5491693", "text": "def extract_data(filename, num_images):\n imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n #print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = imgs[0].shape[0]\n IMG_HEIGHT = imgs[0].shape[1]\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n img_patches = [img_crop(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE) for i in range(num_images)]\n\n ######## ADD OFFSET\n if OFFSET_DATA[0]:\n 
for offset_size in OFFSET_DATA[1]:\n img_patches_offset = [img_crop_augmented(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,offset_size) for i in range(num_images)] \n img_patches += img_patches_offset\n \n ######\n\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n\n return numpy.asarray(data)", "title": "" }, { "docid": "d0d27da381459bc556b5382c8f5ccf72", "score": "0.5395526", "text": "def _load_patches(self, img_files, name, patch_size, num_channels):\n patches_all = []\n # reduce the number of files to load if we are in testing mode\n img_files = img_files [0:self.n] if self.test else img_files\n # load patches\n pbar = tqdm(img_files)\n for file in pbar:\n pbar.set_description('Loading dataset %s' % name)\n # pick file name\n assert os.path.isfile(file), 'Not a file: %s' % file\n # load the image containing the patches and convert to float point\n # and make sure that que use only one single channel\n img = cv2.imread(file)[:,:,0] / 255.\n # split the image into patches and\n # add patches to buffer as individual elements\n patches_row = np.split(img, self.PATCHES_PER_ROW, axis=0)\n for row in patches_row:\n patches = np.split(row, self.PATCHES_PER_ROW, axis=1)\n for patch in patches:\n # resize the patch\n patch_resize = cv2.resize(patch, (patch_size,patch_size))\n # convert to tensor [w x h x d]\n patch_tensor = patch_resize.reshape(patch_size,\n patch_size,\n num_channels)\n patches_all.append(patch_tensor)\n return np.asarray(patches_all) if not self.test \\\n else np.asarray(patches_all[0:self.n])", "title": "" }, { "docid": "84d9426d8293833e98a122fe5374b8f8", "score": "0.53791505", "text": "def sequential_patches(self, patch_size, step_size, normalize):\n img_shape = self.img.shape\n x = range(0, img_shape[0] - patch_size[0], step_size[0])\n y = range(0, img_shape[1] - patch_size[1], step_size[1])\n if (img_shape[0] <= patch_size[0]):\n x = [0]\n if (img_shape[1] <= patch_size[1]):\n y = [0]\n\n ic = (min(img_shape[0], patch_size[0]), min(img_shape[1], patch_size[1]))\n xy = [(i, j) for i in x for j in y]\n img_patches = []\n for i, j in xy:\n img_patch = self.getPatch(i, j, patch_size, ic, normalize)\n img_patches.append(img_patch)\n # print(len(img_patches))\n return (img_patches)", "title": "" }, { "docid": "db276a3a80cb1ea1054f933dce7915bb", "score": "0.53749466", "text": "def split_patches(data_dir, save_dir, postfix=None):\n # 根据data_dir地址确定save_dir\n create_folder(save_dir)\n mode = os.path.basename(data_dir) # train/val\n\n save_dir = os.path.join(save_dir, mode)\n create_folder(save_dir)\n\n image_list = os.listdir(data_dir)\n for i_, image_name in enumerate(image_list):\n name = image_name.split('.')[0]\n if postfix and name[-len(postfix):] != postfix: # label\n continue\n image_path = os.path.join(data_dir, image_name)\n image = np.array(Image.open(image_path))\n # image = io.imread(image_path)\n seg_imgs = []\n\n # split into 16 patches of size 250x250\n h, w = image.shape[0], image.shape[1]\n patch_size = 250\n h_overlap = math.ceil((4 * patch_size - h) / 3)\n w_overlap = math.ceil((4 * patch_size - w) / 3)\n for i in range(0, h - patch_size + 1, patch_size - h_overlap):\n for j in range(0, w - patch_size + 1, patch_size - w_overlap):\n if len(image.shape) == 3:\n patch = image[i:i + patch_size, j:j + patch_size, :]\n else:\n patch = image[i:i + patch_size, j:j + patch_size]\n seg_imgs.append(patch)\n\n\n for k in range(len(seg_imgs)):\n\n if postfix:\n seg_imgs_pil = Image.fromarray(seg_imgs[k])\n 
seg_imgs_pil.save('{:s}/{:s}_{:d}_{:s}.png'.format(save_dir, name[:-len(postfix) - 1], k, postfix))\n # io.imsave('{:s}/{:s}_{:d}_{:s}.png'.format(save_dir, name[:-len(postfix) - 1], k, postfix), seg_imgs[k])\n else:\n seg_imgs_pil = Image.fromarray(seg_imgs[k])\n seg_imgs_pil.save('{:s}/{:s}_{:d}.png'.format(save_dir, name, k))\n # io.imsave('{:s}/{:s}_{:d}.png'.format(save_dir, name, k), seg_imgs[k])\n logging.info('{},{}/{}'.format(name, i_+1, k+1))", "title": "" }, { "docid": "992e301df520c940d9668b67c280c5ac", "score": "0.5373239", "text": "def get_patches(xs, ys, nn_params, stride=49):\n patch_size = nn_params['patch_size']\n fully = nn_params['fully']\n assert stride >= 1, \"Stride must be at least 1\"\n assert (patch_size) >= 1, \"Patch size has to be >= 1\"\n assert patch_size % 2 == 1, \"Patch size should be odd\"\n assert xs[0].shape[0] >= patch_size and xs[0].shape[1] >= patch_size, \\\n (\"Patch is too big for this image: img.shape = %s\" % str(xs[0].shape))\n logging.info(\"Get patches of size: %i\", patch_size)\n patches, labels = [], []\n for X, y in zip(xs, ys):\n px_left_patchcenter = (patch_size - 1) / 2\n start_x = px_left_patchcenter\n end_x = X.shape[0] - px_left_patchcenter\n start_y = start_x\n end_y = X.shape[1] - px_left_patchcenter\n for patch_center_x in range(start_x, end_x + 1, stride):\n for patch_center_y in range(start_y, end_y + 1, stride):\n if fully:\n # Get Labels of the patch and flatt it to 1D\n # x1 = patch_center_x - px_left_patchcenter\n # x2 = patch_center_x + px_left_patchcenter + 1\n # y1 = patch_center_y - px_left_patchcenter\n # y2 = patch_center_y + px_left_patchcenter + 1\n l = y[patch_center_x - px_left_patchcenter:\n patch_center_x + px_left_patchcenter + 1,\n patch_center_y - px_left_patchcenter:\n patch_center_y + px_left_patchcenter + 1]\n\n labels.append(l.flatten())\n\n # Get patch from original image\n patches.append(X[patch_center_x - px_left_patchcenter:\n patch_center_x + px_left_patchcenter + 1,\n patch_center_y - px_left_patchcenter:\n patch_center_y + px_left_patchcenter + 1,\n :])\n else:\n labels.append(y[patch_center_x][patch_center_y])\n # Get patch from original image\n patches.append(X[patch_center_x - px_left_patchcenter:\n patch_center_x + px_left_patchcenter + 1,\n patch_center_y - px_left_patchcenter:\n patch_center_y + px_left_patchcenter + 1,\n :])\n assert len(patches) == len(labels), \"len(patches) != len(labels)\"\n logging.info(\"%i patches were generated.\", len(patches))\n if fully:\n return (np.array(patches, dtype=np.float32),\n np.array(labels, dtype=np.float32))\n else:\n return (np.array(patches, dtype=np.float32),\n np.array(labels, dtype=np.int32))", "title": "" }, { "docid": "67ac7073b5b85bc3438681169d81b106", "score": "0.5355265", "text": "def get_DIV2k_data_patches(pLow, bs: int):\n# src = ImageImageList.from_folder(pLow, presort=True).split_by_idxs(\n# train_idx=list(range(0, 565407)), valid_idx=list(range(565408, 637408)))\n src = ImageImageList.from_folder(pLow, presort=True).split_by_rand_pct(valid_pct=0.05, seed=42)\n \n data = (src.label_from_func(lambda x: path_fullRes_patches/x.name)\n .transform(get_transforms(\n flip_vert = True\n ), tfm_y=True\n ).databunch(bs=bs, num_workers=2, no_check=True).normalize(imagenet_stats, do_y=True))\n data.c = 3\n return data", "title": "" }, { "docid": "f1a384c62adeca071107412f22d80ce7", "score": "0.5239801", "text": "def montagePngs(png1, png2='', outname='', sidebyside=True, png3=None, geometry='auto',\n trim=False, spacing=2, background='black', 
png4=None,\n border2=None, montage='montage', crop2=None, splice2=None, \n tile='', png5=None, png6=None, convert='convert', density='',\n labelImages=False, pointsize=36, labelBasename=True, font='',\n verbose=False, cleanup=True, labelOverwrite=False, \n labelBackground='white', labelColor='black', addLabel='',\n png7=None, png8=None, filetype='png', removeSourcePngs=False,\n protectOriginals=False, png9=None, png10=None, png11=None, \n png12=None, png13=None, png14=None, png15=None, gravity='', forceLabel=False, \n order='', sort=True):\n if outname == '':\n outname = 'montagePngs.'+filetype\n if verbose: print(\"montagePngs using outname=%s\" % (outname))\n if png3 == '': png3 = None\n if png4 == '': png4 = None\n if png2 == '':\n if type(png1) == list:\n if len(png1) < 2:\n print(\"au.montagePngs received png1='%s', png2='%s'\" % (png1,png2))\n print(\"You must specify png2 or set png1 to a list of 2 or more files\")\n return\n else:\n if png1.find('*') < 0 and png1.find(',') < 0:\n print(\"You must specify png2 or include a wildcard in png1.\")\n return\n if png1.find('*') >= 0:\n pnglists = png1.split(',')\n png1 = []\n for mypnglist in pnglists:\n if mypnglist.find('!*') >= 0:\n png1listAll = glob.glob(os.path.join(os.path.dirname(mypnglist),'*.png'))\n png1listAvoid = glob.glob(mypnglist.replace('!',''))\n png1list = []\n for png in png1listAll:\n if png not in png1listAvoid:\n png1.append(png)\n else:\n png1 += sorted(glob.glob(mypnglist))\n if sort:\n png1 = sorted(png1)\n if order != '':\n if type(order) == str:\n order = order.split(',')\n if len(order) != len(png1):\n print(\"Number of items in order (%d) not equal to len of png1 (%d)\"% (len(order), len(png1)))\n return\n # convert from numpy.string to string\n indices = np.array([int(i) for i in order])\n print(\"indices = \", indices)\n png1 = [str(i) for i in np.array(png1)[indices]]\n print(\"New order: \", png1)\n else:\n png1 = png1.split(',')\n if sort:\n png1 = sorted(png1)\n if len(png1) == 0:\n print(\"No images found\")\n return\n print(\"png1 = \", png1)\n png = png1[:]\n if len(png) >= 2:\n png1 = png[0]\n png2 = png[1]\n if len(png) >= 3:\n png3 = png[2]\n if len(png) >= 4:\n png4 = png[3]\n if len(png) >= 5:\n png5 = png[4]\n if len(png) >= 6:\n png6 = png[5]\n if len(png) >= 7:\n png7 = png[6]\n if len(png) >= 8:\n png8 = png[7]\n if len(png) >= 9:\n png9 = png[8]\n if len(png) >= 10:\n png10 = png[9]\n if len(png) >= 11:\n png11 = png[10]\n if len(png) >= 12:\n png12 = png[11]\n if len(png) >= 13:\n png13 = png[12]\n if len(png) >= 14:\n png14 = png[13]\n if len(png) >= 15:\n png15 = png[14]\n elif os.path.isdir(png2):\n print(\"png2 is a directory, not an image file.\")\n return\n if type(png1) == str or type(png1) == np.string_: # png1 can already be a list (if png2=='')\n if png1.find('*') >= 0:\n png1list = glob.glob(png1)\n if len(png1list) > 1:\n print(\"You have specified png1 and png2, but the wildcard in png1 found more than 1 file.\")\n return\n elif len(png1list) == 0:\n print(\"You have specified png1 and png2, but the wildcard in png1 found zero files: \", png1)\n return\n else:\n png1 = png1list[0]\n else:\n print(\"type(png1) = %s\" % (type(png1)))\n png1list = png1\n print(\"Setting png1 to %s\" % (png1list[0]))\n png1 = png1list[0]\n if png2.find('*') >= 0:\n png2list = glob.glob(png2)\n if len(png2list) > 1:\n print(\"You have specified png1 and png2, but the wildcard in png2 found more than 1 file.\")\n return\n elif len(png2list) == 0:\n print(\"You have specified png1 and png2, but the 
wildcard in png2 found zero files: \", png2)\n return\n else:\n png2 = png2list[0]\n if (os.path.exists(png1) == False):\n print(\"Could not find file png1 = %s.\" % (png1))\n return\n if (os.path.exists(png2) == False):\n print(\"Could not find file png2 = \", png2)\n return\n if (png3 is not None):\n if (png4 is not None):\n if (png5 is not None):\n if (png6 is not None):\n if png7 is not None:\n if png8 is not None:\n if png9 is not None:\n if png10 is not None:\n if png11 is not None:\n if png12 is not None:\n if png13 is not None:\n if png14 is not None:\n if png15 is not None:\n npngs = 15\n else:\n npngs = 14\n else:\n npngs = 13\n if tile == '':\n tile = '3x5'\n else:\n npngs = 12\n if tile == '':\n tile = '3x4'\n else:\n npngs = 11\n if tile == '':\n tile = '3x4'\n else:\n npngs = 10\n if tile == '':\n tile = '3x4'\n else:\n npngs = 9\n if tile == '':\n tile = '3x3'\n else:\n npngs = 8\n if tile == '':\n tile = '2x4'\n else:\n npngs = 7\n if tile == '':\n tile = '2x4'\n else:\n npngs = 6\n if (tile == ''):\n tile = '2X3'\n else:\n npngs = 5\n if (tile == ''):\n tile = '2X3'\n else:\n npngs = 4\n if (tile == ''):\n tile = '2X2'\n else:\n npngs = 3\n if (tile == ''):\n if (sidebyside):\n tile = '3X1'\n else:\n tile = '1X3'\n else: # png3 is None\n npngs = 2\n if (tile == ''):\n if (sidebyside):\n tile = '2X1'\n else:\n tile = '1X2'\n if (geometry is None or filetype=='pdf'):\n geometry = '1000x1000+%d+%d' % (spacing,spacing)\n elif (geometry.find('auto') >= 0):\n gap = 0\n if (geometry.find('+') > 0):\n gap = int(geometry.split('+')[1])\n if png1.find('.png') >=0 :\n geometry = 'x'.join([str(i) for i in pngWidthHeight(png1, ignoreCommas=True)])\n else: # assume jpg\n geometry = 'x'.join([str(i) for i in jpgWidthHeight(png1)])\n if gap != 0:\n geometry += '+%d+%d' % (gap,gap)\n if verbose:\n print(\"geometry = \", geometry)\n if (trim):\n trim = '-trim'\n else:\n trim = ''\n removeFiles = []\n if border2 is not None:\n cmd = \"%s %s -bordercolor %s -border %d border_%s\" % (convert, png2, background, border2, png2)\n if verbose: print(\"Running %s\" % (cmd))\n os.system(cmd)\n png2 = \"border_\" + png2\n if cleanup:\n removeFiles.append(png2)\n if crop2 is not None:\n cmd = \"%s %s -crop %s crop_%s\" % (convert, png2, crop2, png2)\n if verbose: print(\"Running %s\" % (cmd))\n os.system(cmd)\n png2 = \"crop_\" + png2\n if cleanup:\n removeFiles.append(png2)\n if splice2 is not None:\n cmd = \"%s %s -background %s -splice %s splice_%s\" % (convert, png2, background, splice2, png2)\n if verbose: print(\"Running %s\" % (cmd))\n os.system(cmd)\n png2 = \"splice_\" + png2\n if cleanup:\n removeFiles.append(png2)\n if trim:\n path = os.path.dirname(png1)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s1.%s\" % (convert, png1, path, filetype,filetype)\n if verbose: print(\"running trim1: \", pcmd)\n mystatus = os.system(pcmd)\n if mystatus != 0:\n convert = '/opt/local/bin/convert'\n print(\"Switching to \", convert)\n pcmd = \"%s -trim %s %s%s1.%s\" % (convert, png1, path, filetype, filetype)\n print(\"running: \", pcmd)\n mystatus = os.system(pcmd)\n if mystatus != 0:\n convert = '/usr/local/bin/convert'\n print(\"Switching to \", convert)\n pcmd = \"%s -trim %s %s%s1.%s\" % (convert, png1, path, filetype, filetype)\n print(\"running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png1 = 'png1_trimmed.png'\n shutil.move('%s%s1.%s' % (path, filetype, filetype), png1)\n\n path = os.path.dirname(png2)\n if not 
os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s2.%s\" % (convert, png2, path, filetype, filetype)\n if verbose: print(\"running trim2: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png2 = 'png2_trimmed.png'\n if verbose: print(\"moving: %s%s2.%s to %s\" % (path, filetype, filetype, png2))\n shutil.move('%s%s2.%s' % (path, filetype, filetype), png2)\n if density != '':\n density = '-density %s' % (str(density))\n if gravity != '':\n gravity = '-gravity ' + gravity\n if labelImages != False and labelImages != '':\n if len(font) > 0:\n font = '-font ' + font\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png1)\n else:\n label = png1\n else:\n label = labelImages[1-1]\n if len(grep(png1,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n newpng1 = png1.replace('.'+filetype,'_label01.'+filetype)\n # Note: -pointsize argument must come before label argument\n # +swap puts the label on the top of the image, otherwise it would be on the bottom.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png1, labelBackground, labelColor, font, pointsize, label, gravity, newpng1)\n# I tried the following to see if it would prevent also labeling in small font at bottom, but it did not change:\n# pcmd = '%s -background %s -fill %s %s -pointsize %d label:\"%s\" %s -gravity Center -append %s' % (convert, labelBackground, labelColor, font, pointsize, label, png1, newpng1)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if not os.path.exists(newpng1):\n tmpnewpng1 = '/tmp/' + newpng1\n pcmd = pcmd.replace(newpng1,tmpnewpng1)\n os.system(pcmd)\n newpng1 = tmpnewpng1\n print(\"Wrote \", tmpnewpng1)\n if labelOverwrite:\n os.remove(png1)\n os.rename(newpng1,png1)\n else:\n png1 = newpng1\n else:\n print(\"We have already labeled png1: %s. Set forceLabel=True.\" % (png1))\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png2)\n else:\n label = png2\n else:\n label = labelImages[2-1]\n if len(grep(png2,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n newpng2 = png2.replace('.'+filetype,'_label02.'+filetype)\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png2, labelBackground, labelColor, font, pointsize, label, gravity, newpng2)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if not os.path.exists(newpng2):\n tmpnewpng2 = '/tmp/' + newpng2\n pcmd = pcmd.replace(newpng2,tmpnewpng2)\n os.system(pcmd)\n newpng2 = tmpnewpng2\n print(\"Wrote \", tmpnewpng2)\n if labelOverwrite:\n os.remove(png2)\n os.rename(newpng2,png2)\n else:\n png2 = newpng2\n else:\n print(\"We have already labeled png2: %s. 
Set forceLabel=True\" % (png2))\n\n cmd = \"%s %s %s -background %s -tile %s %s -geometry %s %s %s \"%(montage,trim,density,background,tile,gravity,geometry,png1,png2)\n if removeSourcePngs:\n removeFiles.append(png1)\n removeFiles.append(png2)\n if type(labelImages) == bool:\n nLabelImages = 2\n for i in [png3,png4,png5,png6,png7,png8,png9,png10,png11,png12,png13,png14,png15]:\n if i is not None: nLabelImages += 1\n else:\n nLabelImages = len(labelImages)\n if (png3 is not None): \n if trim:\n path = os.path.dirname(png3)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s3.%s\" % (convert, png3, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png3 = 'png3_trimmed.png'\n shutil.move('%s%s3.%s'%(path,filetype,filetype), png3)\n if labelImages and nLabelImages > (3-1):\n newpng3 = png3.replace('.'+filetype,'_label03.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png3)\n else:\n label = png3\n else:\n label = labelImages[3-1].replace('//','////')\n if len(grep(png3,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png3, labelBackground, labelColor, font, pointsize, label, gravity, newpng3)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if not os.path.exists(newpng3):\n tmpnewpng3 = '/tmp/' + newpng3\n pcmd = pcmd.replace(newpng3,tmpnewpng3)\n os.system(pcmd)\n newpng3 = tmpnewpng3\n print(\"Wrote \", tmpnewpng3)\n if labelOverwrite:\n os.remove(png3)\n os.rename(newpng3,png3)\n else:\n png3 = newpng3\n cmd += png3 \n if removeSourcePngs:\n removeFiles.append(png3)\n if (png4 is not None): \n if trim:\n path = os.path.dirname(png4)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s4.%s\" % (convert, png4, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png4 = 'png4_trimmed.png'\n shutil.move('%s%s4.%s'%(path,filetype,filetype), png4)\n if labelImages and nLabelImages > (4-1):\n newpng4 = png4.replace('.'+filetype,'_label04.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png4)\n else:\n label = png4\n else:\n label = labelImages[4-1].replace('//','////')\n if len(grep(png4,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png4, labelBackground, labelColor, font, pointsize, label, gravity, newpng4)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if not os.path.exists(newpng4):\n tmpnewpng4 = '/tmp/' + newpng4\n pcmd = pcmd.replace(newpng4,tmpnewpng4)\n os.system(pcmd)\n newpng4 = tmpnewpng4\n print(\"Wrote \", tmpnewpng4)\n if labelOverwrite:\n os.remove(png4)\n os.rename(newpng4,png4)\n else:\n png4 = newpng4\n cmd += ' ' + png4\n if removeSourcePngs:\n removeFiles.append(png4)\n if (png5 is not None):\n if trim:\n path = os.path.dirname(png5)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s5.%s\" % (convert, png5, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png5 = 'png5_trimmed.png'\n shutil.move(path+filetype+'5.'+filetype, png5)\n if labelImages and 
nLabelImages > (5-1):\n newpng5 = png5.replace('.'+filetype,'_label05.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png5)\n else:\n label = png5\n else:\n label = labelImages[5-1].replace('//','////')\n if len(grep(png5,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png5, labelBackground, labelColor, font, pointsize, label, gravity, newpng5)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png5)\n os.rename(newpng5,png5)\n else:\n png5 = newpng5\n\n cmd += ' ' + png5\n if removeSourcePngs:\n removeFiles.append(png5)\n if (png6 is not None): \n if trim:\n path = os.path.dirname(png6)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s6.%s\" % (convert, png6, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png6 = 'png6_trimmed.png'\n shutil.move(path+filetype+'6.'+filetype, png6)\n if labelImages and nLabelImages > (6-1):\n newpng6 = png6.replace('.'+filetype,'_label06.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png6)\n else:\n label = png6\n else:\n label = labelImages[6-1].replace('//','////')\n if len(grep(png6,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png6, labelBackground, labelColor, font, pointsize, label, gravity, newpng6)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png6)\n os.rename(newpng6,png6)\n else:\n png6 = newpng6\n cmd += ' ' + png6\n if removeSourcePngs:\n removeFiles.append(png6)\n if (png7 is not None): \n if trim:\n path = os.path.dirname(png7)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s7.%s\" % (convert, png7, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png7 = 'png7_trimmed.png'\n shutil.move(path+'%s7.%s'%(filetype,filetype), png7)\n if labelImages and nLabelImages > (7-1):\n newpng7 = png7.replace('.'+filetype, '_label07.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png7)\n else:\n label = png7\n else:\n label = labelImages[7-1].replace('//','////')\n if len(grep(png7,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png7, labelBackground, labelColor, font, pointsize, label, gravity, newpng7)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png7)\n os.rename(newpng7,png7)\n else:\n png7 = newpng7\n cmd += ' ' + png7\n if removeSourcePngs:\n removeFiles.append(png7)\n if (png8 is not None): \n if trim:\n path = os.path.dirname(png8)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s8.%s\" % (convert, png8, path,filetype,filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png8 = 'png8_trimmed.png'\n shutil.move(path+'%s8.%s'%(filetype,filetype), png8)\n if labelImages and nLabelImages > (8-1):\n newpng8 = png8.replace('.'+filetype, '_label08.'+filetype)\n if 
labelImages == True:\n if labelBasename:\n label = os.path.basename(png8)\n else:\n label = png8\n else:\n label = labelImages[8-1].replace('//','////')\n if len(grep(png8,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png8, labelBackground, labelColor, font, pointsize, label, gravity, newpng8)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png8)\n os.rename(newpng8,png8)\n else:\n png8 = newpng8\n cmd += ' ' + png8\n if removeSourcePngs:\n removeFiles.append(png8)\n if (png9 is not None): \n if trim:\n path = os.path.dirname(png9)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s9.%s\" % (convert, png9, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png9 = 'png9_trimmed.png'\n shutil.move(path+'%s9.%s'%(filetype,filetype), png9)\n if labelImages and nLabelImages > (9-1):\n newpng9 = png9.replace('.'+filetype, '_label09.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png9)\n else:\n label = png9\n else:\n label = labelImages[9-1].replace('//','////')\n if len(grep(png9,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png9, labelBackground, labelColor, font, pointsize, label, gravity, newpng9)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png9)\n os.rename(newpng9,png9)\n else:\n png9 = newpng9\n cmd += ' ' + png9\n if removeSourcePngs:\n removeFiles.append(png9)\n if (png10 is not None): \n if trim:\n path = os.path.dirname(png10)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s10.%s\" % (convert, png10, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png10 = 'png10_trimmed.png'\n shutil.move(path+'%s10.%s'%(filetype,filetype), png10)\n if labelImages and nLabelImages > (10-1):\n newpng10 = png10.replace('.'+filetype, '_label10.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png10)\n else:\n label = png10\n else:\n label = labelImages[10-1].replace('//','////')\n if len(grep(png10,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png10, labelBackground, labelColor, font, pointsize, label, gravity, newpng10)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png10)\n os.rename(newpng10,png10)\n else:\n png10 = newpng10\n cmd += ' ' + png10\n if removeSourcePngs:\n removeFiles.append(png10)\n if (png11 is not None): \n if trim:\n path = os.path.dirname(png11)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s11.%s\" % (convert, png11, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png11 = 'png11_trimmed.png'\n shutil.move(path+'%s11.%s'%(filetype,filetype), png11)\n if labelImages and nLabelImages > (11-1):\n newpng11 = png11.replace('.'+filetype, '_label11.'+filetype)\n if labelImages == True:\n if labelBasename:\n 
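# with labelBasename, the panel label is just the file name rather than the full path\n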
label = os.path.basename(png11)\n else:\n label = png11\n else:\n label = labelImages[11-1].replace('//','////')\n if len(grep(png11,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png11, labelBackground, labelColor, font, pointsize, label, gravity, newpng11)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png11)\n os.rename(newpng11,png11)\n else:\n png11 = newpng11\n cmd += ' ' + png11\n if removeSourcePngs:\n removeFiles.append(png11)\n if (png12 is not None): \n if trim:\n path = os.path.dirname(png12)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s12.%s\" % (convert, png12, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png12 = 'png12_trimmed.png'\n shutil.move(path+'%s12.%s'%(filetype,filetype), png12)\n if labelImages and nLabelImages > (12-1):\n newpng12 = png12.replace('.'+filetype, '_label12.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png12)\n else:\n label = png12\n else:\n label = labelImages[12-1].replace('//','////')\n if len(grep(png12,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png12, labelBackground, labelColor, font, pointsize, label, gravity, newpng12)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png12)\n os.rename(newpng12,png12)\n else:\n png12 = newpng12\n cmd += ' ' + png12\n if removeSourcePngs:\n removeFiles.append(png12)\n if (png13 is not None): \n if trim:\n path = os.path.dirname(png13)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s13.%s\" % (convert, png13, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png13 = 'png13_trimmed.png'\n shutil.move(path+'%s13.%s'%(filetype,filetype), png13)\n if labelImages and nLabelImages > (13-1):\n newpng13 = png13.replace('.'+filetype, '_label13.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png13)\n else:\n label = png13\n else:\n label = labelImages[13-1].replace('//','////')\n if len(grep(png13,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png13, labelBackground, labelColor, font, pointsize, label, gravity, newpng13)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png13)\n os.rename(newpng13,png13)\n else:\n png13 = newpng13\n cmd += ' ' + png13\n if removeSourcePngs:\n removeFiles.append(png13)\n if (png14 is not None): \n if trim:\n path = os.path.dirname(png14)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s14.%s\" % (convert, png14, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png14 = 'png14_trimmed.png'\n shutil.move(path+'%s14.%s'%(filetype,filetype), png14)\n if labelImages and nLabelImages > (14-1):\n newpng14 = png14.replace('.'+filetype, '_label14.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = 
os.path.basename(png14)\n else:\n label = png14\n else:\n label = labelImages[14-1].replace('//','////')\n if len(grep(png14,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png14, labelBackground, labelColor, font, pointsize, label, gravity, newpng14)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png14)\n os.rename(newpng14,png14)\n else:\n png14 = newpng14\n cmd += ' ' + png14\n if removeSourcePngs:\n removeFiles.append(png14)\n if (png15 is not None): \n if trim:\n path = os.path.dirname(png15)\n if not os.access(path,os.W_OK):\n path = ''\n if len(path) > 0: path += '/'\n pcmd = \"%s -trim %s %s%s15.%s\" % (convert, png15, path, filetype, filetype)\n if verbose: print(\"Running: \", pcmd)\n mystatus = os.system(pcmd)\n if protectOriginals:\n png15 = 'png15_trimmed.png'\n shutil.move(path+'%s15.%s'%(filetype,filetype), png15)\n if labelImages and nLabelImages > (15-1):\n newpng15 = png15.replace('.'+filetype, '_label15.'+filetype)\n if labelImages == True:\n if labelBasename:\n label = os.path.basename(png15)\n else:\n label = png15\n else:\n label = labelImages[15-1].replace('//','////')\n if len(grep(png15,label)[0]) == 0 or forceLabel:\n # Then we have not already labeled this png.\n pcmd = '%s %s -background %s -fill %s %s -pointsize %d label:\"%s\" +swap %s -append %s' % (convert, png15, labelBackground, labelColor, font, pointsize, label, gravity, newpng15)\n if verbose: print(\"Running: \", pcmd)\n os.system(pcmd)\n if labelOverwrite:\n os.remove(png15)\n os.rename(newpng15,png15)\n else:\n png15 = newpng15\n cmd += ' ' + png15\n if removeSourcePngs:\n removeFiles.append(png15)\n cmd += ' ' + outname\n if verbose: print(\"Running %s\" % (cmd))\n mystatus = os.system(cmd)\n if (mystatus != 0 and mystatus != 256):\n # MacOS location of montage\n print(\"Switching to /opt/local/bin/montage\")\n cmd = cmd.replace('montage ','/opt/local/bin/montage ')\n if verbose: print(\"Running %s\" % (cmd))\n mystatus = os.system(cmd)\n if (mystatus != 0 and mystatus != 256):\n # MacOS location of montage\n print(\"Switching to /usr/local/bin/montage\")\n cmd = cmd.replace('/opt/local/bin/montage','/usr/local/bin/montage')\n if verbose: print(\"Running %s\" % (cmd))\n mystatus = os.system(cmd)\n if addLabel != '' and addLabel != False:\n print(\"Calling addTopLabelToPng\")\n if os.path.exists(addLabel):\n f = open(addLabel,'r')\n lines = f.readlines()\n f.close()\n for line in lines[::-1]:\n line = line.replace('#','')\n addTopLabelToPng(outname, line, pointsize, font='Courier-Bold', gravity=gravity)\n else:\n addTopLabelToPng(outname, addLabel, pointsize, font='Courier-Bold', gravity=gravity)\n for png2 in removeFiles:\n print(\"Removing \", png2)\n os.remove(png2)\n return outname", "title": "" }, { "docid": "8d9d19d9dd8622ed4389133e5a2070c2", "score": "0.52351093", "text": "def get_patches(raster_images, vector_images, *args, aug_random_slide = False, aug_color = False, aug_rotate = False, **kwargs):\n patch_size = kwargs['patch_size']\n # change raster_images into 4d, vector_images into 3d\n if raster_images.ndim == 3:\n raster_images = np.expand_dims(raster_images, 0)\n if vector_images.ndim == 2:\n vector_images = np.expand_dims(vector_images, 0)\n \n image_num = raster_images.shape[0]\n raster_patch_list = []\n vector_patch_list = []\n \n for img_idx in range(image_num):\n raster_image = raster_images[img_idx]\n 
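# the integer division below drops edge pixels that do not fill a complete patch, e.g. a 100x100 image with patch_size=32 yields 3*3 = 9 patches\n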
vector_image = vector_images[img_idx]\n \n rows_num, cols_num = raster_image.shape[0]//patch_size, raster_image.shape[1]//patch_size\n for i in range(rows_num):\n for j in range(cols_num):\n# idx = i * cols_num + j\n raster_patch = raster_image[i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]\n raster_patch_list.append(raster_patch)\n\n vector_patch = vector_image[i*patch_size:(i+1)*patch_size, j*patch_size:(j+1)*patch_size]\n vector_patch_list.append(vector_patch)\n\n if aug_random_slide:\n patch_slide_num = args[0][0]\n is2 = int(1.0 * patch_size)\n xm, ym = raster_image.shape[0] - is2, raster_image.shape[1] - is2\n \n raster_patch_slide = np.zeros((patch_slide_num, is2, is2, 3))\n vector_patch_slide = np.zeros((patch_slide_num, is2, is2))\n \n for i in range(patch_slide_num):\n xc = random.randint(0, xm)\n yc = random.randint(0, ym)\n \n im = raster_image[xc:xc + is2, yc:yc + is2]\n mk = vector_image[xc:xc + is2, yc:yc + is2]\n \n if aug_color:\n if random.uniform(0, 1) > args[0][1]:\n im = im[:,:,::-1]\n\n if aug_rotate:\n if random.uniform(0, 1) > args[0][2]:\n im = im[::-1]\n mk = mk[::-1]\n\n raster_patch_slide[i] = im\n vector_patch_slide[i] = mk\n\n raster_patch_list.extend(raster_patch_slide)\n vector_patch_list.extend(vector_patch_slide)\n return raster_patch_list, vector_patch_list", "title": "" }, { "docid": "6440caf221b7a951a6eed7cfac610e5b", "score": "0.5225819", "text": "def __patch_finder(self):\n tumor_patches = pd.DataFrame(columns=['patch_path', 'is_tumor'])\n normal_patches = pd.DataFrame(columns=['patch_path', 'is_tumor'])\n tumor_patch_paths_list = []\n normal_patch_paths_list = []\n\n for folder in os.listdir(self.image_patch_dir):\n\n if osp.basename(folder).startswith('tumor'):\n for subfolder in os.listdir(osp.join(self.image_patch_dir, folder)):\n # print(subfolder)\n\n tumor_patch_paths_in_folder = glob.glob(\n osp.join(self.image_patch_dir, folder, subfolder, '*.png'))\n # print(len(tumor_patch_paths_in_folder))\n tumor_patch_paths_list.extend(tumor_patch_paths_in_folder)\n # print(len(tumor_patch_paths_list))\n\n else:\n\n for subfolder in os.listdir(osp.join(self.image_patch_dir, folder)):\n # print(subfolder)\n normal_patch_paths_in_folder = glob.glob(\n osp.join(self.image_patch_dir, folder, subfolder, '*.png'))\n normal_patch_paths_list.extend(\n normal_patch_paths_in_folder)\n # print(normal_patch_paths_list)\n\n print(len(tumor_patch_paths_list))\n tumor_patch_paths_series = pd.Series(tumor_patch_paths_list)\n tumor_patches['patch_path'] = tumor_patch_paths_series.values\n tumor_patches['is_tumor'] = 1\n print(len(tumor_patches))\n\n normal_patch_paths_series = pd.Series(normal_patch_paths_list)\n normal_patches['patch_path'] = normal_patch_paths_series.values\n normal_patches['is_tumor'] = 0\n print(len(normal_patches))\n\n return (tumor_patches, normal_patches)", "title": "" }, { "docid": "1f528d0eb31a60901c87a081b3450a6a", "score": "0.5162926", "text": "def create_patches(images, masks, size):\n\n patchesimages = image.extract_patches_2d(images, (size, size), 10, 0)\n patchesmasks = image.extract_patches_2d(masks, (size, size), 10, 0)\n return patchesimages, patchesmasks", "title": "" }, { "docid": "c4f9afc148e743131f1c118d944616ab", "score": "0.51229763", "text": "def montageTwoPngLists(pnglist1, pnglist2, pnglist3=None, pnglist4=None,\n pdfname='', sidebyside=True, geometry='auto',\n filter='', trim=False, spacing=2, background='black',\n sourceFilter=[], maxpages=0, cleanup=True, filetype='png',\n 
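# mode: 'identical' requires equal-length lists, 'truncate' trims the longer list, 'match' pairs files by basename\n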
mode='identical',labelImages='',labelOverwrite=False, startpage=0, \n verbose=False, computeDiff=False, diffThreshold=0, sortByTime=False, \n pointsize=36, removeString='', stopMatchAt='', listUnused=True, \n appendUnused=False, montage='montage', stopAtFailedMatch=False):\n print(\"Received filetype='%s'\" % (filetype))\n if (type(pnglist1) == str):\n # assume we want all pngs in this directory\n if (pnglist1.find('*') < 0 and os.path.isdir(pnglist1)):\n if sortByTime:\n pnglist1 = sorted(glob.glob(pnglist1+'/*.'+filetype), key=os.path.getmtime, reverse=False)\n else:\n pnglist1 = sorted(glob.glob(pnglist1+'/*.'+filetype))\n else:\n png1 = pnglist1\n if sortByTime:\n pnglist1 = sorted(glob.glob(pnglist1), key=os.path.getmtime, reverse=False)\n else:\n pnglist1 = sorted(glob.glob(pnglist1))\n if maxpages > 0:\n print(\"Found %d plots for pnglist1: %s\" % (len(pnglist1),pnglist1[:maxpages]))\n else:\n print(\"Found %d plots for pnglist1\" % (len(pnglist1)))\n if len(pnglist1) == 0:\n return\n print(pnglist1[:3])\n if (type(pnglist2) == str):\n # assume we want all pngs in this directory\n if (pnglist2.find('*') < 0):\n if (pnglist2.split(',')[0].find('replace') < 0):\n if sortByTime:\n pnglist2 = sorted(glob.glob(pnglist2+'/*.'+filetype), key=os.path.getmtime, reverse=False)\n else:\n pnglist2 = sorted(glob.glob(pnglist2+'/*.'+filetype))\n else:\n # only use files in common between 2 paths\n newpnglist1 = []\n newpnglist2 = []\n for png in pnglist1:\n png2 = png.replace(pnglist2.split(',')[1], pnglist2.split(',')[2])\n if (os.path.exists(png2)):\n newpnglist1.append(png)\n newpnglist2.append(png2)\n pnglist1 = newpnglist1\n pnglist2 = newpnglist2\n print(\"Found %d = %d common files\" % (len(pnglist1), len(pnglist2)))\n else:\n png2 = pnglist2\n if sortByTime:\n pnglist2 = sorted(glob.glob(pnglist2), key=os.path.getmtime, reverse=False)\n else:\n pnglist2 = sorted(glob.glob(pnglist2))\n if maxpages > 0:\n print(\"Found %d plots for pnglist2: %s\" % (len(pnglist2),pnglist2[:maxpages]))\n else:\n print(\"Found %d plots for pnglist2\" % (len(pnglist2)))\n if len(pnglist2) == 0:\n return\n print(pnglist2[:3])\n if (pnglist3 is not None):\n if (type(pnglist3) == str):\n # assume we want all pngs in this directory\n if (pnglist3.find('*') < 0):\n pnglist3 = glob.glob(pnglist3+'/*.'+filetype)\n else:\n pnglist3 = glob.glob(pnglist3)\n if sortByTime:\n pnglist3 = sorted(pnglist3, key=os.path.getmtime)\n else:\n pnglist3 = sorted(pnglist3)\n if (pnglist4 is not None):\n if (type(pnglist4) == str):\n # assume we want all pngs in this directory\n if (pnglist4.find('*') < 0):\n pnglist4 = glob.glob(pnglist4+'/*.'+filetype)\n else:\n pnglist4 = glob.glob(pnglist4)\n if sortByTime:\n pnglist4 = sorted(pnglist4, key=os.path.getmtime)\n else:\n pnglist4 = sorted(pnglist4)\n if (filter != '' or sourceFilter != []):\n if type(sourceFilter) == str:\n if not os.path.exists(sourceFilter):\n print(\"Could not find sourceFilter file = %s\" % (sourceFilter))\n return\n f = open(sourceFilter,'r')\n lines = f.readlines()\n f.close()\n sourceFilterFile = sourceFilter\n sourceFilter = []\n for line in lines:\n sourceFilter.append(line.replace('\\n',''))\n print(\"Read %d strings from %s\" % (len(sourceFilter),sourceFilterFile))\n newpnglist1 = []\n for png in pnglist1:\n if (sourceFilter != []):\n for src in sourceFilter:\n if (png.find(src) >= 0):\n newpnglist1.append(png)\n break\n else:\n if (png.find(filter) < 0):\n newpnglist1.append(png)\n pnglist1 = newpnglist1\n newpnglist2 = []\n for png in pnglist2:\n if 
(sourceFilter != []):\n for src in sourceFilter:\n if (png.find(src) >= 0):\n newpnglist2.append(png)\n break\n else:\n if (png.find(filter) < 0):\n newpnglist2.append(png)\n pnglist2 = newpnglist2\n if (pnglist3 is not None):\n newpnglist3 = []\n for png in pnglist3:\n if (sourceFilter != []):\n for src in sourceFilter:\n if (png.find(src) >= 0):\n newpnglist3.append(png)\n break\n else:\n if (png.find(filter) < 0):\n newpnglist3.append(png)\n pnglist3 = newpnglist3\n if (pnglist4 is not None):\n newpnglist4 = []\n for png in pnglist4:\n if (sourceFilter != []):\n for src in sourceFilter:\n if (png.find(src) >= 0):\n newpnglist4.append(png)\n break\n else:\n if (png.find(filter) < 0):\n newpnglist4.append(png)\n pnglist4 = newpnglist4\n if labelImages == 'dir':\n labelImages = [os.path.dirname(pnglist1[0]), os.path.dirname(pnglist2[0])]\n l1 = len(pnglist1)\n l2 = len(pnglist2)\n if (l1 != l2):\n if mode == 'identical':\n print(\"The two lists are not of equal length (%d vs. %d). Set mode='truncate' or 'match'.\" % (l1, l2))\n return\n elif mode == 'truncate':\n if l1 > l2:\n pnglist1 = pnglist1[:l2]\n else:\n pnglist2 = pnglist2[:l1]\n elif mode == 'match':\n pnglist2basename = [os.path.basename(i) for i in pnglist2]\n pnglist2basename_nopagenumber = []\n pnglist2stopMatchAt = []\n for p in pnglist2basename:\n if p.find('_') > 0:\n pnglist2basename_nopagenumber.append('_'.join(p.split('_')[1:]))\n else:\n pnglist2basename_nopagenumber.append(p)\n if stopMatchAt != '':\n if type(stopMatchAt) == list:\n newname = p\n for stopAt in stopMatchAt:\n print(\"A) Looking for %s in %s\" % (stopAt,p))\n if p.find(stopAt) >= 0:\n newname = p[:p.find(stopAt)]\n print(\"Set newname to %s\" % (newname))\n break\n pnglist2stopMatchAt.append(newname)\n else:\n pnglist2stopMatchAt.append(p[:p.find(stopMatchAt)])\n newpnglist1 = []\n newpnglist2 = []\n list2idx = []\n for ipng1,png1 in enumerate(pnglist1):\n if removeString != '':\n if png1.find(removeString) > 0:\n print(\"Removing string: %s from %s\" % (removeString,png1))\n matchName = os.path.basename(png1).replace(removeString,'')\n else:\n matchName = os.path.basename(png1)\n if ipng1 == 0:\n print(\"Set matchName to: \", matchName)\n if stopMatchAt == '':\n if matchName not in pnglist2basename:\n # check for match if either are preceeded by 'page001_'\n if png1.find('page') >= 0:\n print(\"Found 'page' in %s\" % (png1))\n checkname = '_'.join(os.path.basename(png1).split('_')[1:])\n if checkname not in pnglist2basename_nopagenumber:\n print(\"match failed for %s in list of %d\" % (checkname,len(pnglist2basename_nopagenumber)))\n continue\n elif matchName not in pnglist2basename_nopagenumber:\n print(\"match failed for %s in list of %d\" % (png1,len(pnglist2basename_nopagenumber)))\n continue\n \n else:\n if type(stopMatchAt) == list:\n foundMatch = False\n for sA,stopAt in enumerate(stopMatchAt):\n loc = matchName.find(stopAt)\n if loc > 0:\n giantList = []\n for i in pnglist2basename:\n loc = i.find(stopAt)\n giantList.append(i[:loc])\n if sA==0 and ipng1==0:\n print(\" pnglist2basename[0] = \", pnglist2basename[0])\n print(\"B) %s: Looking for %s in found list: %s\" % (stopAt,matchName[:loc], giantList[:1]))\n else:\n giantList = pnglist2basename\n if sA == 0:\n print(\"B) Looking for %s in %s\" % (matchName[:loc], giantList[:1]))\n if matchName[:loc] not in giantList:\n # check for match if either are preceeded by 'page001_'\n checkname = '_'.join(os.path.basename(png1).split('_')[1:])\n if checkname in pnglist2basename_nopagenumber:\n 
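# the basenames agree once the leading 'pageNNN_' prefix is stripped, so accept this as a match\n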
foundMatch = True\n break\n else:\n foundMatch = True\n break\n if not foundMatch:\n if listUnused:\n print(\"match failed for %s in list of %d\" % (checkname,len(pnglist2basename_nopagenumber)))\n if stopAtFailedMatch:\n return\n continue\n else:\n loc = matchName.find(stopMatchAt)\n giantList = []\n for i in pnglist2basename:\n myloc = i.find(stopMatchAt)\n giantList.append(i[:myloc])\n if matchName[:loc] not in giantList:\n # check for match if either are preceeded by 'page001_'\n checkname = '_'.join(os.path.basename(png1).split('_')[1:])\n if checkname not in pnglist2basename_nopagenumber:\n print(\"match failed for %s in list of %d\" % (checkname,len(pnglist2basename_nopagenumber)))\n if stopAtFailedMatch:\n return\n continue\n newpnglist1.append(png1)\n if stopMatchAt == '':\n list2idx.append(pnglist2basename.index(matchName))\n else:\n print(\"pnglist2stopMatchAt=\", pnglist2stopMatchAt)\n print(\"matchName=\", matchName)\n print(\"loc=%d = %s\" % (loc,matchName[:loc]))\n print(\"Looking for matchName[:loc]=\", matchName[:loc])\n print(\"len(pnglist2stopMatchAt) = \", len(pnglist2stopMatchAt))\n print(\"pnglist2stopMatchAt[0] = \", pnglist2stopMatchAt[0])\n list2idx.append(pnglist2stopMatchAt.index(matchName[:loc]))\n match = pnglist2[list2idx[-1]]\n print(\"Found match: %s = %s\" % (os.path.basename(png1), match))\n newpnglist2.append(match)\n unused = sorted(list(set(range(len(pnglist2basename))) - set(list2idx)))\n if verbose or listUnused:\n if len(unused) > 0:\n print(\"unused pngs: \", np.array(pnglist2basename)[unused])\n for i in unused:\n if listUnused:\n print(\"No match found in list2 for \", np.array(pnglist2basename)[i])\n if sortByTime:\n pnglist1 = sorted(newpnglist1, key=os.path.getmtime)\n idx = []\n for p in pnglist1:\n idx.append(newpnglist1.index(p))\n pnglist2 = list(np.array(newpnglist2)[idx])\n else:\n pnglist1 = newpnglist1\n pnglist2 = newpnglist2\n if appendUnused:\n for i in unused:\n print(\"Appending unused png: \", pnglist2basename[i])\n pnglist1.append(pnglist2basename[i])\n pnglist2.append(pnglist2basename[i])\n else:\n print(\"unrecognized mode: \", mode)\n return\n if (pnglist3 is not None):\n l3 = len(pnglist3)\n if (l1 != l3):\n print(\"The three lists are not of equal length (%d vs. %d vs. %d)\" % (l1, l2, l3))\n return\n if (pnglist4 is not None):\n l4 = len(pnglist4)\n if (l1 != l4):\n print(\"The four lists are not of equal length (%d vs. %d vs. 
%d vs. %d)" % (l1, l2, l3, l4))\n return\n \n plotfiles = []\n dirname = os.path.dirname(pdfname)\n if (dirname != ''):\n if (os.path.exists(dirname) == False):\n print("Creating directory: ", dirname)\n os.mkdir(dirname)\n dirname += '/'\n png3 = None\n png4 = None\n if (maxpages <= 0):\n npages = len(pnglist1)\n else:\n npages = maxpages\n print("Making %d-%d = %d pages" % (npages,startpage, npages-startpage))\n if (pdfname == ''):\n pdfname = '%dpages.pdf' % (npages)\n for i in range(startpage,npages):\n page = dirname + os.path.splitext(os.path.basename(pdfname))[0] + ".page%03d.%s" % (i,filetype)\n plotfiles.append(page)\n if (pnglist3 is not None):\n png3 = pnglist3[i]\n if (pnglist4 is not None):\n png4 = pnglist4[i]\n if verbose: \n print("Calling montagePngs('%s','%s',outname='%s',filetype='%s',verbose=%s,trim=%s)" % (pnglist1[i],pnglist2[i],page,filetype,verbose,trim))\n if computeDiff:\n if os.stat(pnglist1[i]).st_size < 1:\n print("WARNING: zero-size png1: ", pnglist1[i])\n elif os.stat(pnglist2[i]).st_size < 1:\n print("WARNING: zero-size png2: ", pnglist2[i])\n else:\n mydiff = comparePngs(pnglist1[i], pnglist2[i], cleanup=True)\n if mydiff > diffThreshold:\n print("%d: difference = %f. Rebuilding diff plot." % (i+1, mydiff))\n mydiff = comparePngs(pnglist1[i], pnglist2[i], cleanup=False)\n montagePngs(pnglist1[i], pnglist2[i], outname=page, background=background,\n sidebyside=sidebyside, spacing=spacing, geometry=geometry, \n png3=png3, png4=png4, labelImages=labelImages, trim=trim, pointsize=pointsize,\n labelOverwrite=labelOverwrite,verbose=verbose, filetype=filetype, montage=montage)\n if ((i+1) % 10 == 0):\n print("Done page %d" % (i+1))\n buildPdfFromPngs(plotfiles, pdfname=pdfname, cleanup=cleanup, filetype=filetype)\n if cleanup:\n for i in plotfiles:\n if os.path.exists(i):\n os.remove(i)\n return(pdfname)", "title": "" }, { "docid": "3d2bcc1e07be55a4215f5833b35c509e", "score": "0.5097262", "text": "def training_patch_paths(self):\n all_patches = self.__patch_finder()\n tumor_patches = all_patches[0]\n print(tumor_patches)\n # print(tumor_patches)\n\n normal_patches = all_patches[1]\n print(normal_patches)\n\n # exclude normal patches from not fully annotated tumor slides\n for i in range(len(self.exclude_normal_list)):\n normal_patches = normal_patches[\n ~normal_patches.patch_path.str.contains(self.exclude_normal_list[i])]\n\n print(len(normal_patches))\n\n # oversample the tumor patches\n\n tumor_patches = resample(tumor_patches, replace=True,\n n_samples=len(normal_patches), random_state=123)\n\n # get the time stamp\n\n time_of_saving = datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")\n tumor_patches.to_csv('%s/tumor_patches_%s.csv' %\n (self.path_to_save_model, time_of_saving))\n\n ###########################################################################\n # separate training and validation patches for both normal and tumor slides\n ###########################################################################\n training_patches_normal = normal_patches\n validation_patches_normal = pd.DataFrame(\n columns=['patch_path', 'is_tumor'])\n training_patches_tumor = tumor_patches\n validation_patches_tumor = pd.DataFrame(\n columns=['patch_path', 'is_tumor'])\n\n for i in range(len(self.validation_slides_normal)):\n training_patches_normal = training_patches_normal[\n ~training_patches_normal.patch_path.str.contains(self.validation_slides_normal[i])]\n\n for i in range(len(self.validation_slides_normal)):\n to_be_append_normal = normal_patches[\n 
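# rows whose patch path contains this validation slide's name\n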
normal_patches.patch_path.str.contains(self.validation_slides_normal[i])]\n validation_patches_normal = validation_patches_normal.append(\n to_be_append_normal, ignore_index=True)\n\n for i in range(len(self.validation_slides_tumor)):\n training_patches_tumor = training_patches_tumor[\n ~training_patches_tumor.patch_path.str.contains(self.validation_slides_tumor[i])]\n\n for i in range(len(self.validation_slides_tumor)):\n to_be_append_tumor = tumor_patches[\n tumor_patches.patch_path.str.contains(self.validation_slides_tumor[i])]\n validation_patches_tumor = validation_patches_tumor.append(\n to_be_append_tumor, ignore_index=True)\n # print(to_be_append_tumor)\n # print(validation_patches_tumor)\n\n # validation_patches_tumor = tumor_patches[~training_patches_tumor.patch_path]\n print(len(training_patches_tumor))\n print(len(training_patches_normal))\n print(len(validation_patches_tumor))\n print(len(validation_patches_normal))\n\n # ### keep only 3DHISTECH images################################\n if self.IIIdhistech_only:\n training_patches_tumor_3dhistech = training_patches_tumor[training_patches_tumor['patch_path'].map(\n lambda x: int(osp.splitext(osp.basename(x))[0][6:9]) < 71 or int(osp.splitext(osp.basename(x))[0][6:9]) > 100)]\n\n training_patches_normal_3dhistech = training_patches_normal[training_patches_normal['patch_path'].map(lambda x: int(osp.splitext(osp.basename(x))[0][6:9]) < 71 or int(\n osp.splitext(osp.basename(x))[0][6:9]) > 100 if osp.basename(x).startswith('tumor') else int(osp.splitext(osp.basename(x))[0][7:10]) < 101)]\n\n validation_patches_tumor_3dhistech = validation_patches_tumor[validation_patches_tumor['patch_path'].map(\n lambda x: int(osp.splitext(osp.basename(x))[0][6:9]) < 71 or int(osp.splitext(osp.basename(x))[0][6:9]) > 100)]\n\n validation_patches_normal_3dhistech = validation_patches_normal[validation_patches_normal['patch_path'].map(lambda x: int(osp.splitext(osp.basename(\n x))[0][6:9]) < 71 or int(osp.splitext(osp.basename(x))[0][6:9]) > 100 if osp.basename(x).startswith('tumor') else int(osp.splitext(osp.basename(x))[0][7:10]) < 101)]\n # get the number of patches\n print(len(training_patches_tumor_3dhistech))\n print(len(training_patches_normal_3dhistech))\n print(len(validation_patches_tumor_3dhistech))\n print(len(validation_patches_normal_3dhistech))\n\n # get the patches for training and validation\n\n training_patches = pd.concat(\n [training_patches_tumor_3dhistech, training_patches_normal_3dhistech])\n validation_patches = pd.concat(\n [validation_patches_tumor_3dhistech, validation_patches_normal_3dhistech])\n\n else:\n\n training_patches = pd.concat(\n [training_patches_tumor, training_patches_normal])\n validation_patches = pd.concat(\n [validation_patches_tumor, validation_patches_normal])\n\n return (training_patches, validation_patches)", "title": "" }, { "docid": "5c82035841fa0ce75c1a8f0d796cd107", "score": "0.50802016", "text": "def rename_slide_parts(self, rIds):\n for idx, rId in enumerate(rIds):\n slide_part = self.related_parts[rId]\n slide_part.partname = PackURI(\n '/ppt/slides/slide%d.xml' % (idx+1)\n )", "title": "" }, { "docid": "aa38572054083ca2632d31905ac806da", "score": "0.5049692", "text": "def make_patches(dataset_path, export_path, patch_size=(512, 512), no_overlap=False):\n # make output directories\n dataset_images_path = os.path.join(dataset_path, 'images')\n dataset_masks_path = os.path.join(dataset_path, 'masks')\n new_images_path = os.path.join(export_path, 'images')\n new_masks_path = 
os.path.join(export_path, 'masks')\n\n chk_mkdir(new_masks_path, new_images_path)\n\n for image_filename in os.listdir(dataset_images_path):\n # reading images\n im = io.imread(os.path.join(dataset_images_path, image_filename))\n masked_im = io.imread(os.path.join(dataset_masks_path, image_filename))\n # make new folders\n\n x_start = list()\n y_start = list()\n\n if no_overlap:\n x_step = patch_size[0]\n y_step = patch_size[1]\n else:\n x_step = patch_size[0] // 2\n y_step = patch_size[1] // 2\n\n for x_idx in range(0, im.shape[0] - patch_size[0] + 1, x_step):\n x_start.append(x_idx)\n\n if im.shape[0] - patch_size[0] - 1 > 0:\n x_start.append(im.shape[0] - patch_size[0] - 1)\n\n for y_idx in range(0, im.shape[1] - patch_size[1] + 1, y_step):\n y_start.append(y_idx)\n\n if im.shape[1] - patch_size[1] - 1 > 0:\n y_start.append(im.shape[1] - patch_size[1] - 1)\n\n for num, (x_idx, y_idx) in enumerate(product(x_start, y_start)):\n new_image_filename = os.path.splitext(image_filename)[0] + '_%d.png' % num\n # saving a patch of the original image\n io.imsave(\n os.path.join(new_images_path, new_image_filename),\n im[x_idx:x_idx + patch_size[0], y_idx:y_idx + patch_size[1], :]\n )\n # saving the corresponding patch of the mask\n io.imsave(\n os.path.join(new_masks_path, new_image_filename),\n masked_im[x_idx:x_idx + patch_size[0], y_idx:y_idx + patch_size[1]]\n )", "title": "" }, { "docid": "66a557b49fee228cf6967fdd4572344a", "score": "0.5040691", "text": "def update_slides(self,score_month,score_year,brand_name,demense_name):\n pyear,pmonth = common.previous_period(year=score_year,month=score_month)\n content = prs.slides\n targets = {\n '<YYYY>': str(score_year),\n '<MONTH>': datetime.date(1900, score_month, 1).strftime('%B'),\n '<BRAND>': brand_name,\n '<DEMENSE>':demense_name,\n '<Y-1>': str(pyear),\n '<M-1>': datetime.date(1900, pmonth, 1).strftime('%B'),\n }\n for slide in content:\n self._update_slides(slide.shapes,targets)\n\n self._update_slides(prs.slide_master.shapes,targets,' ')\n prs.save(outputFile)", "title": "" }, { "docid": "bffd1e091063e760780fad7cf92c7918", "score": "0.50278246", "text": "def generate_patches(self):\n num_patches = len(self.positive_sample_info) + len(self.negative_sample_info)\n\n debug('Extracting patches')\n start = time.process_time()\n\n self.extract_positive_patches()\n self.extract_negative_patches()\n\n end = time.process_time()\n debug(f'Extracted {num_patches} patches in {end-start} seconds')", "title": "" }, { "docid": "e6d447b6b21d1889f470e6610201c81d", "score": "0.50045466", "text": "def genebmp(dirName,fn):\r\n print ('load dicom files in :',f, 'scan name:',fn)\r\n #directory for patches\r\n \r\n bmp_dir = os.path.join(dirName, scanbmp)\r\n# print bmp_dir\r\n# remove_folder(bmp_dir) \r\n if os.path.exists(bmp_dir) ==False :\r\n os.mkdir(bmp_dir) \r\n #list dcm files\r\n fileList = os.listdir(dirName)\r\n for filename in fileList:\r\n# print(filename)\r\n if \".dcm\" in filename.lower() and filename==fn: # check whether the file's DICOM\r\n FilesDCM =(os.path.join(dirName,filename)) \r\n# \r\n ds = dicom.read_file(FilesDCM)\r\n endnumslice=filename.find('.dcm')\r\n imgcore=filename[0:endnumslice]+'.'+typei\r\n# imgcore=filename[0:endnumslice]+'.dcm'\r\n\r\n# print imgcore\r\n bmpfile=os.path.join(bmp_dir,imgcore)\r\n scipy.misc.imsave(bmpfile, ds.pixel_array)\r\n if np.shape(ds.pixel_array)[0] !=dimtabx:\r\n orig= Image.open(bmpfile,'r') \r\n ncr=orig.resize((dimtabx,dimtaby),PIL.Image.ANTIALIAS)\r\n del orig\r\n ncr.save(bmpfile)\r\n# print 
np.shape(ds.pixel_array)\r\n \r\n# ds.save_as(bmpfile)\r\n\r\n #chek if lung mask present \r\n if lungmask == filename:\r\n \r\n lung_dir = os.path.join(dirName, lungmask)\r\n lung_bmp_dir = os.path.join(lung_dir,lungmaskbmp)\r\n lunglist = os.listdir(lung_dir)\r\n# remove_folder(lung_bmp_dir)\r\n# if lungmaskbmp not in lunglist:\r\n if os.path.exists(lung_bmp_dir) ==False :\r\n os.mkdir(lung_bmp_dir)\r\n# print(lung_bmp_dir)\r\n for lungfile in lunglist:\r\n# print(lungfile)\r\n if \".dcm\" in lungfile.lower(): # check whether the file's DICOM\r\n lungDCM =os.path.join(lung_dir,lungfile) \r\n dslung = dicom.read_file(lungDCM)\r\n endnumslice=lungfile.find('.dcm')\r\n lungcore=lungfile[0:endnumslice]+'.'+typei\r\n lungcoref=os.path.join(lung_bmp_dir,lungcore)\r\n scipy.misc.imsave(lungcoref, dslung.pixel_array)\r\n if np.shape(dslung.pixel_array)[0] !=dimtabx:\r\n orig= Image.open(lungcoref,'r')\r\n ncr=orig.resize((dimtabx,dimtaby),PIL.Image.ANTIALIAS)\r\n del orig\r\n ncr.save(lungcoref)", "title": "" }, { "docid": "e4eb4418e7a64d1808dffa55a4c02564", "score": "0.50043976", "text": "def _extract_patches(tuples, max_per_frame=1, trials_per_tuple=100, flow_threshold=25.0, jumpcut_threshold=np.inf,\n workers=0):\n\n tick_t = timer()\n print('===> Extracting patches...')\n\n if workers != 0:\n parallel = Parallel(n_jobs=workers, backend='threading', verbose=5)\n tuples_per_job = len(tuples) // workers + 1\n result = parallel(\n delayed(_extract_patches_worker)(tuples[i:i + tuples_per_job], max_per_frame, trials_per_tuple,\n flow_threshold, jumpcut_threshold) for i in\n range(0, len(tuples), tuples_per_job))\n patches = sum(result, [])\n else:\n patches = _extract_patches_worker(tuples, max_per_frame, trials_per_tuple, flow_threshold, jumpcut_threshold)\n\n tock_t = timer()\n print(\"Done. 
Took ~{}s\".format(round(tock_t - tick_t)))\n\n return patches", "title": "" }, { "docid": "693470b2346e1cdb09eb19c3b301c3d2", "score": "0.49989954", "text": "def slides_to_pic(slides, labels,original_pic_dim):#)num_pix_x, num_pix_y):\n # Setting up the basic dimonsions of the pictures.\n num_pix_x = slides.shape[2]\n num_pix_y = slides.shape[3]\n num_rows = original_pic_dim[0]\n num_cols = original_pic_dim[1]\n rows_float = num_rows / num_pix_x\n cols_float = num_cols / num_pix_y\n rows_odd = math.ceil((2*rows_float)%2) - 1\n cols_odd = math.ceil((2*cols_float)%2) - 1\n rows = math.floor(rows_float)\n cols = math.floor(cols_float)\n x_end = 2*rows + rows_odd\n y_end = 2*cols + cols_odd\n # Create empty list of boxes to draw\n boxes_tb_drawn =[]\n # Loading the pictures and in this case only adding the lowenergy part.\n #totalimsize = int((len(slides[0]) - 1)/2)\n #slides_re = slides[:,:totalimsize].reshape(-1,num_pix_x,num_pix_y)\n # Create the resulting initial picture.\n pics = []\n for channel in range(slides.shape[1]):\n pic = np.zeros((num_rows,num_cols))\n c=0\n for x in range(0,x_end):#2*rows-1):\n extendx = x== x_end-1\n xs =int( ((x%2)*num_pix_x/2 + math.floor(x/2)*num_pix_x) * (1-extendx) + extendx * (num_rows-num_pix_x))\n for y in range(0,y_end):#2*cols-1):\n extendy = y==y_end-1\n ys =int( ((y%2)*num_pix_y/2 + math.floor(y/2)*num_pix_y) * (1-extendy) + extendy * (num_cols-num_pix_y))\n pic[xs:xs+num_pix_x, ys:ys+num_pix_y] = slides[c,channel]\n if labels[c] ==1 and channel==0:\n boxes_tb_drawn.append([xs,ys])\n c+=1\n pics.append(pic)\n return np.array(pics), boxes_tb_drawn", "title": "" }, { "docid": "d6c59cd223048b55597b3e461a1385a7", "score": "0.4975566", "text": "def extract_positive_patches(self):\n gsd = C.TRAIN.SAMPLES.GENERATOR.GSD\n progress = tqdm(self.positive_sample_info, desc='Generating positive patches')\n for i, row in enumerate(progress):\n name, center_x, center_y, original_angle, length, width = row\n\n path = os.path.join(C.TRAIN.SAMPLES.DIR, name)\n if os.path.isfile(path):\n warn(\"Skipping {}: file already exists. 
Remove existing files first to regenerate.\")\n continue\n dirname = os.path.dirname(path)\n os.makedirs(dirname, exist_ok=True)\n\n data = self.get_patch_xyr(center_x, center_y, 0, 0, -original_angle, radius_in_pixels=self.radius)\n obb = OrientedBoundingBox.from_rot_length_width((0, 0), 0, length / gsd, width / gsd)\n\n patch = Patch(\n name=name,\n obb=obb,\n volumetric=data[3:],\n rgb=data[:3],\n label=1,\n dr_dc_angle=(0, 0, 0),\n ori_xy=(center_x, center_y))\n\n with open(path, 'wb') as handle:\n pickle.dump(patch, handle, protocol=pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "0ed6e878be9bcf733a30714df9b68d3b", "score": "0.49720022", "text": "def _cache_patches_worker(cache_dir, patches):\n for p in patches:\n patch_id = str(random.randint(1e10, 1e16))\n frames = load_patch(p)\n for i in range(3):\n file_name = '{}_{}.jpg'.format(patch_id, i)\n frames[i].save(join(cache_dir, file_name))", "title": "" }, { "docid": "9c427caf68beffbca72412551c95099f", "score": "0.497115", "text": "def slice_folder(self, input_path):\n print(\"Parsing folder: \" + input_path)\n if input_path[-1] == self.sep:\n path_out = input_path[:-1] + suffix_sliced\n else:\n path_out = input_path + suffix_sliced\n try:\n os.mkdir(path_out)\n except OSError:\n pass\n for files in sorted(os.listdir(input_path)):\n ext = files[-3:].lower()\n if ext in images:\n file_to_add = os.path.join(input_path, files)\n im = Image.open(file_to_add)\n (w, h) = im.size\n if w > h:\n if self.verbose: # Note: If verbose\n print(\" Slicing \", file_to_add, str(w) + \"x\" + str(h))\n img_a, img_b = self.image_slice(file_to_add)\n if self.reverse: # If it's a manga, then the first page is the right page\n img_a, img_b = img_b, img_a\n for c, im in enumerate([img_a, img_b]):\n im.save(f\"{path_out}/{files[:-4]}_{c}.jpg\")\n else:\n shutil.copy(file_to_add, path_out)\n return path_out", "title": "" }, { "docid": "617cd285a15113de7a2bc40a21b9770a", "score": "0.49052998", "text": "def _mdgel_series(self):\n # only a single page, scaled according to metadata in second page\n self.pages.useframes = False\n self.pages.keyframe = 0\n self.pages.load()\n md = self.mdgel_metadata\n if md[\"FileTag\"] in (2, 128):\n dtype = numpy.dtype(\"float32\")\n scale = md[\"ScalePixel\"]\n scale = scale[0] / scale[1] # rational\n if md[\"FileTag\"] == 2:\n # squary root data format\n def transform(a):\n return a.astype(\"float32\") ** 2 * scale\n\n else:\n\n def transform(a):\n return a.astype(\"float32\") * scale\n\n else:\n transform = None\n page = self.pages[0]\n return [\n TiffPageSeries(\n [page], page.shape, dtype, page.axes, transform=transform, stype=\"MDGel\"\n )\n ]", "title": "" }, { "docid": "02f469dccda8f0047046ba3f29b17cd8", "score": "0.49051732", "text": "def patch_images(dark_images: np.ndarray,\n light_images: np.ndarray,\n size: int, stride: int) -> (np.ndarray, np.ndarray):\n dark_patches = []\n light_patches = []\n for i in range(dark_images.shape[0]):\n for p in range(0, dark_images.shape[1] - size + stride, stride):\n for q in range(0, dark_images.shape[2] - size + stride, stride):\n if (p + size <= dark_images.shape[1]) and (q + size <= dark_images.shape[2]):\n dark_patches.append(dark_images[i, p:p + size, q:q + size])\n light_patches.append(light_images[i, p:p + size, q:q + size])\n # print(dark_images[i, p:p + size, q:q + size].shape)\n\n return np.array(dark_patches), np.array(light_patches)", "title": "" }, { "docid": "7df7ef25fae1a446f2fc08671d940e61", "score": "0.49042118", "text": "def 
images_example(path='train_images.pickle'):\r\n patch_size = (8, 8)\r\n\r\n with open('train_images.pickle', 'rb') as f:\r\n train_pictures = pickle.load(f)\r\n\r\n patches = sample_patches(train_pictures, psize=patch_size, n=20000)\r\n\r\n plt.figure()\r\n plt.imshow(train_pictures[0])\r\n plt.title(\"Picture Example\")\r\n\r\n plt.figure()\r\n for i in range(4):\r\n plt.subplot(2, 2, i + 1)\r\n plt.imshow(patches[:, i].reshape(patch_size), cmap='gray')\r\n plt.title(\"Patch Example\")\r\n plt.show()", "title": "" }, { "docid": "b9ad06ab878622ed4389133e5a2070c2", "score": "0.48981017", "text": "def images_example(path='train_images.pickle'):\n patch_size = (8, 8)\n\n with open(path, 'rb') as f:\n train_pictures = pickle.load(f)\n\n patches = sample_patches(train_pictures, psize=patch_size, n=20000)\n\n plt.figure()\n plt.imshow(train_pictures[0])\n plt.title(\"Picture Example\")\n\n plt.figure()\n for i in range(4):\n plt.subplot(2, 2, i + 1)\n plt.imshow(patches[:, i].reshape(patch_size), cmap='gray')\n plt.title(\"Patch Example\")\n plt.show()", "title": "" }, { "docid": "62d490580e1b39beb3cda197ec9d17c3", "score": "0.4897035", "text": "def load_patch(patch):\n paths = (patch['left_frame'], patch['middle_frame'], patch['right_frame'])\n i, j = (patch['patch_i'], patch['patch_j'])\n imgs = [load_img(x) for x in paths]\n h, w = config.PATCH_SIZE\n return tuple(crop_image(x, i, j, h, w) for x in imgs)", "title": "" }, { "docid": "5ad3150a652bab55763a4f7c5edf8e20", "score": "0.48958212", "text": "def getSlideshowImages():", "title": "" }, { "docid": "9766e1f097a49598d8dffe6c2f239103", "score": "0.48944816", "text": "def get_patches(self, cv_kpts):\n # generate sampling grids.\n n_pixel = np.square(self.patch_size)\n self.output_grid = np.zeros((n_pixel, 3), dtype=np.float32)\n for i in range(n_pixel):\n self.output_grid[i, 0] = (i % self.patch_size) * 1. / self.patch_size * 2 - 1\n self.output_grid[i, 1] = (i / self.patch_size) * 1. 
/ self.patch_size * 2 - 1\n self.output_grid[i, 2] = 1\n\n scale_index = [[] for i in range(len(self.pyr))]\n for idx, val in enumerate(cv_kpts):\n octave, layer, _ = unpack_octave(val)\n scale_val = (int(octave) - self.first_octave) * (self.n_octave_layers + 3) #+ int(layer)\n if scale_val >= len(self.pyr):\n print(\"Index[\", idx, \"] The scale value is \", scale_val, \", but the pyr is\", len(self.pyr))\n scale_val = len(self.pyr) - 1\n scale_index[scale_val].append(idx)\n\n all_patches = []\n for idx, val in enumerate(scale_index):\n tmp_cv_kpts = [cv_kpts[i] for i in val]\n scale_img = self.pyr[idx]\n # radius = self.sift_descr_scl_fctr * size * np.sqrt(2) * (self.sift_descr_width + 1) * 0.5\n patches = get_interest_region(scale_img, tmp_cv_kpts, self.output_grid,\n self.sift_descr_width, self.sift_descr_scl_fctr,\n self.patch_size)\n if patches is not None:\n all_patches.append(patches)\n\n if self.down_octave:\n all_patches = np.concatenate(all_patches[::-1], axis=0)\n else:\n all_patches = np.concatenate(all_patches, axis=0)\n assert len(cv_kpts) == all_patches.shape[0]\n return all_patches", "title": "" }, { "docid": "2b93e4eb66499e5c4575b7c17d53c5fb", "score": "0.48930573", "text": "def genebmp(dirName):\r\n print ('generate bmp files from dicom files in :',f)\r\n #directory for patches\r\n bmp_dir = os.path.join(dirName, bmpname)\r\n remove_folder(bmp_dir) \r\n os.mkdir(bmp_dir)\r\n bgdirf = os.path.join(dirName, bgdir)\r\n remove_folder(bgdirf) \r\n os.mkdir(bgdirf)\r\n \r\n #list dcm files\r\n fileList = os.listdir(dirName)\r\n\r\n for filename in fileList:\r\n# print(filename)\r\n if \".dcm\" in filename.lower(): # check whether the file's DICOM\r\n FilesDCM =(os.path.join(dirName,filename)) \r\n# \r\n ds = dicom.read_file(FilesDCM)\r\n endnumslice=filename.find('.dcm')\r\n imgcore=filename[0:endnumslice]+'.'+typei\r\n# print imgcore\r\n bmpfile=os.path.join(bmp_dir,imgcore)\r\n scipy.misc.imsave(bmpfile, ds.pixel_array)\r\n \r\n posend=endnumslice\r\n while filename.find('-',posend)==-1:\r\n posend-=1\r\n debnumslice=posend+1\r\n slicenumber=int((filename[debnumslice:endnumslice])) \r\n namescan=os.path.join(sroidir,imgcore) \r\n textw='n: '+f+' scan: '+str(slicenumber)\r\n orign = Image.open(bmpfile)\r\n imscanc= orign.convert('RGB')\r\n tablscan = np.array(imscanc)\r\n if np.shape(tablscan)[0]==768L:\r\n# print ('size of image:',np.shape(tablscan)[0])\r\n if f not in listOverSize:\r\n print ('size of image:',np.shape(tablscan)[0])\r\n listOverSize.append(f)\r\n scipy.misc.imsave(namescan, tablscan)\r\n tagviews(namescan,textw,0,20) \r\n# print lungmask, filename\r\n \r\n \r\n lung_dir = os.path.join(dirName, lungmask)\r\n# print lung_dir\r\n lung_bmp_dir = os.path.join(lung_dir,lungmaskbmp)\r\n lunglist = os.listdir(lung_dir)\r\n remove_folder(lung_bmp_dir)\r\n# if lungmaskbmp not in lunglist:\r\n os.mkdir(lung_bmp_dir)\r\n# print(lung_bmp_dir)\r\n for lungfile in lunglist:\r\n# print(lungfile)\r\n if \".dcm\" in lungfile.lower(): # check whether the file's DICOM\r\n lungDCM =os.path.join(lung_dir,lungfile) \r\n dslung = dicom.read_file(lungDCM)\r\n endnumslice=lungfile.find('.dcm')\r\n lungcore=lungfile[0:endnumslice]+'.'+typei\r\n lungcoref=os.path.join(lung_bmp_dir,lungcore)\r\n scipy.misc.imsave(lungcoref, dslung.pixel_array)\r\n bgdirflm=os.path.join(bgdirf,lungcore)\r\n# print lungcoref,bgdirflm\r\n shutil.copyfile(lungcoref,bgdirflm)\r\n o = Image.open(bgdirflm,'r')\r\n t=np.array(o)\r\n# print bgdirflm\r\n del o\r\n np.putmask(t,t>0,100)\r\n 
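# flatten the lung mask to a uniform gray level: every nonzero pixel becomes 100\r\n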
scipy.misc.imsave(bgdirflm, t)", "title": "" }, { "docid": "e8e286ef705e0f0b7044ca6362b19717", "score": "0.48802644", "text": "def extract_data(filename, num_images, preprocess=None):\n imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n #print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n if preprocess:\n img = preprocess(img)\n print('image ' + str(i) + ' of ' + str(num_images) + ' preprocessed')\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = imgs[0].shape[0]\n IMG_HEIGHT = imgs[0].shape[1]\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n img_patches = [img_crop(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n\n return numpy.asarray(data)", "title": "" }, { "docid": "d6c09bd2e3cfb54cbeb0e99bbb5df0be", "score": "0.48777187", "text": "def divide(self, res):\n \n lp = []\n for i in range(len(self.lp)-1):\n lp.extend(self._division2(self.lp[i], self.lp[i+1], res))\n \n #--- Remove doble points\n b = [lp[i] for i in range(len(lp)-1) if lp[i]!=lp[i+1]]\n b.append(lp[len(lp)-1]) #-- Add the last point\n \n return Figure(b)", "title": "" }, { "docid": "0850daf5e1594f064689d4739a4a4bf9", "score": "0.48749578", "text": "def process_all_files(list_indices):\n patches = try_to_load_as_pickled_object('data/patches_' + str(list_indices[0]) + '.pkl')\n labels = try_to_load_as_pickled_object('data/labels_patches_' + str(list_indices[0]) + '.pkl')\n for i in list_indices[1:]:\n tmp1 = try_to_load_as_pickled_object('data/patches_' + str(i) + '.pkl')\n patches = np.append(patches, tmp1, axis=0)\n tmp2 = try_to_load_as_pickled_object('data/labels_patches_' + str(i) + '.pkl')\n labels = np.append(labels, tmp2, axis=0)\n print(\"loaded data \" + str(i))\n return patches, labels\n #save_obj(patches, 'data/patches')\n #save_obj(labels, 'data/labels_patches')", "title": "" }, { "docid": "4d18989c0e0cd4d434e345728394c752", "score": "0.4848752", "text": "def _initialize_slides(self, prs, root_entry):\n\n # Initialize data structures\n self.slides = []\n self.layouts = []\n self.refs = {}\n\n # Iterate through slides\n for i,slide in enumerate(root_entry.data):\n # Get slide layout\n layout_vals = slide.get_values(tag=\"layout\", join=True)\n\n if (len(layout_vals) > 1):\n raise ValueError(\"slide may only have one layout attribute\"\\\n \"\\n{}\".format(self.ppp.error_info(slide)))\n\n # Add layout to list and remove layout entry from slide\n self.layouts.append(layout_vals[0])\n slide.remove(tag=\"layout\")\n\n # Ceate new slide with layout\n prs_layout = prs.slide_layouts[self.template[layout_vals[0]][\"idx\"]]\n self.slides.append(prs.slides.add_slide(prs_layout))\n\n # Get slide reference label\n label_vals = slide.get_values(tag=\"label\", join=True)\n\n # Reference label points to current slide and remove label entry\n if (len(label_vals) == 1):\n self.refs[label_vals[0]] = self.slides[-1]\n slide.remove(tag=\"label\")\n elif (len(label_vals) > 1):\n raise ValueError(\"slide may only have one label attribute\"\\\n \"\\n{}\".format(self.ppp.error_info(slide)))", "title": "" }, { "docid": "73d77e26e48121481a18f9474b850ed0", "score": "0.48472056", "text": "def test():\n pics = glob.glob(\"*.png\")\n filename = 'test.pptx'\n img2ppt( pics, filename)", 
"title": "" }, { "docid": "a8feb13ad2919a539daca4a954fb1ff2", "score": "0.48357326", "text": "def patches(self, stored=False) -> Generator['Patch', None, None]:\n\n for _, group in itertools.groupby(self.patch_elements(stored=stored), key=lambda e: e.version):\n # keep latest sub-patch version of each element\n elements = {}\n for elem in group:\n if elem.name not in elements:\n elements[elem.name] = elem\n yield Patch._create(list(elements.values()))", "title": "" }, { "docid": "48c0f3bc4819490425d909758777b9e5", "score": "0.4798717", "text": "def getSlideshowFolder():", "title": "" }, { "docid": "be68db6cbda7251e10b84ff3bc4cfdc5", "score": "0.4794798", "text": "def load_files(path, WIN_WIDTH=settings.WIN_WIDTH, extensions=settings.FILE_EXTENSIONS, return_notelists=False, overlap=True):\n\n extensions = settings.FILE_EXTENSIONS\n if WIN_WIDTH % 2 != 0:\n return None\n\n score_list = []\n melody_list = []\n map_score_window = []\n notelist_scores = []\n\n file_list = []\n # recurse all directories\n print(extensions)\n for root, subdirs, files in os.walk(path):\n # take just the files with the proper extension\n file_list += [os.path.join(root, fp)\n for fp in files if fp.endswith(extensions)]\n\n if len(file_list) == 0:\n raise Exception(\"No files found with this extension!\")\n\n # extract random files\n if settings.DATASET_PERC < 1:\n num_files = int(settings.DATASET_PERC * len(file_list))\n np.random.seed(1987)\n file_list = np.random.choice(\n file_list, num_files, replacement=False)\n\n for f in sorted(file_list):\n print(\"I've found a new file: \" + f)\n\n note_array = load_piece(f)\n # load pianorolls score and melody\n pr = utils.pianoroll_utils.make_pianorolls(\n note_array, output_idxs=return_notelists)\n score = pr[0]\n melody = pr[1]\n if len(pr) == 4:\n notelist_scores.append(pr[2])\n\n # split in windows\n score_splitted = split_windows(score,\n WIN_WIDTH, overlap)\n melody_splitted = split_windows(melody,\n WIN_WIDTH, overlap)\n # update the map\n counter = len(score_list)\n map_score_window.append(\n [c + counter for c in range(len(score_splitted))])\n\n # update the output list\n score_list += score_splitted\n melody_list += melody_splitted\n\n # reshaping output\n score_out = np.ndarray(shape=(len(score_list), 1, settings.WIN_HEIGHT, WIN_WIDTH),\n buffer=np.array(score_list), dtype=settings.floatX)\n\n melody_out = np.ndarray(shape=(len(melody_list), 1, settings.WIN_HEIGHT, WIN_WIDTH),\n buffer=np.array(melody_list), dtype=settings.floatX)\n\n if return_notelists:\n return score_out, melody_out, map_score_window, np.array(notelist_scores)\n else:\n return score_out, melody_out, map_score_window", "title": "" }, { "docid": "695927e6f8026650988421e61a0ccd34", "score": "0.4779739", "text": "def extract_patch_batch(self, images, start_x, start_y):\n number_of_instances = images.shape[0]\n patch_ground_truth = np.zeros((number_of_instances, patch_width, patch_width, channels))\n\n for i in range(images.shape[0]):\n images[i], patch_ground_truth[i] = self.extract_patch(images[i], start_x, start_y)\n\n return images, patch_ground_truth", "title": "" }, { "docid": "a0323d5661c76347b842bf265ce8a349", "score": "0.47576848", "text": "def _get_patches(bands, patch_dim=64):\n patches = image.extract_patches(bands,\n (patch_dim, patch_dim, 13),\n patch_dim)\n hs, ws = patches.shape[0], patches.shape[1]\n patches = patches.reshape(-1, patch_dim, patch_dim, 13)\n\n last_row = bands[bands.shape[0]-patch_dim:, :, :]\n last_column = bands[:, bands.shape[1]-patch_dim:, :]\n corner = 
np.asarray([bands[bands.shape[0]-patch_dim:,\n bands.shape[1]-patch_dim:,\n :]])\n\n last_column = image.extract_patches(last_column,\n (patch_dim, patch_dim, 13),\n patch_dim).reshape(-1,\n patch_dim,\n patch_dim,\n 13)\n last_row = image.extract_patches(last_row,\n (patch_dim, patch_dim, 13),\n patch_dim).reshape(-1,\n patch_dim,\n patch_dim,\n 13)\n\n lc = last_column.shape[0]\n lr = last_row.shape[0]\n\n patches = np.vstack((patches, last_column, last_row, corner))\n return patches, hs, ws, lc, lr, bands.shape[0], bands.shape[1]", "title": "" }, { "docid": "a259de79f0fb1595ba1e3c704cecea56", "score": "0.47515467", "text": "def generate_patches(opt, validation_city):\n # load day 1 and 2 bands\n d1_path = get_path([opt.dataset_dir, 'images/', validation_city, '/imgs_1/*'])\n d2_path = get_path([opt.dataset_dir, 'images/', validation_city, '/imgs_2/*'])\n d1_bands = glob.glob(d1_path)\n d2_bands = glob.glob(d2_path)\n\n # sort bands to ensure that B01 -> B12 order\n d1_bands.sort()\n d2_bands.sort()\n\n # load band 2 from d1 bands to get template image dimensions\n template = rasterio.open(d1_bands[2])\n\n # TEMPORARY FIX: switching width and height seems to fix image generation\n city_dir = get_path([opt.dataset_dir, 'images/', validation_city])\n imgs_stacked = city_loader([city_dir, template.width, template.height, opt])\n\n d1 = imgs_stacked[0]\n d2 = imgs_stacked[1]\n\n # move image depth\n d1 = d1.transpose(1, 2, 0)\n d2 = d2.transpose(1, 2, 0)\n\n patches1, hs, ws, lc, lr, h, w = _get_patches(d1, patch_dim=opt.patch_size)\n patches1 = patches1.transpose(0, 3, 1, 2)\n\n print('Patches 1 Created')\n\n patches2, hs, ws, lc, lr, h, w = _get_patches(d2, patch_dim=opt.patch_size)\n patches2 = patches2.transpose(0, 3, 1, 2)\n\n print('Patches 2 Created')\n return patches1, patches2, hs, ws, lc, lr, h, w", "title": "" }, { "docid": "db2abc0ad0eaa9af0a3214a64a9d9503", "score": "0.47486103", "text": "def load_left_right_pngs(dir_name, expected_number):\n left_images = []\n files = glob.glob(os.path.join(dir_name, \"left\", \"*.png\"))\n files.sort()\n for file in files:\n image = cv2.imread(file)\n left_images.append(image)\n assert len(left_images) == expected_number\n\n right_images = []\n files = glob.glob(os.path.join(dir_name, \"right\", \"*.png\"))\n files.sort()\n for file in files:\n image = cv2.imread(file)\n right_images.append(image)\n assert len(right_images) == expected_number\n\n return left_images, right_images", "title": "" }, { "docid": "cee49886b91da00a1d7aeecab16dd1bf", "score": "0.47476342", "text": "def getAllWallpapers():\n\n \n # Find all current wallpapers\n mydir = getattr(sys, '_MEIPASS','.')+'/'\n myWallpapers = [mydir + image for image in os.listdir(mydir) if image.endswith(\".jpg\")]\n\n return myWallpapers", "title": "" }, { "docid": "010ea2959b73515030cce5297936252d", "score": "0.4739947", "text": "def _extract_patches_worker(tuples, max_per_frame=1, trials_per_tuple=100, flow_threshold=0.0,\n jumpcut_threshold=np.inf):\n\n patch_h, patch_w = config.PATCH_SIZE\n n_tuples = len(tuples)\n all_patches = []\n jumpcuts = 0\n flowfiltered = 0\n total_iters = n_tuples * trials_per_tuple\n\n pil_to_numpy = lambda x: np.array(x)[:, :, ::-1]\n\n for tup_index in range(n_tuples):\n tup = tuples[tup_index]\n\n left, middle, right = (load_img(x) for x in tup)\n img_w, img_h = left.size\n\n left = pil_to_numpy(left)\n middle = pil_to_numpy(middle)\n right = pil_to_numpy(right)\n\n selected_patches = []\n\n for _ in range(trials_per_tuple):\n\n i = random.randint(0, img_h - 
patch_h)\n            j = random.randint(0, img_w - patch_w)\n\n            left_patch = left[i:i + patch_h, j:j + patch_w, :]\n            right_patch = right[i:i + patch_h, j:j + patch_w, :]\n            middle_patch = middle[i:i + patch_h, j:j + patch_w, :]\n\n            if is_jumpcut(left_patch, middle_patch, jumpcut_threshold) or \\\n                    is_jumpcut(middle_patch, right_patch, jumpcut_threshold):\n                jumpcuts += 1\n                continue\n\n            avg_flow = simple_flow(left_patch, right_patch)\n            if random.random() > avg_flow / flow_threshold:\n                flowfiltered += 1\n                continue\n\n            selected_patches.append({\n                \"left_frame\": tup[0],\n                \"middle_frame\": tup[1],\n                \"right_frame\": tup[2],\n                \"patch_i\": i,\n                \"patch_j\": j,\n                \"avg_flow\": avg_flow\n            })\n\n        selected_patches.sort(key=lambda x: x['avg_flow'], reverse=True)\n        all_patches += selected_patches[:max_per_frame]\n        # print(\"===> Tuple {}/{} ready.\".format(tup_index+1, n_tuples))\n\n    print('===> Processed {} tuples, {} patches extracted, {} discarded as jumpcuts, {} filtered by flow'.format(\n        n_tuples, len(all_patches), 100.0 * jumpcuts / total_iters, 100.0 * flowfiltered / total_iters\n    ))\n\n    return all_patches", "title": ""}, {"docid": "685986cc8b969136f0dbe3c2f522c89a", "score": "0.4738939", "text": "def processFile(fileName):\n    global pnmflips\n    global renumbers\n    global nFiles\n\n    # s = os.system('djpeg -greyscale -fast -scale 1/8 raw/' + fileName\n    #+ ' | pnmfile')\n    b = fileName[:-4].lower()\n    n = b[-4:-1]\n    if renumbers.has_key(n):\n        n = renumbers[n]\n        print b[-4:-1], '->', n\n    else:\n        n += '0' # multiply by 10 to make 4 figures\n\n    if noExplode and b[-1] == 'm':\n        return\n    elif b[-1] == 'm':\n        # Multipicture -- 3x3 grid of frames.\n        print fileName, ' -- split'\n        os.system('djpeg raw/' + fileName + ' >tmp.ppm')\n        i = 0\n        for y in [0, 240, 480]:\n            for x in [0, 320, 640]:\n                os.system('pnmcut %d %d 320 240 tmp.ppm' % (x, y)\n                          + ' | pnmtopng '\n                          '>' + b + '.f' + str(i) + '.png')\n                i += 1\n        nFiles += 1\n    elif explodeOnly:\n        return\n    elif pnmflips.has_key(fileName[4:7]):\n        flip = pnmflips[fileName[4:7]]\n        print fileName, '-- flip', flip\n        os.system('djpeg raw/' + fileName\n                  + ' | pnmflip ' + flip\n                  + ' | cjpeg -prog >' + n + '.' + b + 'r.jpg')\n        # removed + ' | pnmscale 0.5'\n        nFiles += 1\n    else:\n        print fileName\n        os.system('djpeg raw/' + fileName\n                  + ' | cjpeg -prog >' + n + '.' 
+ b + 'r.jpg')\n        nFiles += 1", "title": ""}, {"docid": "f8204b528a663162766c515e6efd6155", "score": "0.4736585", "text": "def img2ppt( pics, outfile ):\n    pres = Presentation()\n    for pic in pics:\n        slide = pres.slides.add_slide( pres.slide_layouts[1] )\n        slide.shapes.add_picture( pic, Inches(0.6), Inches(2))\n    pres.save(outfile)", "title": ""}, {"docid": "9649fe575e1518209191a6282d0c5ae5", "score": "0.47363", "text": "def render_slides(slides):\n    handle, export_file = tempfile.mkstemp('.mp4', dir='./tmp')\n    cmd = f'ffmpeg -v -1 -y -f image2pipe -vcodec ppm -i pipe: -r 30 -b 5000k \"{export_file}\"'\n    ffmpeg = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    pipe = ffmpeg.stdin\n    ffmpeg.stdout.close()\n\n    prev_slide = slides[0]\n    prev_slide.load()\n\n    while slides:\n        slide = slides.popleft()\n        slide.load()\n\n        for frame in range(TRANSITION_FRAMES):\n            save_frame(Image.blend(prev_slide.frame(frame+TRANSITION_FRAMES+prev_slide.slide_frames),\n                                   slide.frame(frame),\n                                   frame/TRANSITION_FRAMES), pipe)\n        for frame in range(slide.slide_frames):\n            save_frame(slide.frame(frame+TRANSITION_FRAMES), pipe)\n\n        prev_slide = slide\n\n    pipe.close()\n    time.sleep(5) # time to finish writing to disk\n\n    return mp.VideoFileClip(export_file)", "title": ""}, {"docid": "8e42f0810053555183a8d32475f845c7", "score": "0.47236136", "text": "def warp_worms(expt_dir, positions):\n    expt_dir = pathlib.Path(expt_dir)\n    annotation_dir = expt_dir / 'annotations'\n    warp_dir = expt_dir / 'warped_images'\n    \n    if not warp_dir.exists():\n        warp_dir.mkdir(exist_ok = True)\n\n    for worm, timepoints in positions.items():\n        annotation_file = list(annotation_dir.glob('*'+worm+'.pickle'))[0]\n        annotations = pickle.load(open(annotation_file, 'rb'))\n        positions, tcks = annotations\n        for tp, imgs in timepoints.items():\n            center_tck, width_tck = tcks[tp]['pose']\n            for img in imgs:\n                warp_file = warp_dir / worm / (img.name)\n                \n                warp_image(center_tck, width_tck, img, warp_file)", "title": ""}, {"docid": "18d7cca33c63785e55de57e696ba4427", "score": "0.47192267", "text": "def regroup_all_raw_data(path_src):\n    try:\n        os.mkdir(os.path.join(path_src, 'processed'))\n    except OSError:\n        sys.stdout.write('WARN: processed folder already exists\\n')\n\n    if not os.path.exists(os.path.join(path_src, 'raw_data')):\n        sys.stdout.write('*** ERROR: Make sure pictures are inside raw_data folder here %s ***\\n' % os.getcwd())\n        os.mkdir(os.path.join(os.getcwd(), 'raw_data'))\n        exit(0)\n\n    output_cmd = subprocess.Popen('find raw_data/ -name *.CR2 -type f', shell=True, stdout=subprocess.PIPE).communicate()\n    pictures_path = list(map(os.path.abspath, output_cmd[0].splitlines()))\n\n    total_pics = len(pictures_path)\n    pics = 1\n\n    for pic_path in pictures_path:\n        n = 1\n        new_pic_name = rename_pictures(pic_path)\n        while os.path.exists(os.path.join(path_src, 'processed', new_pic_name.replace('X', str(n)))):\n            n += 1\n        os.rename(pic_path, os.path.join(path_src, 'processed', new_pic_name.replace('X', str(n))))\n        sys.stdout.write('({}/{}) MV {} to {}\\n'.format(pics, total_pics, pic_path, os.path.join(path_src, 'processed', new_pic_name.replace('X', str(n)))))\n        pics += 1", "title": ""}, {"docid": "6847c7cf81430f4ed5bfc3dbc5145566", "score": "0.4706909", "text": "def process_images(images: (list, np.ndarray), patch_size, stride):\n    demo_patches = extract_patches_ong_img(images[0], patch_size=patch_size, stride=stride)\n    n_patches = demo_patches.shape[0]\n    X = np.zeros([len(images) * n_patches, 
*demo_patches.shape[1:]])\n del demo_patches\n for i, one_image in enumerate(images):\n X[i * n_patches: i * n_patches + n_patches] = extract_patches_ong_img(one_image, patch_size=patch_size, stride=stride)\n\n X = X.reshape(X.shape[0], -1)\n return X.T # n_features * n_samples", "title": "" }, { "docid": "36a4a2b52820a3898b7b45b7ae514944", "score": "0.4701995", "text": "def generate_filename(self):\n folders = glob(os.path.join(self.path, \"Slide_*\"))\n\n for f in folders:\n organ = os.path.basename(f).split(\"Slide_\")[-1]\n organ = mapping[organ]\n f_gt = f.replace(\"Slide\", \"GT\")\n for raw_path in glob(os.path.join(f, \"*.png\")):\n basename = os.path.basename(raw_path)\n basename = basename.replace(\"Slide_\", \"image\")\n basename = basename.replace(\".png\", \"_mask.txt\")\n gt__path = os.path.join(f_gt, basename)\n yield raw_path, gt__path, organ", "title": "" }, { "docid": "5b55e7060f164dcacc454074ea31bc40", "score": "0.4694604", "text": "def pavgene (namedirtopcf):\r\n print('generate patches on: ',f)\r\n# print namedirtopcf\r\n namemask1=os.path.join(namedirtopcf,lungmask)\r\n namemask=os.path.join(namemask1,lungmaskbmp)\r\n# print namemask\r\n bmpdir = os.path.join(namedirtopcf,scanbmp)\r\n# print bmpdir\r\n patchpathf=os.path.join(namedirtopcf,patchpath)\r\n jpegpathf=os.path.join(namedirtopcf,jpegpath)\r\n remove_folder(patchpathf)\r\n os.mkdir(patchpathf)\r\n remove_folder(jpegpathf)\r\n os.mkdir(jpegpathf)\r\n listbmp= os.listdir(bmpdir)\r\n# print(listbmp)\r\n if os.path.exists(namemask):\r\n listlungbmp= os.listdir(namemask) \r\n else:\r\n tflung=False\r\n listlungbmp=[]\r\n for img in listbmp:\r\n# print img\r\n endnumslice=img.find('.bmp')\r\n posend=endnumslice\r\n while img.find('-',posend)==-1:\r\n posend-=1\r\n debnumslice=posend+1\r\n slicenumber=(img[debnumslice:endnumslice])\r\n# print('sln:',slicenumber,'img:', img,debnumslice,endnumslice \r\n slns='_'+str(int(slicenumber))+'.'+typei\r\n# print(slns)\r\n for llung in listlungbmp:\r\n tflung=False\r\n# print(llung)\r\n# print(listlungbmp)\r\n\r\n if llung.find(slns) >0:\r\n tflung=True\r\n lungfile = os.path.join(namemask,llung)\r\n# print(lungfile)\r\n imlung = Image.open(lungfile)\r\n tablung = np.array(imlung)\r\n np.putmask(tablung,tablung>0,1)\r\n\r\n break\r\n if not tflung:\r\n errorfile.write('lung mask not found '+slns+' in: '+f) \r\n print('lung mask not found ',slns,' in: ',f)\r\n tablung = np.ones((dimtabx, dimtaby), dtype='i')\r\n \r\n bmpfile = os.path.join(bmpdir,img)\r\n im = Image.open(bmpfile)\r\n imc= im.convert('RGB')\r\n tabf = np.array(imc) \r\n# pavgene (im,tabim,tablung,slicenumber)\r\n \r\n nz= np.count_nonzero(tablung)\r\n if nz>0:\r\n \r\n atabf = np.nonzero(tablung)\r\n #tab[y][x] convention\r\n xmin=atabf[1].min()\r\n xmax=atabf[1].max()\r\n ymin=atabf[0].min()\r\n ymax=atabf[0].max()\r\n else:\r\n xmin=0\r\n xmax=0\r\n ymin=0\r\n ymax=0\r\n \r\n# atabf = np.nonzero(tablung)\r\n# #tab[y][x] convention\r\n# xmin=atabf[1].min()\r\n# xmax=atabf[1].max()\r\n# ymin=atabf[0].min()\r\n# ymax=atabf[0].max()\r\n i=xmin\r\n while i <= xmax:\r\n j=ymin\r\n # j=maxj\r\n while j<=ymax:\r\n # print(i,j)\r\n tabpatch=tablung[j:j+dimpavy,i:i+dimpavx]\r\n area= tabpatch.sum() \r\n \r\n# check if area above threshold\r\n targ=float(area)/pxy\r\n if targ>thrpatch:\r\n \r\n crorig = im.crop((i, j, i+dimpavx, j+dimpavy))\r\n imagemax=crorig.getbbox()\r\n # detect black patch\r\n # print (imagemax)\r\n if imagemax!=None:\r\n namepatch=patchpathf+'/p_'+slicenumber+'_'+str(i)+'_'+str(j)+'.'+typei\r\n if 
contrast:\r\n tabcont=normi(crorig)\r\n scipy.misc.imsave(namepatch, tabcont)\r\n else:\r\n crorig.save(namepatch)\r\n #we draw the rectange\r\n x=0\r\n while x < dimpavx:\r\n y=0\r\n while y < dimpavy:\r\n tabf[y+j][x+i]=[255,0,0]\r\n if x == 0 or x == dimpavx-1 :\r\n y+=1\r\n else:\r\n y+=dimpavy-1\r\n x+=1 \r\n j+=dimpavy \r\n i+=dimpavx\r\n # im = plt.matshow(tabf)\r\n # plt.colorbar(im,label='with pavage')\r\n scipy.misc.imsave(jpegpathf+'/'+'s_'+slicenumber+'.bmp', tabf)", "title": "" }, { "docid": "4d10bf9e135ffd80e3ccb7429452a86b", "score": "0.46922275", "text": "def move_slides(self, no_of_extra_slide):\n index_map = {}\n ## Object for Input file\n inputFile = './templates/BDRC_2016_Input.pptx'\n inputFileprs = os.getcwd() + '/' + inputFile\n prs = Presentation(inputFileprs)\n max_length = prs.slides.__len__()\n outputFileprs = outputFile\n desprs = Presentation(outputFileprs)\n no_of_extra_slide.keys()\n ## Manage Slide index order sortted format\n source_index_list = no_of_extra_slide.keys()\n source_index_list.sort()\n\n for item, source_index in enumerate(source_index_list):\n if item != 0:\n extra_slide = no_of_extra_slide[\n source_index_list[item - 1]] - 1\n next_key = source_index + extra_slide\n else:\n next_key = source_index\n nos_of_new_record = no_of_extra_slide[source_index]\n for i in range(1, nos_of_new_record):\n key = next_key + i\n value = max_length + i - 1\n index_map[key] = value\n max_length = max_length + no_of_extra_slide[source_index] - 1\n\n ## Manage to arrange slide order with extra nos of slide count and index\n for new_index, old_index in index_map.iteritems():\n self.move_slide(desprs, old_index, new_index)\n desprs.save(outputFile)", "title": "" }, { "docid": "c7f20f00893f5ca41933a203258abaf2", "score": "0.46910876", "text": "def convert_geotiff_folder(dir_path: Path,\n output_dir: Optional[Path]=None,\n block_size=256,\n max_nodata: float=0.1,\n unpaired=True):\n \n \n if output_dir is None:\n output_dir = dir_path.parent / (dir_path.name + \"_np\")\n \n output_dir.mkdir(parents=True)\n (output_dir / \"x\").mkdir()\n \n if not os.path.exists(dir_path / \"y\"):\n # Unlabelled setting\n it = MultiGeoTiffIterable(dir_path / \"x\", block_size=block_size, max_nodata=max_nodata)\n \n for i in trange(len(it)):\n np.save(output_dir / \"x\" / f\"{i}.npy\", it[i])\n \n elif unpaired == False:\n # Labelled setting\n (output_dir / \"y\").mkdir()\n it = MultiGeoTiffIterable(dir_path / \"x\", target_dir_path=dir_path / \"y\", block_size=block_size, max_nodata=max_nodata)\n \n for i in trange(len(it)):\n x, y = it[i]\n np.save(output_dir / \"x\" / f\"{i}.npy\", x)\n np.save(output_dir / \"y\" / f\"{i}.npy\", y)\n else:\n (output_dir / \"y\").mkdir()\n # Unpaired setting\n for folder in [\"x\", \"y\"]:\n print(folder)\n it = MultiGeoTiffIterable(dir_path / folder, block_size=block_size, max_nodata=max_nodata)\n for i in trange(len(it)):\n np.save(output_dir / folder / f\"{i}.npy\", it[i])", "title": "" }, { "docid": "2917170ab8bf94ff59a9d26a45352ee8", "score": "0.46903116", "text": "def export_patches(self, output_dir: Path, data_root: Path = None) -> None:\n groups = self.df.groupby(\"setting\")\n for setting_idx, group in groups:\n s = self.settings[setting_idx]\n if data_root is not None:\n s.slide_path = data_root / s.slide_path\n self._export_patches_for_setting(\n group, output_dir, s.slide_path, s.level, s.patch_size, s.loader\n )", "title": "" }, { "docid": "422a233c4be151469b6f4ff79b2de87b", "score": "0.46814555", "text": "def extract_all_patches(image, 
window_shape, stride, num_of_patches, rotations, output_path, im_name, rep_num, mode):\n non_doctored_windows = view_as_windows(image, window_shape, step=stride)\n non_doctored_patches = []\n for m in range(non_doctored_windows.shape[0]):\n for n in range(non_doctored_windows.shape[1]):\n non_doctored_patches += [non_doctored_windows[m][n][0]]\n # select random some patches, rotate and save them\n save_patches(non_doctored_patches, num_of_patches, mode, rotations, output_path, im_name, rep_num,\n patch_type='authentic')", "title": "" }, { "docid": "f79358ea528e871b13a5e7be5b75de15", "score": "0.46793705", "text": "def process_images(imdir, extension='.jpg'):\n impaths = glob.glob(os.path.join(imdir, '*' + extension))\n im_patches = [extract_patches(ip) for ip in impaths]\n return np.concatenate(im_patches, 0)", "title": "" }, { "docid": "ef87ce81caf7f71c51bc60742dcf1898", "score": "0.46771538", "text": "def do_extract_patches(self, layers, size=3, stride=1):\n results = []\n for l, f in layers:\n # Use a Theano helper function to extract \"neighbors\" of specific size, seems a bit slower than doing\n # it manually but much simpler!\n patches = theano.tensor.nnet.neighbours.images2neibs(f, (size, size), (stride, stride), mode='valid')\n # Make sure the patches are in the shape required to insert them into the model as another layer.\n patches = patches.reshape((-1, patches.shape[0] // f.shape[1], size, size)).dimshuffle((1, 0, 2, 3))\n # Calculate the magnitude that we'll use for normalization at runtime, then store...\n results.extend([patches] + self.compute_norms(T, l, patches))\n return results", "title": "" }, { "docid": "89d0f0c36b714af649bf6bcd68550549", "score": "0.4675163", "text": "def mask_to_submission_lists(image_filenames):\n patch_size = 16\n labels = []\n for image_filename in image_filenames: \n im = mpimg.imread(image_filename)\n for j in range(0, im.shape[1], patch_size):\n for i in range(0, im.shape[0], patch_size):\n patch = im[i:i + patch_size, j:j + patch_size]\n label = patch_to_label(patch)\n labels.append(label)\n return labels", "title": "" }, { "docid": "e10927f1b0db54772398e3fd7282ea00", "score": "0.46710137", "text": "def extract_patches(impath, step=2):\n img = imageio.imread(impath, as_gray=True).astype(np.float32)\n h, w = img.shape\n patches = []\n for i in range(0, h - 7, step):\n for j in range(0, w - 7, step):\n patch = np.reshape(img[i:i + 8, j:j + 8], (64,)) + np.random.rand(64)\n patches.append(patch)\n return np.array(patches)", "title": "" }, { "docid": "104008eef9de7775fb342cedfe8ce573", "score": "0.46545854", "text": "def get_monthly_prism_ppt_data(year,month, plotPPTBounds = False):\n \n prism_dir = \"PRISM_ppt_stable_4kmM3_198101_202001_bil//\"\n \n if(month<10):\n prism_file_path = \"PRISM_ppt_stable_4kmM3_\"+str(year)+\"0\"+str(month)+\"_bil.bil\"\n else:\n prism_file_path = \"PRISM_ppt_stable_4kmM3_\"+str(year)+str(month)+\"_bil.bil\" \n \n ppt_data = read_prism_bil(join(root, prism_dir, prism_file_path))\n \n hdr_dict = read_prism_hdr(join(root, prism_dir, prism_file_path).replace('.bil', '.hdr'))\n \n hdr_dict[\"ULXMAP\"] = float(hdr_dict[\"ULXMAP\"])\n hdr_dict[\"ULYMAP\"] = float(hdr_dict[\"ULYMAP\"])\n hdr_dict['NROWS'] = int(hdr_dict['NROWS'])\n hdr_dict['NCOLS'] = int(hdr_dict['NCOLS'])\n hdr_dict['XDIM'] = float(hdr_dict['XDIM'])\n hdr_dict['YDIM'] = float(hdr_dict['YDIM'])\n \n p1 = (hdr_dict[\"ULXMAP\"] - (hdr_dict['XDIM']/2), \n hdr_dict[\"ULYMAP\"] + (hdr_dict['YDIM']/2))\n\n p2 = (hdr_dict[\"ULXMAP\"] + 
(hdr_dict['NCOLS']*hdr_dict['XDIM']),\n          hdr_dict[\"ULYMAP\"] + (hdr_dict['XDIM']/2))\n\n    p3 = (hdr_dict[\"ULXMAP\"] + (hdr_dict['NCOLS']*hdr_dict['XDIM']),\n          hdr_dict[\"ULYMAP\"] - (hdr_dict['NROWS']*hdr_dict['YDIM']))\n\n    p4 = (hdr_dict[\"ULXMAP\"] - (hdr_dict['XDIM']/2),\n          hdr_dict[\"ULYMAP\"] - hdr_dict['NROWS']*hdr_dict['YDIM'])\n    \n    lon_point_list = (p1[0], p2[0], p3[0], p4[0])\n    lat_point_list = (p1[1], p2[1], p3[1], p4[1])\n    \n    ppt_bounds = Polygon(zip(lon_point_list, lat_point_list))\n    \n    if(plotPPTBounds):\n        crs = {'init': 'epsg:4326'}\n        m = folium.Map(zoom_start=10, tiles='cartodbpositron')\n        polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[ppt_bounds]) \n        \n        folium.GeoJson(polygon).add_to(m)\n        folium.LatLngPopup().add_to(m)\n        m.save(\"mymap.html\")\n\n    return ppt_bounds, ppt_data, hdr_dict", "title": ""}, {"docid": "6a9b225e8e36eae6808bdd89309186dc", "score": "0.46446556", "text": "def get_patches_2(img_arr, size_h=None, stride_h=None, size_w=None, stride_w=None): \n    # check size and stride\n    if size_w % stride_w != 0:\n        raise ValueError(\"size % stride must be equal 0\")\n\n    if size_h % stride_h != 0:\n        raise ValueError(\"size % stride must be equal 0\")\n\n    patches_list = []\n    overlapping_h = 0\n    overlapping_w = 0\n\n    if stride_h != size_h:\n        overlapping_h = (size_h // stride_h) - 1\n\n    if stride_w != size_w:\n        overlapping_w = (size_w // stride_w) - 1\n\n    if img_arr.ndim == 3:\n        i_max = img_arr.shape[0] // stride_h - overlapping_h\n        j_max = img_arr.shape[1] // stride_w - overlapping_w\n\n        for i in range(i_max):\n            for j in range(j_max):\n                # print(i*stride_h, i*stride_h+size_h)\n                # print(j*stride_w, j*stride_w+size_w)\n                # img[starty:starty+cropy,startx:startx+cropx]\n                patches_list.append(\n                    img_arr[\n                        i * stride_h: i * stride_h + size_h,\n                        j * stride_w : j * stride_w + size_w\n                    ]\n                )\n\n    elif img_arr.ndim == 4:\n        i_max = img_arr.shape[1] // stride_h - overlapping_h\n        j_max = img_arr.shape[2] // stride_w - overlapping_w\n        for im in img_arr:\n            for i in range(i_max):\n                for j in range(j_max):\n                    # print(i*stride_h, i*stride_h+size_h)\n                    # print(j*stride_w, j*stride_w+size_w)\n                    patches_list.append(\n                        im[\n                            i * stride_h : i * stride_h + size_h,\n                            j * stride_w : j * stride_w + size_w,\n                        ]\n                    )\n\n    else:\n        raise ValueError(\"img_arr.ndim must be equal 3 or 4\")\n\n    return np.stack(patches_list)", "title": ""}, {"docid": "71af816a5a666ca6fc9523e606480f83", "score": "0.463769", "text": "def divide_into_patches(image: np.ndarray, number_of_patches_in_x: int,\n                        number_of_patches_in_y: int) -> List[Patch]:\n\n    if not isinstance(number_of_patches_in_x, int) or not isinstance(\n            number_of_patches_in_y, int):\n        raise TypeError(\"number_of_patches_in_x and number_of_patches_in_y \"\n                        \"should be integers\")\n    x_window_size, x_stride = divmod(image.shape[1], number_of_patches_in_x)\n    y_window_size, y_stride = divmod(image.shape[0], number_of_patches_in_y)\n    window_size = WindowSize(x_window_size + x_stride, y_window_size + y_stride)\n    stride = Stride(x_window_size, y_window_size)\n    patches = []\n    for y_dim_patch_number in range(0, number_of_patches_in_y):\n        for x_dim_patch_number in range(0, number_of_patches_in_x):\n            left_border_x = int(0 + stride.x * x_dim_patch_number)\n            right_border_x = int(window_size.x + stride.x * x_dim_patch_number)\n            upper_border_y = int(0 + stride.y * y_dim_patch_number)\n            lower_border_y = int(window_size.y + stride.y * y_dim_patch_number)\n            patch = image[upper_border_y:lower_border_y,\n                          left_border_x:right_border_x]\n            patch = Patch(patch)\n            patches.append(patch)\n    return patches", "title": ""}, {"docid": "ea4a856f50a0b0c52ba5b3ef9b4c3559", 
"score": "0.462963", "text": "def get_sliced_images_path_pattern(self, imageid):", "title": "" }, { "docid": "da38764b6850b49270e4bb52fc5ed99e", "score": "0.46231073", "text": "def convert_image_to_numpy(xls_no_polyp, xls_10_mm_polyp, xls_6_9_mm_polyp, folder, npy_dir):\n df_xls_no_polyp = pd.read_excel(xls_no_polyp)\n df_xls_10_mm_polyp = pd.read_excel(xls_10_mm_polyp)\n df_xls_6_9_mm_polyp = pd.read_excel(xls_6_9_mm_polyp)\n images_path = os.listdir(folder)\n tmp_np = []\n tmp_10_p = []\n tmp_6_9_p = []\n list_np = df_xls_no_polyp.iloc[:, 0].values.tolist()\n list_10_p = df_xls_10_mm_polyp.iloc[:, 0].values.tolist()\n list_6_9_p = df_xls_6_9_mm_polyp.iloc[:, 0].values.tolist()\n for l in images_path:\n if l in list_np:\n tmp_np.append(l)\n elif l in list_10_p:\n tmp_10_p.append(l)\n elif l in list_6_9_p:\n tmp_6_9_p.append(l)\n size_no_polyp, size_10_polyp, size_6_9_polyp = count_polyps_size(tmp_np, tmp_10_p, tmp_6_9_p, folder)\n print('size_10_polyp: ', size_10_polyp)\n print('size_no_polyp: ', size_no_polyp)\n print('size_6_9_polyp: ', size_6_9_polyp)\n on_convert_image_to_numpy(tmp_np, npy_dir, size_no_polyp, folder, 'no_polyp')\n print('finished converting no polyp')\n on_convert_image_to_numpy(tmp_10_p, npy_dir, size_10_polyp, folder, 'polyp_10')\n print('finished converting 10 mm polyp')\n on_convert_image_to_numpy(tmp_6_9_p, npy_dir, size_6_9_polyp, folder, 'polyp_6_9')", "title": "" }, { "docid": "c23ee612577340aaeeaf16096a2409dd", "score": "0.4618595", "text": "def prepare_plots(self):\n MultiQubit_Spectroscopy_Analysis.prepare_plots(self)\n\n for qb_name in self.qb_names:\n\n # Copy the original plots in order to have both the analyzed and the\n # non-analyzed plots\n fig_id_original = f\"projected_plot_{qb_name}_Magnitude_{qb_name}_volt\"\n fig_id_analyzed = f\"ResonatorSpectroscopyFluxSweep_{fig_id_original}\"\n self.plot_dicts[fig_id_analyzed] = deepcopy(self.plot_dicts[\n f\"projected_plot_{qb_name}_Magnitude_Magnitude_{qb_name}_volt\"])\n\n # Change the fig_id of the copied plot in order to distinguish it\n # from the original\n self.plot_dicts[fig_id_analyzed]['fig_id'] = fig_id_analyzed\n\n # Plot the left dips\n self.plot_dicts[f\"{fig_id_analyzed}_left_dips\"] = {\n 'fig_id': fig_id_analyzed,\n 'plotfn': self.plot_line,\n 'xvals': self.analysis_data[qb_name]['left_dips_frequency'],\n 'yvals': self.analysis_data[qb_name]['volts'],\n 'marker': 'o',\n 'linestyle': 'none',\n 'color': 'C4',\n 'setlabel': 'Left dips',\n 'do_legend': True,\n 'legend_ncol': 2,\n 'legend_bbox_to_anchor': (1.2, -0.2),\n 'legend_pos': 'upper right'\n }\n\n # Plot the right dips\n self.plot_dicts[f\"{fig_id_analyzed}_right_dips\"] = {\n 'fig_id': fig_id_analyzed,\n 'plotfn': self.plot_line,\n 'xvals': self.analysis_data[qb_name]['right_dips_frequency'],\n 'yvals': self.analysis_data[qb_name]['volts'],\n 'marker': 'o',\n 'linestyle': 'none',\n 'color': 'C1',\n 'setlabel': 'Right dips',\n 'do_legend': True,\n 'legend_ncol': 2,\n 'legend_bbox_to_anchor': (1.2, -0.2),\n 'legend_pos': 'upper right'\n }\n\n # Plot the left LSS\n self.plot_dicts[f\"{fig_id_analyzed}_left_lss\"] = {\n 'fig_id': fig_id_analyzed,\n 'plotfn': self.plot_line,\n 'xvals': np.array([self.fit_res[qb_name]['left_lss_freq']]),\n 'yvals': np.array([\n self.fit_res[qb_name]['left_lss'],\n ]),\n 'marker': '<',\n 'linestyle': 'none',\n 'color': 'C4',\n 'line_kws': {\n 'ms': self.get_default_plot_params()['lines.markersize'] * 3\n },\n 'setlabel': 'Left LSS',\n 'do_legend': True,\n 'legend_ncol': 2,\n 'legend_bbox_to_anchor': (1.2, 
-0.2),\n 'legend_pos': 'upper right'\n }\n\n # Plot the left USS\n self.plot_dicts[f\"{fig_id_analyzed}_left_uss\"] = {\n 'fig_id': fig_id_analyzed,\n 'plotfn': self.plot_line,\n 'xvals': np.array([self.fit_res[qb_name]['left_uss_freq']]),\n 'yvals': np.array([self.fit_res[qb_name]['left_uss']]),\n 'marker': '>',\n 'linestyle': 'none',\n 'color': 'C4',\n 'line_kws': {\n 'ms': self.get_default_plot_params()['lines.markersize'] * 3\n },\n 'setlabel': 'Left USS',\n 'do_legend': True,\n 'legend_ncol': 2,\n 'legend_bbox_to_anchor': (1.2, -0.2),\n 'legend_pos': 'upper right'\n }\n\n # Plot the right LSS\n self.plot_dicts[f\"{fig_id_analyzed}_right_lss\"] = {\n 'fig_id': fig_id_analyzed,\n 'plotfn': self.plot_line,\n 'xvals': np.array([self.fit_res[qb_name]['right_lss_freq']]),\n 'yvals': np.array([self.fit_res[qb_name]['right_lss']]),\n 'marker': '<',\n 'linestyle': 'none',\n 'color': 'C1',\n 'line_kws': {\n 'ms': self.get_default_plot_params()['lines.markersize'] * 3\n },\n 'setlabel': 'Right LSS',\n 'do_legend': True,\n 'legend_ncol': 2,\n 'legend_bbox_to_anchor': (1.2, -0.2),\n 'legend_pos': 'upper right'\n }\n\n # Plot the right USS\n self.plot_dicts[f\"{fig_id_analyzed}_right_uss\"] = {\n 'fig_id': fig_id_analyzed,\n 'plotfn': self.plot_line,\n 'xvals': np.array([self.fit_res[qb_name]['right_uss_freq']]),\n 'yvals': np.array([self.fit_res[qb_name]['right_uss']]),\n 'marker': '>',\n 'linestyle': 'none',\n 'color': 'C1',\n 'line_kws': {\n 'ms': self.get_default_plot_params()['lines.markersize'] * 3\n },\n 'setlabel': 'Right USS',\n 'do_legend': True,\n 'legend_ncol': 2,\n 'legend_bbox_to_anchor': (1.2, -0.2),\n 'legend_pos': 'upper right'\n }\n\n # Mark the designated sweet spot\n self.plot_dicts[f\"{fig_id_analyzed}_designated_sweet_spot\"] = {\n 'fig_id': fig_id_analyzed,\n 'plotfn': self.plot_line,\n 'xvals': np.array(\n [self.fit_res[qb_name]\n [f'{qb_name}_sweet_spot_RO_frequency']]\n ),\n 'yvals': np.array(\n [self.fit_res[qb_name]\n [f'{qb_name}_sweet_spot']]\n ),\n 'marker': 'x',\n 'linestyle': 'none',\n 'color': 'C3',\n 'line_kws': {\n 'fillstyle': 'none',\n 'ms': self.get_default_plot_params()['lines.markersize'] * 3\n },\n 'setlabel': f'{qb_name} sweet spot',\n 'do_legend': True,\n 'legend_ncol': 2,\n 'legend_bbox_to_anchor': (1.2, -0.2),\n 'legend_pos': 'upper right'\n }\n\n # Plot textbox containing relevant information\n textstr = (\n f\"Left LSS = {self.fit_res[qb_name]['left_lss']:.3f} V\"\n f\"\\nLeft USS = {self.fit_res[qb_name]['left_uss']:.3f} V\"\n f\"\\nRight LSS = {self.fit_res[qb_name]['right_lss']:.3f} V\"\n f\"\\nRight USS = {self.fit_res[qb_name]['right_uss']:.3f} V\")\n\n self.plot_dicts[f'{fig_id_analyzed}_text_msg'] = {\n 'fig_id': fig_id_analyzed,\n 'ypos': -0.3,\n 'xpos': -0.2,\n 'horizontalalignment': 'left',\n 'verticalalignment': 'top',\n 'plotfn': self.plot_text,\n 'text_string': textstr\n }", "title": "" }, { "docid": "7935e28c82804e4c14c1a8e59a7f6466", "score": "0.46067646", "text": "def extract_lab_and_boundary(file_name):\n t = get_time_from_filename(file_name)\n\n im = cv2.imread(file_name)\n im_lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)\n im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\n resized_y, resized_x = im_gray.shape\n slide_crop_coordinates = extraction_args['slide_coordinates']\n\n min_y = slide_crop_coordinates[0] * resized_y\n max_y = slide_crop_coordinates[1] * resized_y\n min_x = slide_crop_coordinates[2] * resized_x\n max_x = slide_crop_coordinates[3] * resized_x\n slide = im_gray[min_y: max_y, min_x: max_x]\n slidehist = 
cv2.calcHist([slide], [0], None, [256], [0, 256])\n\n # Plotting the slide in order to check the slide location once\n # if t < 2:\n # plt.subplot(2,1,1)\n # plt.imshow(slide, cmap=plt.cm.Greys_r)\n # plt.subplot(2,1,2)\n # plt.plot(slidehist)\n # plt.xlim([0,256])\n\n # plt.show()\n\n histogram_boundaries = get_histogram_min_max_with_percentile(slidehist, False)\n\n # return t, histogram_boundaries\n return t, im_lab, histogram_boundaries", "title": "" }, { "docid": "d136340a1aafbb5342ea53fbeab0c51f", "score": "0.4605846", "text": "def allFigures(self):\n \n self.clean()\n self.saveData()\n self.figureImg(\"fig_01_img.png\")\n self.figureAvg(\"fig_02_avg.png\")\n self.figureDriftRAW(\"fig_03_drift1.png\")\n self.figureDriftDGOR(\"fig_04_drift2.png\")", "title": "" }, { "docid": "170f903051ce383513e7b1cb8c2888a4", "score": "0.4596534", "text": "def read(root, start = 0):\n logger.debug(\"Read {0}\".format(root))\n patches = read_patches(open(find_patch_file(root)), start) \n projections = read_projections(\"{0}/txt/\".format(root), start)\n\n return patches, projections", "title": "" }, { "docid": "6e98e44aed821a58291c464aca84a242", "score": "0.45935205", "text": "def load_mask_from_folder(self):\n try:\n foldername = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select folder', './CoordinateManager/Images/')\n list_dir_raw = sorted(os.listdir(foldername))\n \n list_dir = [file for file in list_dir_raw if file[-3:] in ['png', 'jpg']]\n list_nr = len(list_dir)\n image_sequence = np.zeros([768, 1024, list_nr])\n \n for i in range(list_nr):\n single_mask =plt.imread(foldername + '/' + list_dir[i])\n # jpg has RGB channels\n single_mask_gray = rgb2gray(single_mask) \n check, valid_single_mask = self.check_mask_format_valid(single_mask_gray)\n if check:\n image_sequence[:,:,i] = valid_single_mask\n else: \n return\n \n self.DMD_actuator.send_data_to_DMD(image_sequence)\n \n self.load_mask_container_stack.setCurrentIndex(0)\n except:\n print(\"Fail to load.\")", "title": "" }, { "docid": "b529759641b702d5a3230117447ddea8", "score": "0.45907736", "text": "def createMorph(selectedImages,selectedPMs):\n \n #save the temporary results here later:\n os_temp_path = tempfile.gettempdir()\n \n #get the measurements for the first image (our targets)\n mainImage = OriginalImage.objects.all().get(id=selectedImages[0])\n potentialids = [] \n #now get the associated measurements\n measures = Measurement.objects.all().filter(id__in=selectedPMs[0]).filter(mogelijkemeting__shapedefining=True)\n measures = [j for j in measures]\n measures.sort(key=lambda x: x.mogelijkemeting.name)\n \n coordsx = []\n coordsy = []\n for k, measurement in enumerate(measures):\n coordsx.append(float(measurement.x))\n coordsy.append(float(measurement.y))\n potentialids.append(measurement.mogelijkemeting.id)\n r1 = vtk.vtkJPEGReader()\n r1.SetFileName(settings.DATADIR + mainImage.id + \".jpg\")\n r1.Update() \n\n # flip y coord (VTK has opposite convention), create 3-d coords (z=0)\n ydim = r1.GetOutput().GetDimensions()[1]\n coords = [(x, ydim - y, 0) for (x,y) in zip(coordsx, coordsy)]\n \n # convert everything to vtkPoints\n lmt = vtk.vtkPoints()\n lmt.SetNumberOfPoints(len(coords))\n for i, coord in enumerate(coords):\n lmt.SetPoint(i,coord)\n \n #The target is clear, let's get to work, get the source images...\n images = []\n #we don't need the first image or its measures anymore, because they don't need to be transformed or morphed\n selectedImages.pop(0)\n selectedPMs.pop(0)\n for id in selectedImages:\n 
images.append(OriginalImage.objects.all().get(id=id))\n\n transformations = []\n morphtransformations = []\n \n #Create a new database object for the target image to associate the bitmaps with\n img = OriginalImage(project=mainImage.project, name='MorphedImage')\n img.save()\n imp = Image.open(settings.DATADIR + mainImage.id + '.jpg')\n imp.save(settings.DATADIR + img.id + '.jpg', 'JPEG') \n orig_bitmaps = Bitmap.objects.all().filter(image=mainImage)\n \n for bm in orig_bitmaps:\n #store bitmaps of mainImage as sub of img\n bitmap = Bitmap(project=img.project, name='warpedbitmap', image=img, \n mogelijkemeting=bm.mogelijkemeting, imagewidth=bm.imagewidth, \n imageheight=bm.imageheight, minx=bm.minx, miny=bm.miny, maxx=bm.maxx, maxy=bm.maxy)\n bitmap.save()\n \n bitmap_image = Image.open(settings.DATADIR + bm.id + '.gif')\n bitmap_image = bitmap_image.convert(\"RGBA\")\n bitmap_image.save(settings.DATADIR + bitmap.id + '.gif', transparency=0)\n \n #now get the other images and perform our transformations\n for i in range(len(images)):\n measures = Measurement.objects.all().filter(id__in=selectedPMs[i]).filter(mogelijkemeting__shapedefining=True)#get measurements\n measures = [j for j in measures]\n measures.sort(key=lambda x: x.mogelijkemeting.name)\n coordsx = []\n coordsy = [] \n for k, measurement in enumerate(measures):\n coordsx.append(float(measurement.x))\n coordsy.append(float(measurement.y))\n if potentialids[k] != measurement.mogelijkemeting.id: #the potentialmeasurements do not match up to the ones in the target image\n return img, 0\n r = vtk.vtkJPEGReader()\n r.SetFileName(settings.DATADIR + images[i].id + \".jpg\")\n r.Update()\n\n ydim = r.GetOutput().GetDimensions()[1]\n coordso = [(x, ydim - y, 0) for (x,y) in zip(coordsx, coordsy)]\n lms = vtk.vtkPoints()\n lms.SetNumberOfPoints(len(coordso))\n for k, coord in enumerate(coordso):\n lms.SetPoint(k,coord)\n\n transformation = vtk.vtkLandmarkTransform()\n transformation.SetTargetLandmarks(lmt) \n lmt.Modified()\n transformation.SetSourceLandmarks(lms)\n lms.Modified()\n #size matters, so set the mode to Rigid Body (also known as do not scale please)\n transformation.SetModeToRigidBody()\n transformation.Inverse()\n transformation.Update()\n out = vtk.vtkPoints()#this will be the source of our morph transform\n transformation.TransformPoints(lms,out)\n transformations.append(transformation)\n ir = vtk.vtkImageReslice()\n # we're not using linear, because we want to improve the quality of the bitmaps\n ir.SetInterpolationModeToNearestNeighbor()\n ir.SetResliceTransform(transformation)\n ir.SetInput(r.GetOutput())\n ir.SetInformationInput(r1.GetOutput())\n w = vtk.vtkJPEGWriter()\n w.SetFileName(os_temp_path+'/translated'+images[i].id+'.jpg')\n w.SetInput(ir.GetOutput())\n w.Write()\n r2 = vtk.vtkJPEGReader()\n r2.SetFileName(os_temp_path+'/translated'+images[i].id+'.jpg')\n r2.Update() \n \n # the mighty morphing ThinPlateSplineTransform\n morphtransform = vtk.vtkThinPlateSplineTransform()\n morphtransform.SetBasisToR2LogR()\n morphtransform.SetSourceLandmarks(lms)\n lms.Modified()\n morphtransform.SetTargetLandmarks(lmt)\n lmt.Modified()\n morphtransform.Inverse()\n morphtransform.Update()\n morphtransformations.append(morphtransform)\n\n #ir.SetInput(r2.GetOutput())\n #ir.SetInformationInput(r1.GetOutput())\n \n bitmaps = Bitmap.objects.all().filter(image=images[i])\n \n #now perform the total transformation on all bitmaps\n for bm in bitmaps:\n location = settings.DATADIR + bm.id + \".gif\"\n im = Image.open(location)\n im = 
im.convert(\"RGBA\")\n im.save(settings.DATADIR + bm.id + \".png\", \"PNG\")\n\n r3 = vtk.vtkPNGReader()\n r3.SetFileName(settings.DATADIR + bm.id + '.png')\n r3.Update()\n \n ir2 = vtk.vtkImageReslice()\n ir2.SetInterpolationModeToNearestNeighbor()\n ir2.SetResliceTransform(morphtransform)\n ir2.SetInput(r3.GetOutput())\n ir2.SetInformationInput(r2.GetOutput())\n \n w3 = vtk.vtkPNGWriter()\n w3.SetFileName(os_temp_path+'/morphed'+bm.id+'.png')\n w3.SetInput(ir2.GetOutput())\n w3.Write()\n \n bitmap = Bitmap(project=img.project, name='warpedbitmap', image=img, \n mogelijkemeting=bm.mogelijkemeting, imagewidth=bm.imagewidth, \n imageheight=bm.imageheight, minx=bm.minx, miny=bm.miny, maxx=bm.maxx, maxy=bm.maxy)\n bitmap.save()\n \n im = Image.open(os_temp_path+'/morphed'+bm.id+'.png')\n im = im.convert(\"RGBA\")\n im.save(settings.DATADIR + bitmap.id + '.gif', transparency=0)\n \n\n return img, 1", "title": "" }, { "docid": "4654a734d1161324a4743ca5cddeda78", "score": "0.45854816", "text": "def _extract_pmids(pth):\n pmid_set_tmp = set()\n with open(pth.path, 'r', encoding='latin1') as f:\n rp_tmp = []\n for line in f:\n prefix, rest = _split2prefix_and_rest(line)\n if prefix == 'RP':\n rp_tmp.append(rest)\n elif prefix == 'RC':\n pass\n elif prefix == 'RX':\n if _is_small_scale_reference(rp_tmp):\n match = _search4pmid(rest)\n if match:\n pmid = _extract_pmid_from_match(match)\n pmid_set_tmp.add(pmid)\n else:\n app.logger.warn('Ignored LARGE SCALE ref: {}'.format(rest))\n app.logger.info('RP tokens for above reference: {}'.format(rp_tmp))\n else:\n rp_tmp = []\n known_pmids, new_pmids = _compare_pmid_sets(pmid_set_tmp)\n _log_known_pmids(known_pmids, pth)\n reference_model_list = _compile_reference_models(new_pmids, known_pmids, pth)\n return reference_model_list", "title": "" }, { "docid": "d618a72c129ad3ecbb4a65d0cff8c00e", "score": "0.45845008", "text": "def slides(id):\n if request.method == \"POST\":\n save_song(id, request)\n song = Song.query.get(id)\n song = clean_lyrics(song)\n arrangement = clean_arrangement(song.default_arrangement)\n return render_template(\n \"slides_single_song.html.j2\", song=song, arrangement=arrangement, id=id\n )", "title": "" }, { "docid": "8c2024d5ab7b17eb1bd166b4dca7310a", "score": "0.45838833", "text": "def extract_patches_from_image(data_path, label_name, patch_size, nb_samples):\n data = sio.loadmat(data_path)\n image = data['scan'].squeeze()\n\n label = data[label_name].squeeze()\n assert image.shape == label.shape\n\n image = per_image_standardization(image)\n image = image_padded(image, patch_size)\n\n coordinates = sampler(label, nb_samples)\n assert coordinates.shape[1] == nb_samples\n print('Number of voxels selected: ', coordinates.shape)\n\n patches_picked = []\n label_picked = label[coordinates[0, :], coordinates[1, :], coordinates[2, :]]\n\n for i in range(nb_samples):\n patches_picked.append(extract_orthogonal_patches(image, coordinates[:, i], patch_size))\n\n output_patches = np.array(patches_picked)\n\n return output_patches, label_picked", "title": "" }, { "docid": "26f0822488b36d8261e64d04ca34f511", "score": "0.45828974", "text": "def buildRMDSSheets(self):\r\n l_fMDout = listdir(self.pr_MDout)\r\n\r\n # load ChEMBL table\r\n ldchem = toolbox.matrixToList(self.p_dataset)\r\n for dchem in ldchem:\r\n dfile = {}\r\n dfile[\"prot RMSD\"] = \"\"\r\n dfile[\"lig RMSD\"] = \"\"\r\n dfile[\"RMSF residue\"] = \"\"\r\n\r\n ChEMBL_id = dchem[\"CMPD_CHEMBLID\"]\r\n typeAff = dchem[\"STANDARD_TYPE\"]\r\n # extract RMSD files\r\n for fMDout in 
l_fMDout:\r\n ChEMBL_id_folder = fMDout.split(\"_\")[0]\r\n if ChEMBL_id == ChEMBL_id_folder:\r\n \r\n \r\n # protein\r\n p_RMSD_prot = self.pr_MDout + fMDout + \"/RMSDs/protein/protRMSD\"\r\n if path.exists(p_RMSD_prot):\r\n dfile[\"prot RMSD\"] = p_RMSD_prot\r\n else:\r\n print \"Error in \" + fMDout + \": prot RMSD missing\"\r\n self.computeRMSDProt(self.pr_MDout + fMDout + \"/\")\r\n\r\n # lig need to define here\r\n p_RMSD_lig = self.pr_MDout + fMDout + \"/RMSDs/ligand/ligRMSD\"\r\n if path.exists(p_RMSD_lig):\r\n dfile[\"lig RMSD\"] = p_RMSD_lig\r\n else:\r\n print \"Error in \" + fMDout + \": lig RMSD missing\"\r\n p_RMSD_lig = self.computeRMSDLig(self.pr_MDout + fMDout + \"/\")\r\n dfile[\"lig RMSD\"] = p_RMSD_lig\r\n\r\n \r\n # RMSF with the binding site represented\r\n p_RMSF_res = self.pr_MDout + fMDout + \"/RMSDs/residues/resRMSD_BS\"\r\n if path.exists(p_RMSF_res):\r\n dfile[\"RMSF residue\"] = p_RMSF_res\r\n else:\r\n print \"Error in \" + fMDout + \": lig RMSF BS missing\"\r\n p_RMSF_res = self.computeRMSFresBS(self.pr_MDout + fMDout + \"/\")\r\n dfile[\"lig RMSD\"] = p_RMSF_res\r\n \r\n # build figure 3 panels\r\n runExternalSoft.RMSD3panels(dfile[\"prot RMSD\"], dfile[\"lig RMSD\"], dfile[\"RMSF residue\"], ChEMBL_id, pathFolder.createFolder(self.pr_out + typeAff + \"/\"))", "title": "" }, { "docid": "8944d0d9305159d36c0d523c7a89e5ca", "score": "0.45788488", "text": "def test_mask_based_patch_extractor_ndpi(\n sample_ndpi: Path,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n res = 0\n patch_size = stride = (400, 400)\n input_img = Path(sample_ndpi)\n wsi = OpenSlideWSIReader(input_img=input_img)\n slide_dimensions = wsi.info.slide_dimensions\n\n # Generating a test mask to read patches from\n mask_dim = (int(slide_dimensions[0] / 10), int(slide_dimensions[1] / 10))\n wsi_mask = np.zeros(mask_dim[::-1], dtype=np.uint8) # reverse as dims are (w, h)\n # masking two column to extract patch from\n wsi_mask[:, :2] = 255\n\n # patch extraction based on the column mask\n patches = patchextraction.get_patch_extractor(\n input_img=input_img,\n input_mask=wsi_mask,\n method_name=\"slidingwindow\",\n patch_size=patch_size,\n resolution=res,\n units=\"level\",\n stride=None,\n )\n\n # read the patch from the second row (y) in the first column\n patch = wsi.read_rect(\n location=(0, int(patch_size[1])),\n size=patch_size,\n resolution=res,\n units=\"level\",\n )\n\n # because we are using column mask to extract patches, we can expect\n # that the patches[1] is the from the second row (y) in the first column.\n assert np.all(patches[1] == patch)\n assert patches[0].shape == (patch_size[0], patch_size[1], 3)\n\n # Test None option for mask\n _ = patchextraction.get_patch_extractor(\n input_img=input_img,\n input_mask=None,\n method_name=\"slidingwindow\",\n patch_size=patch_size,\n resolution=res,\n units=\"level\",\n stride=stride[0],\n )\n\n # Test passing a VirtualWSI for mask\n mask_wsi = VirtualWSIReader(wsi_mask, info=wsi._m_info, mode=\"bool\")\n _ = patchextraction.get_patch_extractor(\n input_img=wsi,\n input_mask=mask_wsi,\n method_name=\"slidingwindow\",\n patch_size=patch_size,\n resolution=res,\n units=\"level\",\n stride=None,\n )\n\n # Test `otsu` option for mask\n _ = patchextraction.get_patch_extractor(\n input_img=input_img,\n input_mask=\"otsu\",\n method_name=\"slidingwindow\",\n patch_size=patch_size[0],\n resolution=res,\n units=\"level\",\n stride=stride,\n )\n\n _ = patchextraction.get_patch_extractor(\n input_img=wsi_mask, # a numpy array to build 
VirtualSlideReader\n input_mask=\"morphological\",\n method_name=\"slidingwindow\",\n patch_size=patch_size,\n resolution=res,\n units=\"level\",\n stride=stride,\n )\n\n # Test passing an empty mask\n wsi_mask = np.zeros(mask_dim, dtype=np.uint8)\n\n _ = patchextraction.get_patch_extractor(\n input_img=input_img,\n input_mask=wsi_mask,\n method_name=\"slidingwindow\",\n patch_size=patch_size,\n resolution=res,\n units=\"level\",\n stride=stride,\n )\n assert \"No candidate coordinates left\" in caplog.text", "title": "" }, { "docid": "f16f1ab71e00978256a637153f4f2b86", "score": "0.4578261", "text": "def to_tex_tikzpicture(self, output_dir=''):\n slides_file = os.path.join(output_dir, 'slides.tex')\n slides_output = []\n line_file_format = os.path.join(output_dir, 'line-{}{:02d}.tex')\n page_count = 0\n\n sections = self.merge_melody_lyrics()\n self.group_underlines(sections)\n\n for i, (tag, lines) in enumerate(sections):\n # new section\n line_count = 0\n for j, (nodes, bars, ties, slurs, underlines_list, triplets) in enumerate(lines):\n # new page\n if line_count % 2 == 0:\n page_count += 1\n if page_count > 1:\n slides_output.append('\\n')\n slides_output.append('%%%%% PAGE {} %%%%%'.format(page_count))\n slides_output.append(r'\\newpage')\n slides_output.append('')\n if j == 0: # first page in section\n slides_output.append('% <{}>'.format(tag))\n slides_output.append(r'\\begin{nmntag}')\n slides_output.append(r'\\textmd{$<$\\hspace{-0pt}' + tag + r'\\hspace{-0pt}$>$}')\n slides_output.append(r'\\end{nmntag}')\n else:\n slides_output.append(r'\\begin{nmnblank}')\n slides_output.append(r'\\end{nmnblank}')\n\n # new line\n line_lyrics = ''\n line_output = []\n line_output.append(r'\\begin{tikzpicture}')\n line_output.append(r\"\"\"\\tikzstyle{every node}=[inner sep=0pt]\n\\tikzstyle{dot}=[circle,fill=white,inner sep=0pt,text width=1.5pt]\n\\tikzstyle{lyrics}=[node distance=15pt]\n\\tikzstyle{tie}=[line width=0.5pt,bend left=45,min distance=4pt,max distance=5pt]\n\\tikzstyle{underline}=[line width=0.5pt]\n\\tikzstyle{tie0}=[line width=0.5pt,out=50,in=180,max distance=20pt]\n\\tikzstyle{tie1}=[line width=0.5pt,out=130,in=0,max distance=20pt]\"\"\")\n line_output.append('\\n\\n% nodes')\n line_output.append(r'\\node at (0pt, 12pt) {}; % for space adjustment')\n\n pos = 0\n first_text_idx = None\n for k, (time, start_beat, idx_list) in enumerate(bars):\n # new bar\n if k > 0:\n pos -= 2.5\n line_output.append(r'\\node at ({}pt,0) {{|}};'.format(pos))\n pos += 7.5\n for idx in idx_list:\n node = nodes[idx]\n note = node.value\n line_output.append('')\n if node.type != NodeType.NOTE:\n pos -= 2.5\n if node.type == NodeType.DASH:\n line_output.append(r'\\node at ({}pt,-1pt) {{-}};'.format(pos))\n elif node.type == NodeType.DOT:\n line_output.append(r'\\node[dot] at ({}pt,0) {{}};'.format(pos))\n pos += 7.5\n continue\n # name\n line_output.append(r'\\node (a{}) at ({}pt,0) {{{}}};'.format(idx, pos, note.name))\n # acc\n acc_dict = {-1: 'flat', 0: 'natural', 1: 'sharp'}\n if note.acc is not None:\n line_output.append(r'\\node at ($(a{}.north west)+(-1pt,0)$){{\\tiny$\\{}$}};'\n .format(idx, acc_dict[note.acc]))\n # octave\n if note.octave > 0:\n line_output.append(r'\\node[dot,above of=a{},node distance=6pt] {{}};'.format(idx))\n elif note.octave < 0:\n node_distance = 7\n if node.lines <= -3:\n node_distance = 10\n elif node.lines == -2:\n node_distance = 9\n elif node.lines == -1:\n node_distance = 8\n line_output.append(r'\\node[dot,below of=a{},node distance={}pt] {{}};'\n .format(idx, 
node_distance))\n # text\n height = -17\n if node.text:\n if node.text in '每悔':\n height += 1\n text = '{0}{1}{0}'.format('\\phantom{|}', node.text)\n if first_text_idx is None:\n first_text_idx = idx\n line_output.append(r'\\node[lyrics] (t{0}) at ($(a{0})+(0,{2}pt)$) {{{1}}};'\n .format(idx, text, height))\n line_lyrics += node.text\n elif first_text_idx is None:\n text = r'\\phantom{{{}}}'.format('天')\n line_output.append(r'\\node[lyrics] (t{0}) at ($(a{0})+(0,{2}pt)$) {{{1}}};'\n .format(idx, text, height))\n first_text_idx = idx\n pos += 10\n\n # ties\n line_output.append('\\n\\n% ties')\n for idx0, idx1 in ties:\n dis = 2\n if nodes[idx0].value.octave >= 1:\n dis = 5\n line_output.append(r'\\draw[tie] (a{}.north) ++(0,{}pt) coordinate (tmp) to (a{}.north |- tmp);'\n .format(idx0, dis, idx1))\n\n # underlines\n line_output.append('\\n\\n% underlines')\n for depth, underlines in enumerate(underlines_list):\n if depth == 0:\n continue\n for idx0, idx1 in underlines:\n line_output.append(r'\\draw[underline] (a{}.south west) ++(0,-{}pt)'.format(idx0, depth * 1.5)\n + r' coordinate (tmp) to (a{}.south east |- tmp);'.format(idx1))\n\n # triplets\n line_output.append('\\n\\n% triplets')\n for triplet in triplets:\n dis0, dis1 = 2, 9\n if nodes[triplet[0]].value.octave >= 1 or nodes[triplet[2]].value.octave >= 1:\n dis0 = 5\n if nodes[triplet[1]].value.octave >= 1:\n dis1 = 12\n line_output.append(r'\\node[above of=a{},node distance={}pt] (tri) {{\\tiny{{3}}}};'\n .format(triplet[1], dis1))\n line_output.append(r'\\draw[tie0] (a{}.north) +(0,{}pt) to ($(tri.west)+(-1pt,0)$);'\n .format(triplet[0], dis0))\n line_output.append(r'\\draw[tie1] (a{}.north) +(0,{}pt) to ($(tri.east)+(+1pt,0)$);'\n .format(triplet[2], dis0))\n\n line_output.append('')\n line_output.append(r'\\end{tikzpicture}')\n line_output.append('')\n assert line_output[0] == r'\\begin{tikzpicture}' and pos > 0\n line_output[0] = line_output[0] + '[xscale={}]'.format(110 / pos)\n\n line_file = line_file_format.format(chr(ord('a') + i), j)\n with open(line_file, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(line_output))\n line_count += 1\n\n slides_output.append('\\n% {}'.format(line_lyrics))\n slides_output.append(r'\\begin{nmnline}')\n slides_output.append(r'\\input{{{}}}'.format(line_file.split('/')[-1]))\n slides_output.append(r'\\end{nmnline}')\n\n with open(slides_file, 'w', encoding='utf8') as f:\n f.write('\\n'.join(slides_output))", "title": "" }, { "docid": "618008af2e4ed922f39a820a91f16f8f", "score": "0.4574483", "text": "def generate_powerpoint_pptx(self):\n # Generate the JSON for the report\n report_json = json.loads(self.generate_json())\n # Create document writer using the specified template\n if self.template_loc:\n try:\n self.spenny_ppt = Presentation(self.template_loc)\n except Exception:\n # TODO: Return error on webpage\n pass\n else:\n # TODO: Return error on webpage\n pass\n self.ppt_color_info = pptx.dml.color.RGBColor(\n self.informational_color_hex[0],\n self.informational_color_hex[1],\n self.informational_color_hex[2])\n self.ppt_color_low = pptx.dml.color.RGBColor(\n self.low_color_hex[0],\n self.low_color_hex[1],\n self.low_color_hex[2])\n self.ppt_color_medium = pptx.dml.color.RGBColor(\n self.medium_color_hex[0],\n self.medium_color_hex[1],\n self.medium_color_hex[2])\n self.ppt_color_high = pptx.dml.color.RGBColor(\n self.high_color_hex[0],\n self.high_color_hex[1],\n self.high_color_hex[2])\n self.ppt_color_critical = pptx.dml.color.RGBColor(\n self.critical_color_hex[0],\n 
self.critical_color_hex[1],\n self.critical_color_hex[2])\n # Loop through the dict of findings to create slides based on findings\n # Initialize findings stats dict\n findings_stats = {\n 'Critical': 0,\n 'High': 0,\n 'Medium': 0,\n 'Low': 0,\n 'Informational': 0\n }\n # Calculate finding stats\n for finding in report_json['findings'].values():\n findings_stats[finding['severity']] += 1\n # Slide styles (From Master Style counting top to bottom from 0..n)\n SLD_LAYOUT_TITLE = 0\n SLD_LAYOUT_TITLE_AND_CONTENT = 1\n SLD_LAYOUT_FINAL = 12\n # Add title slide\n slide_layout = self.spenny_ppt.slide_layouts[SLD_LAYOUT_TITLE]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n body_shape = shapes.placeholders[1]\n title_shape.text = 'Ghostwriter'\n text_frame = body_shape.text_frame\n # Use text_frame.text for first line/paragraph or\n # text_frame.paragraphs[0]\n text_frame.text = report_json['client']['full_name']\n p = text_frame.add_paragraph()\n p.text = report_json['client']['full_name']\n # Add Agenda slide\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n title_shape.text = 'Agenda'\n body_shape = shapes.placeholders[1]\n text_frame = body_shape.text_frame\n # Add Introduction slide\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n title_shape.text = 'Introduction'\n body_shape = shapes.placeholders[1]\n text_frame = body_shape.text_frame\n # Add Methodology slide\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n title_shape.text = 'Methodology'\n body_shape = shapes.placeholders[1]\n text_frame = body_shape.text_frame\n # Add Attack Path Overview slide\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n title_shape.text = 'Attack Path Overview'\n body_shape = shapes.placeholders[1]\n text_frame = body_shape.text_frame\n # Add Findings Overview Slide\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n body_shape = shapes.placeholders[1]\n title_shape.text = 'Findings Overview'\n text_frame = body_shape.text_frame\n for stat in findings_stats:\n p = text_frame.add_paragraph()\n p.text = '{} Findings'.format(stat)\n p.level = 0\n p = text_frame.add_paragraph()\n p.text = str(findings_stats[stat])\n p.level = 1\n # Add Findings Overview Slide 2\n # If there are findings then write a table of findings and\n # severity ratings\n if len(report_json['findings']) > 0:\n # Delete the default text placeholder\n textbox = shapes[1]\n sp = textbox.element\n sp.getparent().remove(sp)\n # Add a table\n rows = len(report_json['findings']) + 1\n columns = 2\n left = Inches(1.5)\n top = Inches(2)\n width = Inches(8)\n height = Inches(0.8)\n table = shapes.add_table(\n rows,\n columns,\n left,\n top,\n width,\n height).table\n # Set column width\n table.columns[0].width = Inches(9.0)\n table.columns[1].width = Inches(1.5)\n # Write table headers\n cell = 
table.cell(0, 0)\n cell.text = 'Finding'\n cell.fill.solid()\n cell.fill.fore_color.rgb = pptx.dml.color.\\\n RGBColor(0x2D, 0x28, 0x69)\n cell = table.cell(0, 1)\n cell.text = 'Severity'\n cell.fill.solid()\n cell.fill.fore_color.rgb = pptx.dml.color.\\\n RGBColor(0x2D, 0x28, 0x69)\n # Write findings rows\n row_iter = 1\n for finding in report_json['findings'].values():\n table.cell(row_iter, 0).text = finding['title']\n risk_cell = table.cell(row_iter, 1)\n # Set risk rating\n risk_cell.text = finding['severity']\n # Set cell color fill type to solid\n risk_cell.fill.solid()\n # Color the risk cell based on corresponding severity color\n if finding['severity'].lower() == \"informational\":\n risk_cell.fill.fore_color.rgb = self.ppt_color_info\n elif finding['severity'].lower() == \"low\":\n risk_cell.fill.fore_color.rgb = self.ppt_color_low\n elif finding['severity'].lower() == \"medium\":\n risk_cell.fill.fore_color.rgb = self.ppt_color_medium\n elif finding['severity'].lower() == \"high\":\n risk_cell.fill.fore_color.rgb = self.ppt_color_high\n elif finding['severity'].lower() == \"critical\":\n risk_cell.fill.fore_color.rgb = self.ppt_color_critical\n # Set cell's font color to white for better contrast with\n # dark background\n paragraph = risk_cell.text_frame.paragraphs[0]\n paragraph.font.color.rgb = pptx.dml.color.\\\n RGBColor(0xFF, 0xFF, 0xFF)\n row_iter += 1\n # Set all cells alignment to center and vertical center\n for cell in table.iter_cells():\n cell.text_frame.paragraphs[0].alignment = PP_ALIGN.CENTER\n cell.vertical_anchor = MSO_ANCHOR.MIDDLE\n else:\n p = text_frame.add_paragraph()\n p.text = 'No findings'\n p.level = 0\n # Create slide for each finding\n for finding in report_json['findings'].values():\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n body_shape = shapes.placeholders[1]\n title_shape.text = \"{} [{}]\".format(\n finding['title'],\n finding['severity'])\n text_frame = body_shape.text_frame\n text_frame.text = '{}'.format(finding['description'])\n bullets = finding['description'].splitlines()\n first_bullet = True\n for bullet in bullets:\n if first_bullet:\n text_frame.text = bullet\n first_bullet = False\n else:\n p = text_frame.add_paragraph()\n p.text = bullet\n p.level = 0\n # Add some detailed notes\n notes_slide = slide.notes_slide\n text_frame = notes_slide.notes_text_frame\n p = text_frame.add_paragraph()\n p.text = '{}: {}\\n'.format(\n finding['severity'].capitalize(),\n finding['title'])\n # Add observations slide\n # Observation 1\n # Bullet detail\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n body_shape = shapes.placeholders[1]\n title_shape.text = 'Positive Observations'\n text_frame = body_shape.text_frame\n # Add recommendations slide\n # Recommendation 1\n # Bullet detail\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n title_shape = shapes.title\n body_shape = shapes.placeholders[1]\n title_shape.text = \"Recommendations\"\n text_frame = body_shape.text_frame\n # Add Conclusion slide\n slide_layout = self.spenny_ppt.slide_layouts[\n SLD_LAYOUT_TITLE_AND_CONTENT]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = 
slide.shapes\n title_shape = shapes.title\n body_shape = shapes.placeholders[1]\n title_shape.text = 'Control Observations'\n text_frame = body_shape.text_frame\n # Add final slide\n slide_layout = self.spenny_ppt.slide_layouts[SLD_LAYOUT_FINAL]\n slide = self.spenny_ppt.slides.add_slide(slide_layout)\n shapes = slide.shapes\n # title_shape = shapes.title\n body_shape = shapes.placeholders[1]\n text_frame = body_shape.text_frame\n text_frame.clear()\n p = text_frame.paragraphs[0]\n p.line_spacing = 0.7\n p.text = settings.COMPANY_NAME\n p = text_frame.add_paragraph()\n p.text = settings.COMPANY_TWITTER\n p.line_spacing = 0.7\n p = text_frame.add_paragraph()\n p.text = settings.COMPANY_EMAIL\n p.line_spacing = 0.7\n # Finalize document and return it for an HTTP response\n return self.spenny_ppt", "title": "" }, { "docid": "edaee3289d9accda749851e2c910a401", "score": "0.45582038", "text": "def prepare_patches(self, calcs):\n # Make the full list of catalogs to run\n cats = set()\n\n # Use shear-shear and pos-pos only here as they represent\n # catalogs not pairs.\n for i, j, k in calcs:\n if k == SHEAR_SHEAR:\n cats.add((i, SHEAR_SHEAR))\n cats.add((j, SHEAR_SHEAR))\n elif k == SHEAR_POS:\n cats.add((i, SHEAR_SHEAR))\n cats.add((j, POS_POS))\n elif k == POS_POS:\n cats.add((i, POS_POS))\n cats.add((j, POS_POS))\n cats = list(cats)\n cats.sort(key=str)\n\n # This does a round-robin assignment to processes\n for (h, k) in self.split_tasks_by_rank(cats):\n\n print(f\"Rank {self.rank} making patches for {k}-type bin {h}\")\n\n # For shear we just have the one catalog. For position we may\n # have randoms also. We explicitly delete catalogs after loading\n # them to ensure we don't have two in memory at once.\n if k == SHEAR_SHEAR:\n cat = self.get_shear_catalog(h)\n cat.get_patches(low_mem=False)\n del cat\n else:\n cat = self.get_lens_catalog(h)\n cat.get_patches(low_mem=False)\n del cat\n ran_cat = self.get_random_catalog(h)\n\n # support use_randoms = False\n if ran_cat is None:\n continue\n\n ran_cat.get_patches(low_mem=False)\n del ran_cat\n\n # stop other processes progressing to the rest of the code and\n # trying to load things we have not written yet\n if self.comm is not None:\n self.comm.Barrier()", "title": "" }, { "docid": "3d7d3a0f2fd92b5860bad018536d4af5", "score": "0.45545024", "text": "def multipage(filename, figs=None):\n\n pp = PdfPages(filename)\n if figs is None:\n figs = [plt.figure(n) for n in plt.get_fignums()]\n for fig in figs:\n fig.savefig(pp, format='pdf')\n pp.close()", "title": "" }, { "docid": "35da644fb248999e8243ee74698348e6", "score": "0.45540062", "text": "def generate_md_files(self):\n for m in self.MODULES:\n rdf_figure_path = self.LINK_BASE_PATH + m.EXAMPLE_RDF[\"figure-file-path\"]\n rdf_file = self.BASE_PATH + m.EXAMPLE_RDF[\"file-path\"]\n rdf_txt = self.get_file_content(rdf_file)\n\n shex_figure_path = self.LINK_BASE_PATH + m.SHEX[\"figure-file-path\"]\n shex_file = self.BASE_PATH + m.SHEX[\"file-path\"]\n shex_txt = self.get_file_content(shex_file)\n\n markup_files = {}\n # create module markup\n with open(m.TEMPLATE_FILE, 'r') as f:\n md_text = chevron.render(f,\n {'text': m.EXAMPLE_RDF[\"text\"], 'semantic-model-figure-path': rdf_figure_path,\n 'example-rdf': rdf_txt, 'shex-figure-path': shex_figure_path,\n 'shex': shex_txt, 'title': m.NAME})\n print(md_text)\n output_file = self.OUTPUT_DIR + m.MD_FILE_NAME\n markup_files[output_file] = md_text\n\n for file_path, content in markup_files.items():\n file = open(file_path, \"w\")\n file.write(content)\n 
file.close()", "title": "" }, { "docid": "9a8570e3a6ad962d16342db96f3296db", "score": "0.45475274", "text": "def loadStoredPictures(self):\n\t\timport StringIO\n\t\tif self.debug : logging.debug(\"Suche WMA-Bilder... %s\" % (self.filename))\n\t\tdiaMode = self.guiPlayer.slide_mode\n\t\tdatei = None\n\t\tif diaMode > 1:\n\t\t\tfor image in self.audio.get(\"WM/Picture\", []):\n\t\t\t\t(mime, data, type) = self.unpack_image(image.value)\n\t\t\t\tif not type == 3:\n\t\t\t\t\tif (diaMode == 3 or diaMode == 5) or (type > 3 and type < 7) :\n\t\t\t\t\t\tif self.debug : logging.debug('Bild gefunden. Typ {0}'.format(type) )\n\t\t\t\t\t\tself.setMiscPic(pygame.image.load(self.getTempPic(data)))\n\n\t\treturn", "title": "" }, { "docid": "4913482ad2753acbbaf9cdc73fb3d2cb", "score": "0.4545979", "text": "def get_subimgs(self):\n\n subimgs = []\n for slide, bboxes_sl in zip(self.slides, self.bboxes):\n bb_arr = np.array([i[0] for i in bboxes_sl])\n lbl_arr = np.array([i[1] for i in bboxes_sl])\n for bbox in tqdm(bboxes_sl):\n subimg = self.get_random_crop_around(slide, bbox[0])\n other_bboxes, other_labels = self.get_all_landed_bboxes(subimg, bb_arr, lbl_arr)\n subimg = slide.get_patch(*subimg[0:2])\n subimg = tf.convert_to_tensor(subimg, dtype=tf.float32)\n other_bboxes = tf.convert_to_tensor(other_bboxes, dtype=tf.float32)\n other_labels = tf.convert_to_tensor(other_labels, dtype=tf.int32)\n subimgs.append({'image': subimg, 'objects': {'bbox': other_bboxes / 255, 'label': other_labels}})\n return subimgs", "title": "" }, { "docid": "a833134c153c605a244dbc7e19b92b26", "score": "0.45430592", "text": "def get_presentation_elements_from_path(self):\n self.presentation_elements = []\n Logger.debug(\"Presentation: Creating the presentation elements from folder \" + self.media_path)\n for filename in self.presentation_filenames:\n extension = filename.split(\".\")[-1]\n relative_filename = self.media_path + \"/\" + filename\n Logger.debug(\"Relative filename: %s\", relative_filename)\n\n if extension in self.VIDEO_FORMATS:\n self.presentation_elements.append(PresentationElement(ContentType.Video, relative_filename))\n elif extension in self.IMAGE_FORMATS:\n self.presentation_elements.append(PresentationElement(ContentType.Image, relative_filename))\n elif extension in self.TEXT_FORMATS:\n self.presentation_elements.append(PresentationElement(ContentType.Text, relative_filename))\n else:\n Logger.error(\"Presentation: Unsupported filetype \\\"%s\\\" \", extension)", "title": "" }, { "docid": "05b3b5410c5627f1afa77ffb41b9067a", "score": "0.453223", "text": "def chunker(self, pts, mod):\n\n outarrays = []\n newgrp = []\n for i,p in enumerate(pts):\n if i%mod==0:\n newgrp.append(p)\n if len(newgrp)>1:\n outarrays.append(newgrp)\n newgrp=[] \n else:\n newgrp.append(p)\n \n return outarrays", "title": "" }, { "docid": "58f0318069750eff0e22f316aeef6770", "score": "0.4531553", "text": "def extract_test_patches(self, scan, db, pp, volumes, ind_part):\n \"\"\"\n Arguments:\n scan: selected scan\n db: DatabaseBRATS object\n pp: PreprocessorBRATS object\n volumes: scan volumes\n ind_part: list of voxel indices at which patches will be\n extracted\n Returns:\n extracted test patches\n \"\"\"\n raise NotImplementedError()", "title": "" }, { "docid": "a1f638a16d6017e015e60ad226a8a328", "score": "0.45289516", "text": "def get_xpm_files():\n xpm_files = []\n filedir = get_image_directory()\n all_files = os.listdir(filedir)\n for filename in all_files:\n filepath = filedir + filename\n unused, ext = 
os.path.splitext(filepath)\n if ext == \".xpm\":\n xpm_files.append(filepath)\n xpm_files.sort()\n return xpm_files", "title": "" }, { "docid": "655de99a45638703cb1fdd38a4718d3b", "score": "0.45269948", "text": "def get_PCPI_files(FRED_files, FIPS_L):\n Files_FIPS_L = [(fileI[0], fileI[0][-9:-4]) for fileI in FRED_files if re.match('PCPI\\d{5}',fileI[0].split('.')[0][-9:]) and fileI[0][-9:-4] in FIPS_L]\n return Files_FIPS_L", "title": "" }, { "docid": "5620f6daa038a30fc76e47c7e4502136", "score": "0.45142993", "text": "def batchPNGtoGeotiff(folder):\n for image in glob.glob(folder + '/*.png'):\n base = os.path.splitext(image)[0]\n kml = base + '.kml'\n out = base + '.tif'\n pngToGeotiff(image, kml, out)", "title": "" }, { "docid": "93c732887c9689224a4dae8398728bed", "score": "0.45113245", "text": "def _get_mpp_tiffslide(\n slide_path: str | Path,\n) -> tuple[float, float]:\n if not HAS_TIFFSLIDE:\n logger.critical(\n \"Cannot read MPP with TiffSlide because TiffSlide is not available\"\n )\n raise CannotReadSpacing()\n\n slide = tiffslide.TiffSlide(slide_path)\n mppx: float | None = None\n mppy: float | None = None\n if (\n tiffslide.PROPERTY_NAME_MPP_X in slide.properties\n and tiffslide.PROPERTY_NAME_MPP_Y in slide.properties\n ):\n mppx = slide.properties[tiffslide.PROPERTY_NAME_MPP_X]\n mppy = slide.properties[tiffslide.PROPERTY_NAME_MPP_Y]\n if mppx is None or mppy is None:\n raise CannotReadSpacing()\n else:\n try:\n mppx = float(mppx)\n mppy = float(mppy)\n return mppx, mppy\n except Exception as err:\n raise CannotReadSpacing() from err\n raise CannotReadSpacing()", "title": "" }, { "docid": "c86fa07a78be8897902ec97bb63d7916", "score": "0.4510664", "text": "def main():\n figure_dir = os.path.dirname(plts.__file__)\n pngs = glob.glob(os.path.join(figure_dir, '*.png'))\n for png in pngs:\n print('copying {} to {}/'.format(os.path.basename(png), _this_dir))\n shutil.copy(png, _this_dir)", "title": "" }, { "docid": "372bb7d0d7ea15ab00a33831406f3b72", "score": "0.4496847", "text": "def expertPaths (self, slist):\n\n # slicelist gets modified, slist doesn't\n slicelist = copy.copy(slist)\n template = self.getTemplate()\n\n # Use the name_in_file attribute to access files\n if hasattr(self, 'name_in_file'):\n realid = self.name_in_file\n else:\n realid = self.id\n\n # Handle rank-0 variables separately\n if self.rank() == 0:\n matchnames = [realid,None,None,None,None,None,None]\n filename = self.getFilePath(matchnames, template)\n\n result = (0, (), (filename, []))\n return result\n\n # Find the number of partitioned axes\n npart = 0\n ndim = 0\n for (axis,start,length,true_length) in self.domain:\n if hasattr(axis,'partition'):\n npart = npart+1\n if npart==1:\n part1 = axis\n npart1 = ndim\n elif npart==2:\n part2 = axis\n npart2 = ndim\n else:\n raise CDMSError, TooManyPartitions + variable.id\n ndim = ndim+1\n\n # If no partitioned axes, just read the data\n if npart==0:\n matchnames = [realid,None,None,None,None,None,None]\n filename = self.getFilePath(matchnames, template)\n result = (0, (), (filename, slicelist))\n\n # If one partitioned axes:\n elif npart==1:\n\n # intersect the slice and partition for that axis\n slice1 = slicelist[npart1]\n (axis,startelem,length,true_length) = self.domain[npart1]\n partition = slicePartition(slice1, self.getPartition(axis))\n if partition==[]:\n return (1, (npart1,), None)\n\n # For each (interval, partslice) in the partition:\n resultlist = []\n (firstinterval, firstslice) = partition[0]\n prevhigh = firstinterval[0]\n for (interval,partslice) 
in partition:\n\n # If the previous interval high is less than\n # the current interval low value, interpose\n # missing data.\n low = interval[0]\n if prevhigh<low:\n missing_interval = (prevhigh,low)\n missing_slice = sliceIntersect(slice1, missing_interval)\n\n # Note: if the slice has a stride>1, it might not intersect,\n # so don't interpose missing data in this case.\n if missing_slice is not None:\n slicelist[npart1] = missing_slice\n resultlist.append((None,copy.copy(slicelist)))\n prevhigh = interval[1]\n\n # generate the filename\n matchnames = [realid, None, None, None, None,None,None]\n matchnames = self.genMatch(axis, interval, matchnames)\n filename = self.getFilePath(matchnames, template)\n\n # adjust the partslice for the interval offset\n # and replace in the slice list\n filestart = partslice.start-interval[0]\n filestop = partslice.stop-interval[0]\n fileslice = slice(filestart,filestop,partslice.step)\n slicelist[npart1] = fileslice\n\n resultlist.append((filename,copy.copy(slicelist)))\n\n result = (1,(npart1,),resultlist)\n\n # If two partitioned axes, 2-D version of previous case\n if npart==2:\n slice1 = slicelist[npart1]\n slice2 = slicelist[npart2]\n (axis1,startelem1,length1,true_length1) = self.domain[npart1]\n (axis2,startelem2,length2,true_length2) = self.domain[npart2]\n partition1 = slicePartition(slice1, self.getPartition(axis1))\n partition2 = slicePartition(slice2, self.getPartition(axis2))\n if partition1==[] or partition2==[]:\n return (2, (npart1,npart2), None)\n\n # For each (interval, partslice) in the partition:\n resultlist = []\n (firstinterval1, firstslice1) = partition1[0]\n prevhigh1 = firstinterval1[0]\n for (interval1,partslice1) in partition1:\n\n # If the previous interval high is less than\n # the current interval low value, interpose\n # missing data.\n low = interval1[0]\n if prevhigh1<low:\n missing_interval = (prevhigh1,low)\n missing_slice = sliceIntersect(slice1, missing_interval)\n if missing_slice is not None:\n slicelist[npart1] = missing_slice\n resultlist.append( [(None,copy.copy(slicelist))] )\n prevhigh1 = interval1[1]\n\n # generate matchnames\n matchnames = [realid, None, None, None, None,None,None]\n matchnames = self.genMatch(axis1, interval1, matchnames)\n\n # adjust the partslice for the interval offset\n # and replace in the slice list\n filestart = partslice1.start-interval1[0]\n filestop = partslice1.stop-interval1[0]\n fileslice = slice(filestart,filestop,partslice1.step)\n slicelist[npart1] = fileslice\n\n chunklist = []\n (firstinterval2, firstslice2) = partition2[0]\n prevhigh2 = firstinterval2[0]\n for (interval2,partslice2) in partition2:\n\n # If the previous interval high is less than\n # the current interval low value, interpose\n # missing data.\n low = interval2[0]\n if prevhigh2<low:\n missing_interval = (prevhigh2,low)\n missing_slice = sliceIntersect(slice1, missing_interval)\n if missing_slice is not None:\n slicelist[npart2] = missing_slice\n chunklist.append((None,copy.copy(slicelist)))\n prevhigh2 = interval2[1]\n\n # generate the filename\n matchnames = self.genMatch(axis2, interval2, matchnames)\n filename = self.getFilePath(matchnames, template)\n\n filestart = partslice2.start-interval2[0]\n filestop = partslice2.stop-interval2[0]\n fileslice = slice(filestart,filestop,partslice2.step)\n slicelist[npart2] = fileslice\n\n chunklist.append((filename,copy.copy(slicelist)))\n\n resultlist.append(chunklist)\n\n result = (2,(npart1,npart2),resultlist)\n\n return result", "title": "" }, { "docid": 
"da9c01bb418ced7adaa6bdaf3a03e81f", "score": "0.44958374", "text": "def get_gallery():\n\n # Examples path and required number of files\n web_path = \"/static/examples/\"\n examples_path = \"./web/static/examples\"\n result = []\n\n # Get all directories from path\n directories = os.listdir(examples_path)\n\n for _dir in directories:\n files = os.listdir(examples_path + \"/\" + _dir)\n chosen_files = sample(files, 2)\n\n result.append([{\n \"web\": web_path + _dir + \"/\" + fl,\n \"server\": examples_path + \"/\" + _dir + \"/\" + fl\n } for fl in chosen_files])\n\n return result", "title": "" }, { "docid": "3b816b56981692cf6f9a1d59c01c4164", "score": "0.4492419", "text": "def _export_patches_for_setting(\n self,\n frame: pd.DataFrame,\n output_dir: Path,\n slide_path: Path,\n level: int,\n patch_size: int,\n loader: Loader\n ):\n def get_output_dir_for_label(label: str) -> Path:\n label_str = invert(loader.labels)[label]\n label_dir = Path(output_dir) / label_str\n return label_dir\n\n def make_patch_path(x: int, y: int, label: int) -> Path:\n filename = f\"{Path(slide_path).stem}-{x}-{y}-{level}-{patch_size}.png\"\n label_dir = get_output_dir_for_label(label)\n label_dir.mkdir(parents=True, exist_ok=True)\n return label_dir / filename\n\n def save_patch(region: Region, slide: SlideBase, filepath: Path) -> None:\n image = slide.read_region(region)\n opencv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n cv2.imwrite(str(filepath), np.array(opencv_image))\n\n with loader.load_slide(slide_path) as slide:\n for row in frame.itertuples():\n filepath = make_patch_path(row.x, row.y, row.label)\n region = Region.make(row.x, row.y, patch_size, level)\n save_patch(region, slide, filepath)", "title": "" }, { "docid": "85eb9c99203fac2db0e8dc64bfa1435a", "score": "0.4491039", "text": "def _get_recal_plots(work_dir, align_bam):\n (base, _) = os.path.splitext(align_bam)\n reports = glob.glob(os.path.join(work_dir, \"reports\", \"images\",\n \"%s*-plot.pdf\" % base))\n reports.sort()\n return reports", "title": "" }, { "docid": "be5d7648f11e083f11973e7a9f59d2bc", "score": "0.44812915", "text": "def load_data(data_dir):\n images = []\n labels = []\n\n for root, dirs, files in os.walk(data_dir):\n for file in files:\n if not file.endswith(\".ppm\"):\n continue\n img = cv2.imread(os.path.join(root, file), 1)\n img = cv2.resize(img, dsize=(30, 30))\n images.append(img / 255.)\n labels.append(os.path.basename(root))\n return images, labels", "title": "" } ]
a04d0abb2c5939e86faec2939ceb53d6
Turn off the WLED nightlight switch.
[ { "docid": "5c9433e7d86174c8309e7d0bba78df87", "score": "0.8082955", "text": "async def async_turn_off(self, **kwargs: Any) -> None:\n await self.coordinator.wled.nightlight(on=False)", "title": "" } ]
[ { "docid": "dc32254499822b5c7b3c605da071785c", "score": "0.7715766", "text": "def switch_off(self):\n self.SwitchOff()", "title": "" }, { "docid": "dc32254499822b5c7b3c605da071785c", "score": "0.7715766", "text": "def switch_off(self):\n self.SwitchOff()", "title": "" }, { "docid": "92a77f118b5f29ca19951e15dc24f5d9", "score": "0.7559786", "text": "def turn_off(self, **kwargs):\n self._brightness = 0\n self._send_command(\"turn_off\")", "title": "" }, { "docid": "c73838fba97e738a5ac8035d26987fb3", "score": "0.75553876", "text": "def turn_off(self):\n # turn off the actual power supply here\n self.set_state(DevState.OFF)", "title": "" }, { "docid": "1459f5a5eb3776b5b3e280b2378f60d1", "score": "0.75200534", "text": "def leds_off(self):\n self.ipcon.send_request(self, BrickIMU.FUNCTION_LEDS_OFF, (), '', '')", "title": "" }, { "docid": "b112ccb3cdbf1c2a19b26a8eddb27399", "score": "0.7517431", "text": "def turn_off(self, **kwargs):\n self._light.turn_off()", "title": "" }, { "docid": "1a54d5ecae1b2e03923c4d83970f023d", "score": "0.7509295", "text": "def turn_off(self, **kwargs) -> None:\n try:\n self.wemo.set_state(WEMO_FAN_OFF)\n except ActionException as err:\n _LOGGER.warning(\"Error while turning off device %s (%s)\", self.name, err)\n self._available = False\n\n self.schedule_update_ha_state()", "title": "" }, { "docid": "0acd8488f5215660d528ce954e9bb4cc", "score": "0.74709547", "text": "def turn_off(self, **kwargs: Any) -> None:\n self._send_command([{\"code\": DPCODE_SWITCH, \"value\": False}])", "title": "" }, { "docid": "2e90601ac06f1ded7687c8679336ae01", "score": "0.74378955", "text": "def set_light_off(self):\n self._light = \"OFF\"", "title": "" }, { "docid": "2e90601ac06f1ded7687c8679336ae01", "score": "0.74378955", "text": "def set_light_off(self):\n self._light = \"OFF\"", "title": "" }, { "docid": "2e90601ac06f1ded7687c8679336ae01", "score": "0.74378955", "text": "def set_light_off(self):\n self._light = \"OFF\"", "title": "" }, { "docid": "2e90601ac06f1ded7687c8679336ae01", "score": "0.74378955", "text": "def set_light_off(self):\n self._light = \"OFF\"", "title": "" }, { "docid": "7817d58963f5e74799fc970a1f65bf8c", "score": "0.7428295", "text": "async def async_turn_off(self, **kwargs: Any) -> None:\n _LOGGER.debug(\"Turning off light\")\n await self.device.set_light(False)", "title": "" }, { "docid": "5a0cbd0c42e85953500aa4a9e77b5825", "score": "0.7399994", "text": "async def async_turn_off(self, **kwargs):\n await self._light.set(False)", "title": "" }, { "docid": "502f57a89d9766817eb5817b5aa56ffd", "score": "0.73776287", "text": "def turn_off(self, **kwargs):\n self._device.switch_off()", "title": "" }, { "docid": "8db1cd937549890ce6ce7d61ae34ee25", "score": "0.73385537", "text": "def turn_off():\n\n print(\"Shutting down...\")\n\n GPIO.cleanup()\n subprocess.call(['echo posys | sudo -S poweroff'], shell=True)", "title": "" }, { "docid": "927071c0bc11ca24365a1d64d33d8761", "score": "0.73351955", "text": "def turn_off(self):\n if self.output:\n print(\"turning off\")\n GPIO.output(self.pin, False)\n self.output = False\n else:\n print(\"staying off\")", "title": "" }, { "docid": "4e2ceeca98e89801c950d419b8f62260", "score": "0.7325797", "text": "def turn_off(self):\n self._ls121.write(\"IENBL 0\")", "title": "" }, { "docid": "7ed001f8b62a3b282f77561c0797f5fa", "score": "0.7284337", "text": "async def async_turn_off(self, **kwargs):\n #_LOGGER.info(\"Turning Off Light %s\", self._light_num)\n self._spaclient.set_light(self._light_num, False)", "title": "" }, { "docid": 
"0765cc1e90c4cff9509bd55aa0a42163", "score": "0.7249237", "text": "def turn_off(self, **kwargs):\n self._device.cameralight = False", "title": "" }, { "docid": "1026269801042a898e6b651912140445", "score": "0.7241728", "text": "async def async_turn_off(self):\n await self.send_command(self._commands['off'])\n \n if self._power_sensor is None:\n self._state = STATE_OFF\n self._source = None\n await self.async_update_ha_state()", "title": "" }, { "docid": "a333c2bd1e1a397aa55ca679aa827bb2", "score": "0.7239784", "text": "def _turnOff(n):\n global ON, STATE\n light = str(n)\n body = '{ \"on\" : false}'\n url_to_call = lights_url + light + '/state'\n rest.send('PUT', url_to_call, body, {'Content-Type': 'application/json'})\n ON = False\n STATE = 0", "title": "" }, { "docid": "15c19fd2769198381f7ad6d56ec591fb", "score": "0.7201994", "text": "def turn_off(self):\n self._emo.power = False", "title": "" }, { "docid": "20eb7fd3f9cb43411f96b7945c0192a8", "score": "0.71921575", "text": "def turn_off(self):\n self.smartbulb.state=self.smartbulb.BULB_STATE_OFF", "title": "" }, { "docid": "96665ee8b4a981f88210104c0ef90e45", "score": "0.7188905", "text": "def shutdown_light(self):\n self.set_rotation(True, speed=0)\n self.set_lamp(0)\n time.sleep(2)\n self.dmx.disconnect()", "title": "" }, { "docid": "77214e87d2b721efe75d6fe337acd16c", "score": "0.718843", "text": "def turn_off(self, **kwargs):\n self._device.state = False", "title": "" }, { "docid": "77214e87d2b721efe75d6fe337acd16c", "score": "0.718843", "text": "def turn_off(self, **kwargs):\n self._device.state = False", "title": "" }, { "docid": "4d395e4b57086f822768d5a56cdba242", "score": "0.7182218", "text": "def turn_off(self, **kwargs):\n self._state = False\n self.led.off(self.group)\n self.update_ha_state()", "title": "" }, { "docid": "cb2570d93a35647bb1ada8b1790451c3", "score": "0.7157535", "text": "def PowerOff(self):\n self._states.power = OFF", "title": "" }, { "docid": "58ae3904c77a9c64b35571bfa85b7784", "score": "0.7150365", "text": "def off(self):\n self.target.report_info(\"Powering off\", dlevel = 1)\n self.target.rtb.rest_tb_target_power_off(\n self.target.rt, ticket = self.target.ticket)\n self.target.report_info(\"Powered off\")", "title": "" }, { "docid": "2998a6f52ffb1e17fa4687de4ca10f52", "score": "0.71313334", "text": "async def async_turn_off(self, **kwargs: Any) -> None:\n await self.coordinator.wled.sync(send=False)", "title": "" }, { "docid": "8dfe2517b16ac6cbe5b7fd632c6c294d", "score": "0.71301246", "text": "def turn_off(self, tegra):\n return self._op('off', tegra)", "title": "" }, { "docid": "d3aedda427e809146027177eb4045c67", "score": "0.7121418", "text": "def off(self):\n for led in self._leds: led.write(b'0\\n')", "title": "" }, { "docid": "a97c7fc651448cf0fe9ec57a1d0d35be", "score": "0.7110977", "text": "def turn_off(self, **kwargs):\n self.set_state(turn_on=False)", "title": "" }, { "docid": "a13e144d58176ca2f154db7c5d56265f", "score": "0.71018744", "text": "async def off(self, ctx: Context):\n if self.nanopanels.get_power():\n self.nanopanels.toggle_power()\n \n await ctx.send('Nanopanels off', delete_after=20)", "title": "" }, { "docid": "0b2a89dd40276f0975319f141a65e62c", "score": "0.70998085", "text": "def leds_off() -> None:\n for i in range(15):\n pianohat.set_led(i, False)", "title": "" }, { "docid": "55aa0442e179fb871dde9fb9518a54ce", "score": "0.7084688", "text": "def led_off(pin):\n GPIO.output(pin, GPIO.LOW)", "title": "" }, { "docid": "66961ae31fbf8304fe2a3843b95bc0fe", "score": "0.707584", "text": "async 
def async_turn_off(self, **kwargs: Any) -> None:\n await self.coordinator.wled.sync(receive=False)", "title": "" }, { "docid": "99c751906f88a0808ea02132f95fbfb4", "score": "0.706316", "text": "def turn_off(self, **kwargs):\n if self.type == SWITCH_TYPE_SCHEDULE:\n self.robot.disable_schedule()", "title": "" }, { "docid": "1533c62ee0f9e4d049f3ded55c43a705", "score": "0.7031459", "text": "def turn_off(self):\n pass", "title": "" }, { "docid": "5b9142c888a4a64025c408848952cfc2", "score": "0.70285636", "text": "def turnoff(self):\n ok = self.pcontrol.turnoff(self.con_chan)\n if ok:\n self._poweron = False\n return ok", "title": "" }, { "docid": "2c9535613723c0ebbf06efc400bb50d8", "score": "0.6996775", "text": "def turn_off(self) -> None:\n pass", "title": "" }, { "docid": "69a83ed2fbc0222fe4fb93b0e77c8fa4", "score": "0.6996114", "text": "def powerdown(self):\n logging.debug(\"Turning off the board's power\")\n return self.pins['pwr'].off()", "title": "" }, { "docid": "3db31ca53e7c2d829beecf98cb724028", "score": "0.69866383", "text": "def backlight_off(self):\n self.command([b'\\x46'])", "title": "" }, { "docid": "21c89f89dbb6eeed6a069e284d56f195", "score": "0.69836235", "text": "def turn_away_mode_off(self):\n pass", "title": "" }, { "docid": "dc3a99bdd531b16c25019ec8e78a4ea8", "score": "0.6975273", "text": "async def turn_off(self) -> None:\n raise NotImplementedError(\"Device subclass needs to implement this.\")", "title": "" }, { "docid": "dc354696dc7152dd38c9f60761b60d33", "score": "0.69623595", "text": "def turn_off(self, **kwargs):\n if self._switch(not self._active_state):\n self._state = False\n self.update_ha_state()", "title": "" }, { "docid": "23954544eba22419c03ed080c60efdec", "score": "0.6948357", "text": "def off(self):\n print(\"Set {} to off\".format(self.pin))\n self.state = False\n GPIO.output(self.pin, GPIO.HIGH)", "title": "" }, { "docid": "dbcdf6067d583a5354564bf54ca62d32", "score": "0.69400394", "text": "def turnOff(self, sendDynet=True, sendMQTT=True):\n self.turnOn(0, sendDynet, sendMQTT)", "title": "" }, { "docid": "fb7ca88d65c1dd296dc4d2709191aa95", "score": "0.6938181", "text": "def all_LEDs_off(self):\n self.power_state = [False for i in range(6)]", "title": "" }, { "docid": "3d886f5435d05fd8541acce112cfc85c", "score": "0.69315094", "text": "def off(self) -> None:\n if self.__verbose__:\n print(self.__name__,'stopping')\n self.__running__ = False\n GPIO.output(self.__pins__['IA'], 0)\n GPIO.output(self.__pins__['IB'], 0)\n self.__pwm__.off()", "title": "" }, { "docid": "8c4da3b853cb10d129f66460032681c4", "score": "0.69171536", "text": "async def async_turn_off(self, **kwargs):\n if self.switch_type == \"Away Mode\":\n await self.data.set_away_mode(False, self.awayTemperature)\n else:\n await self.data.set_system_switch(self.hub_key, False)\n return True", "title": "" }, { "docid": "69dfb0a5ffddf40f78440b8ba8e8491d", "score": "0.6906725", "text": "def turn_off(self):\n self.set_speed(SPEED_OFF)", "title": "" }, { "docid": "8eaba532eb7a990524bbbf4d58d3d04e", "score": "0.6897648", "text": "def test_turn_off(self, insight):\n insight.off()\n assert insight.get_state(force_update=True) == 0", "title": "" }, { "docid": "1219d00744e2d56e192f489baed6e0b2", "score": "0.6861978", "text": "def turn_off(self, **kwargs: Any) -> None:\n\n _LOGGER.debug(\"Reconnect %s:%s\", self._controller.server, self._controller.port)\n with REQ_LOCK:\n try:\n # Recycle socket on new command to recover mochad connection\n self._controller.reconnect()\n self.switch.send_cmd(\"off\")\n # No read 
data on CM19A which is rf only\n if self._comm_type == \"pl\":\n self._controller.read_data()\n self._attr_is_on = False\n except (MochadException, OSError) as exc:\n _LOGGER.error(\"Error with mochad communication: %s\", exc)", "title": "" }, { "docid": "180b6f8b8af45079b12405fe5aeb810b", "score": "0.68523073", "text": "def powerOff(self):\n return self.sendRawCMD('PWR00')", "title": "" }, { "docid": "d4dd8188fa211dc4d54b97936fd94f81", "score": "0.6839121", "text": "def turn_lights_off(self):\n url = self.base_url + '/groups/1/action'\n data = '''{\"on\":false}'''\n requests.put(url, data=data)", "title": "" }, { "docid": "a3d500220c1434729a14b07846323a82", "score": "0.68362594", "text": "async def async_turn_off(self) -> None:\n _LOGGER.debug(\"Turning off %s\", self.name)\n if DeviceCapability.SWITCH in self._device.capabilities:\n await self.send_command(DeviceCommand.OFF)\n else:\n await self.send_command(DeviceCommand.SET_SPEED, DeviceState.OFF)", "title": "" }, { "docid": "cfcea78cb69096ca0cba15d3ab462ea0", "score": "0.68256766", "text": "def all_off():\n L1_led.write(0)\n L2_led.write(0)\n L3_led.write(0)\n\n R1_led.write(0)\n R2_led.write(0)\n R3_led.write(0)\n\n U1_led.write(0)\n U2_led.write(0)\n U3_led.write(0)", "title": "" }, { "docid": "c034fa498f0405097ff650f455b69eaa", "score": "0.68233436", "text": "def update_disable_button_power_switch_off(self):\n\n Logger.getLogger().info('update_disable_button_power_switch_off')\n self.__toggle_button__(GuiKey.POWER_ON_TELE, disabled=False, button_color=('black', 'white'))\n self.__toggle_button__(GuiKey.POWER_OFF_TELE, disabled=True, button_color=('black', 'red'))", "title": "" }, { "docid": "d89d3f61d45439b36480ef905011c611", "score": "0.68198645", "text": "def turn_all_off(self):\n # get all lights\n all_lights = self.get_all_lights()\n for light in all_lights:\n url_to_call = self.lights_url+'/'+light+'/state'\n # prepare the \"turn off\" request\n body = '{ \"on\": false }'\n rest.send('PUT', url_to_call, body, {'Content-Type': 'application/json'})", "title": "" }, { "docid": "4b74dab7848f815c34aa6f8626c52485", "score": "0.68145895", "text": "async def async_turn_off(self, **kwargs):\n await self.data.set_smart_plug_state(self.smart_plug_id, \"Off\")\n return True", "title": "" }, { "docid": "3ce9398f157f6dc80430b41978554596", "score": "0.67896736", "text": "async def async_turn_off(self, **kwargs: Any) -> None:\n # This allows transitioning to off, but resets the brightness\n # to 1 for the next set_state(True) command\n if not self._device_control:\n return\n transition_time = None\n if ATTR_TRANSITION in kwargs:\n transition_time = int(kwargs[ATTR_TRANSITION]) * 10\n\n await self._api(\n self._device_control.set_dimmer(\n dimmer=0, transition_time=transition_time\n )\n )\n else:\n await self._api(self._device_control.set_state(False))", "title": "" }, { "docid": "bbeb2ed80610727a009c4881ca1d1777", "score": "0.67848504", "text": "def lamp_off(self):\n\n self.pin.off()\n logging.debug(\"Turning off lamp %s\", self.name)", "title": "" }, { "docid": "5127e0019ac8b4babc2332c48121fe8e", "score": "0.67755234", "text": "def set_power_state_off(self, force=False):\n return self._set_power_state('ForceOff' if force else 'PushPowerButton')", "title": "" }, { "docid": "8a2ef8bbecf8ea3012cb4def911783ea", "score": "0.677457", "text": "def turn_away_mode_off(self):\n self._away = False", "title": "" }, { "docid": "18dad1079c1d4909cbd516638cb9f104", "score": "0.67726266", "text": "async def async_turn_off(self, **kwargs):\n 
_LOGGER.debug(\"powering off\")\n try:\n await self.onkyo_command(self._iscp.mk_command(iscp.POWER, iscp.OFF))\n except Exception as e:\n _LOGGER.error(e)\n # If the receiver is already off, we may not receive the \"PWR00\" response.\n self._state[POWER] = False\n self.async_schedule_update_ha_state()", "title": "" }, { "docid": "7c77f78dd0288fff6275ed76dab6a173", "score": "0.676656", "text": "def switch_off(self,\n switch):\n\n self.switches[switch].switch_off()", "title": "" }, { "docid": "42d7293fe01e60f868aa7bb6b7df4f82", "score": "0.6762151", "text": "def standbyOff(self, cmd):\n \n self.actor.controllers['turbo'].stopStandby(cmd=cmd)\n self.status(cmd)", "title": "" }, { "docid": "5e5c987a1ea4edd9e61f1358f2349b5e", "score": "0.67554474", "text": "def off(self):\n if not self.healthy:\n self.health_check()\n if self._pre_off_func:\n self._pre_off_func()\n switchboard = self._get_switchboard_if_initialized()\n if switchboard:\n switchboard.close_all_transports()\n if self._power_and_data_share_cable:\n switchboard.add_log_note(\n f\"comm_power.off() called on {self._device_name} set communication \"\n f\"port {self.port_number} to charge as device has a single USB \"\n \"cable for data and power.\")\n if self._power_and_data_share_cable:\n self._hub.switch_power.power_on(self.port_number, data_sync=False)\n else:\n self._hub.switch_power.power_off(self.port_number)", "title": "" }, { "docid": "a83b14e04ecd843f50a976246ddee6dc", "score": "0.67549014", "text": "def nightlights_off_detected(self, entity:str)->None:\n if self._night_light_on == False:\n return\n for night_light in self._night_lights:\n self.turn_off(night_light)\n \n self._night_light_on = False", "title": "" }, { "docid": "75742c829e27df125d132c027d519838", "score": "0.674736", "text": "def off(self):\n self._check_if_stopped()\n\n self._pi.write(self._pin, self._off_level)", "title": "" }, { "docid": "0a97533de761fbf5df33cff298658cc0", "score": "0.6733566", "text": "def off(self):\n print(\"off(): Turning screen on\")\n bl_status = self.status()\n if bl_status == \"1\":\n subprocess.call([\"/home/pi/bin/scr-on.sh\"])", "title": "" }, { "docid": "027770fd772ebd446ae32338e399036d", "score": "0.6721157", "text": "async def async_turn_off(self, **kwargs: Any) -> None:\n await self._smartbridge.turn_off(self.device_id)", "title": "" }, { "docid": "6687cfe92105b2cb83bbc8aecade2b41", "score": "0.671804", "text": "def poweroff( self ):\n\t\tself.__write_command( self.COMMAND_POWERDOWN )", "title": "" }, { "docid": "2a2c1c8b0a5f1262cc6199fa5bc200b8", "score": "0.6715576", "text": "def off(self):\n self.state.put('Off')", "title": "" }, { "docid": "29e47d8be8317549d4d0c930f5f23301", "score": "0.6703783", "text": "async def async_turn_off(self, **kwargs: Any) -> None:\n value_type = self.gateway.const.SetReq.V_LIGHT\n self.gateway.set_child_value(self.node_id, self.child_id, value_type, 0, ack=1)\n if self.assumed_state:\n # optimistically assume that light has changed state\n self._state = False\n self._values[value_type] = STATE_OFF\n self.async_write_ha_state()", "title": "" }, { "docid": "fa16df4a4880a1137dda58817439300f", "score": "0.6693147", "text": "async def async_turn_off(self, **kwargs):\n new_state = (\n self._client.TEMPRANGE_LOW if self._type == TEMP_RANGE else self._client.OFF\n )\n return await self.change_switch(new_state)", "title": "" }, { "docid": "3a6bf1e334aa8f3d9d8d55a466e7eb54", "score": "0.6691998", "text": "def winter_off(self): # pragma: no cover\n self.set_batt_settings({\"wintermode\": \"off\"})", 
"title": "" }, { "docid": "6adb00d1eaa6efa279268e8c8cf824d1", "score": "0.66677356", "text": "def turn_away_mode_off(self) -> None:\n self._attr_is_away_mode_on = False\n self.schedule_update_ha_state()", "title": "" }, { "docid": "15bb45243f96fc0852c9d511d696273e", "score": "0.6666563", "text": "def handset_offhook(self):\n self.press_phys_button('hookswitch', 'offhook')", "title": "" }, { "docid": "53ee3f9fec64b1715a80a02852054f57", "score": "0.6656845", "text": "def power_off(self):\n self.overseer_api.stop_tm()", "title": "" }, { "docid": "6a5a751fc083c7cc6b5044f8831b831b", "score": "0.66534615", "text": "def turn_off(self):\n\t\tself.presentSprite = theme.sliderPointerSpriteOff\n\t\tself.presentColor = theme.sliderPointerColorOff", "title": "" }, { "docid": "46c971e6d7963fc08d0fa45b99ceab64", "score": "0.66454554", "text": "def turn_off(self):\n self.set_color(0, 0, 0, 0)", "title": "" }, { "docid": "53db9236da3e6d4107af9018aefd9844", "score": "0.6645033", "text": "def turn_off(self, **kwargs: Any) -> None:\n self._gateway.stop_fan()\n self._is_on = False", "title": "" }, { "docid": "32dbec0efcf13e1d60ec836321b8be5e", "score": "0.6643354", "text": "def turnoff(self):\n wn.title(\"Karel se ha apagado\")", "title": "" }, { "docid": "6b34581fee8dfd4481271d945ed39c6e", "score": "0.6618596", "text": "def turn_off_motors():\n\n global _mh\n\n _log.info(\"Shutting down all motors\")\n\n for i in range(1, 5):\n _mh.getMotor(i).run(Adafruit_MotorHAT.RELEASE)", "title": "" }, { "docid": "25881df1e316bb6fa4a572bbb76b8b8d", "score": "0.6615613", "text": "async def async_turn_off(self, **kwargs):\n await self._device.set_off(self._running_time)", "title": "" }, { "docid": "6d5eeb9a1a2ba4b8ab54a32c9b274254", "score": "0.66048163", "text": "def led_off(*target_leds):\n for x in target_leds:\n cube.buffer_cubes[(x[0] % 8) + ((x[1] % 8) * 8) + ((x[2] % 8) * 64)].setOff()", "title": "" }, { "docid": "0163c5d9511667c651a921f3d2e89c4e", "score": "0.66015637", "text": "def turn_off(self):\n\t\tself.present_button = self.off_button_sprite", "title": "" }, { "docid": "00cf93e7eae1078d657b0a3a2b55090e", "score": "0.6597789", "text": "async def async_turn_off(self, **kwargs) -> None:\n result = await self.controller.set_command(\n self.meter_id, CONF_COMMAND_DEACTIVATE\n )\n if result:\n await self.controller.async_get_switches(self.flat_id, True)", "title": "" }, { "docid": "e312bb820b8f2819d5192e56ff34b5b3", "score": "0.6595124", "text": "def turn_off(self, data={}):\n self.active = False\n return self.active", "title": "" }, { "docid": "5db870b3666cc361df6e8a2c2429dab6", "score": "0.6594765", "text": "def off(self):\n GPIO.output(self.pin, self._off_state)\n return", "title": "" }, { "docid": "21de60da3dd27dbf2349b1b5a9b88c8e", "score": "0.65764725", "text": "async def async_turn_off(self, **kwargs):\n if self._switch_type == \"ir_mode\":\n _LOGGER.debug(\"Turning off IR\")\n await self.upv.set_camera_ir(self._device_id, self._ir_off_cmd)\n elif self._switch_type == \"status_light\":\n _LOGGER.debug(\"Changing Status Light to Off\")\n await self.upv.set_device_status_light(\n self._device_id, False, self._device_type\n )\n elif self._switch_type == \"hdr_mode\":\n _LOGGER.debug(\"Turning off HDR mode\")\n await self.upv.set_camera_hdr_mode(self._device_id, False)\n elif self._switch_type == \"high_fps\":\n _LOGGER.debug(\"Turning off High FPS mode\")\n await self.upv.set_camera_video_mode_highfps(self._device_id, False)\n elif self._switch_type == \"light_motion\":\n _LOGGER.debug(\"Turning off Light 
Motion detection\")\n await self.upv.light_settings(self._device_id, TYPE_RECORD_OFF)\n elif self._switch_type == \"light_dark\":\n _LOGGER.debug(\"Turning off Light Motion when Dark\")\n await self.upv.light_settings(self._device_id, TYPE_RECORD_OFF)\n else:\n _LOGGER.debug(\"Turning off Recording\")\n await self.upv.set_camera_recording(self._device_id, TYPE_RECORD_NEVER)\n await self.protect_data.async_refresh(force_camera_update=True)", "title": "" }, { "docid": "cdbded2ddbac041573262b251e7c969f", "score": "0.6566721", "text": "def turn_off(self, **kwargs: Any) -> None:\n self.set_operation_mode(\"off\")", "title": "" }, { "docid": "d4bc58cee7d6ee1e27c4bd7e552dad4d", "score": "0.65613025", "text": "def turnOffMotors(self):\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)", "title": "" }, { "docid": "dccecb61d5187e27a7b4d146a07a7a2f", "score": "0.6556146", "text": "def stop_LEDs(CHS):\n global leds\n\n [led.off() for led in leds.values()]", "title": "" }, { "docid": "6de8625b3b390a09a474b71d42303a20", "score": "0.65482277", "text": "def turnOff(self, verbose=False):\n if verbose:\n print(_C.BOLD + _C.YEL + '----- Turning off ----' + _C.ENDC)\n self.setCurrentLimit(current=0, verbose=verbose)\n\n # self.gpio.disable(1, verbose=verbose)\n # self.gpio.disable(2, verbose=verbose)\n\n signal = self.serial.send_and_recieve(\n adr=self.adr, cmd=0x01, data=0x0811, length=2, verbose=verbose)\n retval = signal['data']\n if retval != 0:\n if verbose:\n print(_C.RED + 'Error in turn off signal: ' +\n str(retval) + _C.ENDC)\n else:\n if verbose:\n print(_C.LIME + 'Turn off signal OK' + _C.ENDC)\n\n self.getOn(verbose=verbose)\n if verbose:\n print(_C.BOLD + _C.YEL + '----------------------' + _C.ENDC)\n if retval == 0 and not self.engaged:\n return retval\n return retval", "title": "" }, { "docid": "f31d116aaf617183b790044d16366c9b", "score": "0.6531342", "text": "def shutoff(self, *_args):\n try:\n self.tablet.hideWebview()\n print \"Tabletview stopped\"\n except:\n pass", "title": "" }, { "docid": "d433dfe296a7175f004e000e7cbd7f28", "score": "0.6510487", "text": "async def async_turn_off(self, **kwargs):\n self._spa.set_heat_mode(\"Rest\")\n _LOGGER.info(\"Heat Mode changed to %s\", self._spa.get_heat_mode())", "title": "" } ]
e6b1fb77624f011eedc523ee9a96c89f
Inline query handler for the bot. Returns the appropriate shows as a result.
[ { "docid": "4389659bef73bc81b8f1323de6e6796e", "score": "0.7259084", "text": "def inline_query_handler(update, context):\n query_text = update.inline_query.query\n query_id = update.inline_query.id\n\n if not query_text:\n query_text = \"all\"\n\n logging.info(\"Query %s\", query_text)\n query_text = query_text.lower()\n query_args = query_text.split()\n\n keyword_arg = query_args[0]\n quality_arg = query_args[1] if len(query_args) > 1 else None\n input_message_content_arg = query_args[2] if len(query_args) > 2 else None\n answer = []\n\n for show in SHOW_CONFIG:\n if keyword_arg in show[\"keywords\"]:\n if quality_arg:\n try:\n quality = quality_arg\n feed = show[\"quality\"][quality]\n except KeyError:\n continue\n else:\n quality = show[\"default_quality\"]\n feed = show[\"quality\"][quality]\n\n if input_message_content_arg is not None:\n input_message_content = input_message_content_arg.lower() in [\n \"true\",\n \"1\",\n \"yes\",\n ]\n else:\n input_message_content = show[\"input_message_content\"]\n\n if quality == \"yt\":\n answer += get_newest_episode_from_yt_feed(feed)\n else:\n answer += get_newest_episode_from_podcast_feed(\n feed, input_message_content\n )\n\n if len(answer) > 0:\n logging.info(list(map(lambda x: x.video_url, answer)))\n context.bot.answer_inline_query(query_id, answer, cache_time=CACHE_TIME)", "title": "" } ]
[ { "docid": "828ae1b9f8c8e39f5f7957021b31a780", "score": "0.7084049", "text": "async def on_inline_query(self, msg):\n\n async def compute_answer():\n \"\"\"\n Function generating the answer for the handler.\n :return: Lyrics as articles\n \"\"\"\n print(msg)\n return get_lyrics_as_inline_keyboard(msg['query'])\n\n self.answerer.answer(msg, compute_answer)", "title": "" }, { "docid": "af631a08e7e8c40a69cc84d16a16dff8", "score": "0.68604994", "text": "def on_inline_query(self, msg):\n def compute():\n logger.debug(\"Computing inline\")\n queryId, fromId, queryString, offset = telepot.glance(msg, 'inline_query', True)\n offset = int(offset) if offset else 0\n command, msgText = input_parser.parseInlineCommand(queryString.lower())\n\n answer = request_manager.processInline(command, msgText, fromId, offset)\n resultList = []\n for inlineAnswer in answer.answerList:\n resultList.append(dict(type=\"article\", title=inlineAnswer.title, id=inlineAnswer.id_, input_message_content=dict(message_text=inlineAnswer.formattedAnswer, parse_mode=\"HTML\"), thumb_url=inlineAnswer.thumbUrl))\n return {\"results\": resultList, \"cache_time\": answer.cacheTime, \"is_personal\": answer.isPersonal, \"next_offset\": answer.nextOffset}\n \n self._answerer.answer(msg, compute)", "title": "" }, { "docid": "4f96afd68aa99b952ed34e2547670ca3", "score": "0.6734146", "text": "def inlinequery(update: Update, context: CallbackContext) -> None:\n query = update.inline_query.query\n\n results = [\n InlineQueryResultArticle(\n id=str(uuid4()),\n title=\"Bot API response\",\n description=\"@responseJSONbot\",\n input_message_content=InputTextMessageContent(f\"{update}\")),\n InlineQueryResultArticle(\n id=str(uuid4()),\n title=\"About\",\n description=\"@responseJSONbot\",\n url=\"https://t.me/theostrich\",\n input_message_content=InputTextMessageContent(f\"{update}\"),\n ),\n ]\n\n update.inline_query.answer(results)", "title": "" }, { "docid": "5336d6eabcf2ad7d1e70261c7a6fe426", "score": "0.6485323", "text": "def handle_inline(inline_query):\n if not nekowat.is_allowed(inline_query.from_user.id):\n nekowat.answer_inline_query(inline_query.id, [])\n return\n\n # Normalize expression\n expression = inline_query.query.lower().strip()\n\n if not expression:\n # Get all images\n wats = nekowat.get_all_wats()\n\n else:\n # Get by expression\n wats = nekowat.get_wats_by_expression(expression)\n\n try:\n responses = []\n\n for index, wat in enumerate(wats):\n r = telebot.types.InlineQueryResultCachedPhoto(\n str(index),\n # Get smallest file for inline reply\n wat['file_ids'][0],\n parse_mode='' # Workaround for Telegram API error\n )\n\n responses.append(r)\n\n nekowat.answer_inline_query(inline_query.id, responses)\n\n except Exception as e:\n print(e)", "title": "" }, { "docid": "f52de476d06867031d83db5d700b8298", "score": "0.6398506", "text": "def inlinequery(update, context):\n user = update.inline_query.from_user['username']\n query = ''.join(c for c in update.inline_query.query if c.isalnum() or c is ' ')\n if query.strip() != '':\n logger.info(f\"User @{user} searched \\\"{query}\\\"\")\n query_results = bt.ask_mw_thesaurus(query)\n update.inline_query.answer(query_results)", "title": "" }, { "docid": "fbd09c26f02a8000ccb05d045af8bda1", "score": "0.6204197", "text": "def query(self):\n # gets the query from the user or parameters\n h, r, t = self.capture_query()\n\n # get ranks for the query\n h_ranked, r_ranked, t_ranked = self.get_ranks(h, r, t)\n\n # output to terminal if query was made non-programatically\n 
self.show_title(h, r, t)\n self.show_results(h_ranked, \"( _, r, t )\")\n self.show_results(r_ranked, \"( h, _, t )\", 5)\n self.show_results(t_ranked, \"( h, r, _ )\")\n\n # retruns ranks for programatically evokeed queries\n return h_ranked, r_ranked, t_ranked", "title": "" }, { "docid": "352c57be08173ae4df56de3b833f5106", "score": "0.60998756", "text": "def query():\n\n\t# Getting the query text\n\tuser_text = request.form.get(\"description\")\n\n\n\tdb = MySQLdb.connect(\"mysql-server\", \"root\", \"secret\", \"mydb\")\n\tcursor = db.cursor()\n\n\n\t# Default for before a user enters their own query \n\tif user_text is None:\n\t\tuser_query = \"SELECT ncid FROM voters LIMIT 1\"\n\t\tuser_answer = cursor.execute(user_query)\n\n\n\t# Running the query \n\telse:\n\t\tuser_answer = cursor.execute(user_text)\n\t\t\n\t\n\tuser_answer_table = cursor.fetchall()\t\n\n\treturn render_template('query.html', query_result=user_answer_table)", "title": "" }, { "docid": "21eda1c8ed93759ea4b0bc14afb1949e", "score": "0.6069543", "text": "def query():\n return render_template('query_page.html')", "title": "" }, { "docid": "45d5e6bb68a8b7405c430aa2706999a1", "score": "0.5980653", "text": "async def compute_answer():\n print(msg)\n return get_lyrics_as_inline_keyboard(msg['query'])", "title": "" }, { "docid": "830dda012b68f01b73e4e56cd6b75608", "score": "0.5975844", "text": "def answer(self, query):\n pass", "title": "" }, { "docid": "f1f6667dba2d437cadcbe0220e9d8a97", "score": "0.5960328", "text": "def query(self):\n # Clear scrolled text\n self.query_result.delete(1.0, END)\n\n # Check textboxes are not none\n assert self.user_id_txt.get() != None\n assert self.query_limit_txt.get() != None\n\n # Get user id and query limit from textboxes\n user_id = int(self.user_id_txt.get())\n query_limit = int(self.query_limit_txt.get())\n\n # Make query to recommender system.\n recommendation = self.system.query(user_id, query_limit)\n\n # Add header\n recommendation = \"Recommendations for user with id: {}\\n************************************\\n\\n\".format(user_id) + recommendation\n\n # Put text on ScrolledText area\n self.query_result.insert(INSERT, recommendation)", "title": "" }, { "docid": "dec9e986ab9a389da997ed46d5aa6424", "score": "0.59457755", "text": "def query(self):\n pass", "title": "" }, { "docid": "eae085f793541d9cb4eda76d41fdd8d4", "score": "0.5884021", "text": "def query(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "eae085f793541d9cb4eda76d41fdd8d4", "score": "0.5884021", "text": "def query(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "eae085f793541d9cb4eda76d41fdd8d4", "score": "0.5884021", "text": "def query(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "cd94726044815da2b4cf03f1bc2d2696", "score": "0.58514106", "text": "def echo(update, context):\n query = update.message.text\n update.message.reply_text(\"I got query : \" + query)", "title": "" }, { "docid": "760ef48781cab03fd0e877db0fa94e96", "score": "0.58022505", "text": "async def show_result(self, ctx: Context):\n self.embed = discord.Embed()\n string = None\n\n if self.__result.status == AmadeusPromptStatus.INPUT_GIVEN:\n string = await s.get_string(ctx, \"amadeusPromptStatus\", \"INPUT_GIVEN\")\n elif self.__result.status == AmadeusPromptStatus.TIMEOUT:\n string = await s.get_string(ctx, \"amadeusPromptStatus\", \"TIMEOUT\")\n elif self.__result.status == AmadeusPromptStatus.CANCELLED:\n 
string = await s.get_string(ctx, \"amadeusPromptStatus\", \"CANCELLED\")\n elif self.__result.status == AmadeusPromptStatus.SHOWN:\n string = await s.get_string(ctx, \"amadeusPromptStatus\", \"SHOWN\")\n elif self.__result.status == AmadeusPromptStatus.NEW:\n string = await s.get_string(ctx, \"amadeusPromptStatus\", \"NEW\")\n if string is not None:\n self.embed.title = string.string\n await self.__prepare_footer(ctx)\n self.__result.message = await self.__result.message.edit(embed=self.embed)", "title": "" }, { "docid": "5bd427cb18b729d125b8f33b57b4212d", "score": "0.579944", "text": "def query(self, *args, **kwargs):\n with result_manager(200, self._ResourcePB()) as result:\n self.__query(result, *args, **kwargs)\n return result", "title": "" }, { "docid": "840e3cabfa7f44b4785e1d3ab9f8c2dd", "score": "0.5762502", "text": "def query(self, widget):\r\n query_str = self._frames['table'].get_entry_text()\r\n self._controller.query(query_str)", "title": "" }, { "docid": "90ec86570207f4c34eaaf30a8fa4f26d", "score": "0.56842554", "text": "def __call__(self, query=None):\n\n console = Console()\n\n topics = list(self.topics.keys())\n if query:\n results = self.topicembed.search(query, 10)\n else:\n results = [(x, 1.0) for x in range(10)]\n\n for uid, score in results:\n if score >= 0.1:\n topic = topics[uid]\n console.print(f\"[bright_green]{topic}[/bright_green]\")\n\n # Print example question\n query = f\"select id, question from txtai where similar('{topic}')\"\n result = self.embeddings.search(query, 1)[0]\n console.print(f\"{result['question']} ({result['id']})\\n\")", "title": "" }, { "docid": "46b374e6d870639218135517235746bd", "score": "0.56692916", "text": "def _query(self, *args, **kwargs):", "title": "" }, { "docid": "a4b875875a548d52f2ed9b26a3a0f975", "score": "0.5660484", "text": "def show_result(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "e65c5fb6c649f66c26ddc17c3a394a1a", "score": "0.5652235", "text": "def query(self, **kwargs):\n result = self._get(path='/do/query', params=kwargs)\n return result.get('result')", "title": "" }, { "docid": "17d628801d237a1ab94310a8050973cc", "score": "0.56499946", "text": "def query(self):\n return self.details[KEY_QUERY]", "title": "" }, { "docid": "cb57fff35d066667534bfaaba7c96944", "score": "0.56292784", "text": "def query(self,query):\n # create request body\n req_body = {\n \"query\":query\n }\n # create request\n req = Request(\n method = \"post\",\n endpoint='/streamapps/query/{}'.format(self.name),\n data=json.dumps(req_body)\n )\n # create response handler\n def response_handler(resp):\n if resp.is_success is True:\n return resp.body\n print(resp.body)\n return False\n # call the api\n return self._execute(req,response_handler)", "title": "" }, { "docid": "2c4367d919750741099bc25df0dba7e1", "score": "0.561507", "text": "def display_query(project, version, name):\n return query_service.get_versioned_query_as_html(project, version, name)", "title": "" }, { "docid": "8dffa5ee30dccac5128edcdff46251a5", "score": "0.5611329", "text": "def execute(self):\n return SIAResults(self.execute_votable(), url=self.queryurl, session=self._session)", "title": "" }, { "docid": "4c68b792c3244bfd1149a61c30396a27", "score": "0.5603586", "text": "def show_results(qry):\n\n result = runQuery(qry)\n resLen = len(result)\n\n # From this point, we need to make everything look pretty for the screen.\n print('<results>')\n print('&nbsp;Imagine a pretty nifty list of devices here with links and all that.<br/>&nbsp;Here is what we know based on our 
current database:<br/><br/>')\n if DEBUGGING:\n print('Query as received: {}\\n'.format(qry))\n print('Result Length: {}\\n'.format(resLen))\n if resLen > 0:\n print('<table><tr><th>Brand</th><th>Model</th><th>Form</th><th>Digital<br/>Modes</th><th>MSRP</th><th>Vendor</th></tr>')\n for rig in result:\n model, brand, form, mode, msrp, vlink = rig[0], rig[1], rig[2], rig[3], rig[4], rig[5]\n print(f\"<tr><td>&nbsp;{brand}&nbsp;</td><td>&nbsp;{model}&nbsp;</td><td>&nbsp;{form}&nbsp;</td><td>&nbsp;{mode}&nbsp;</td><td>&nbsp;${msrp}&nbsp;</td><td>&nbsp;<a href='{vlink}'>Link</a>&nbsp;</td></tr>\")\n print('</table>')\n else:\n print('<br/>There were no devices matching the criteria selected.')\n print('</results>')", "title": "" }, { "docid": "2c1ca06021ca8d303157265c776be5e2", "score": "0.5593014", "text": "def query(self, query):\n pass", "title": "" }, { "docid": "619f655bc8ae7eb5d09db804deca07a2", "score": "0.559074", "text": "def execute(self):\n return SSAResults(self.execute_votable(), self.getqueryurl())", "title": "" }, { "docid": "c99fe403dcc8298c1c06e058128763dd", "score": "0.5555807", "text": "def query(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "ca7d1851b556d85bce09b4e763a22c9e", "score": "0.55321777", "text": "def query(self,name):\n return self._canonical[\"queries\"][name]", "title": "" }, { "docid": "f5d3f6e37565f250fb26276e27ad1489", "score": "0.5527054", "text": "def execute(self):\n return DalResults(self.execute_votable(), self.getqueryurl())", "title": "" }, { "docid": "7b32397366baf7a9b8a4da6ce2f8dd5d", "score": "0.5520517", "text": "def query(env):\n if not app.config['ENABLE_QUERY']:\n log.warning('Access to query interface disabled by administrator.')\n abort(403)\n\n envs = environments()\n if env != app.config['DEFAULT_ENVIRONMENT']:\n check_env(env, envs)\n\n form = QueryForm(meta={\n 'csrf_secret': app.config['SECRET_KEY'],\n 'csrf_context': session}\n )\n\n if form.validate_on_submit():\n if form.endpoints.data not in ENABLED_QUERY_ENDPOINTS:\n log.warning('Access to query endpoint %s disabled by administrator.',\n form.endpoints.data)\n abort(403)\n\n query = form.query.data.strip()\n\n # automatically wrap AST queries with [], if needed\n if form.endpoints.data != 'pql' and not query.startswith('['):\n query = f\"[{query}]\"\n\n try:\n result = get_or_abort_except_client_errors(\n puppetdb._query,\n form.endpoints.data,\n query=query)\n\n zero_results = (len(result) == 0)\n result = result if not zero_results else None\n\n if form.rawjson.data:\n # for JSON view pass the response from PuppetDB as-is\n return render_template('query.html',\n form=form,\n zero_results=zero_results,\n result=result,\n columns=None,\n envs=envs,\n current_env=env)\n else:\n # for table view separate the columns and the rows\n rows = []\n if not zero_results:\n columns = result[0].keys()\n for items in result:\n rows.append(list(items.values()))\n else:\n columns = []\n\n return render_template('query.html',\n form=form,\n zero_results=zero_results,\n result=rows,\n columns=columns,\n envs=envs,\n current_env=env)\n\n except HTTPError as e:\n error_text = e.response.text\n return render_template('query.html',\n form=form,\n error_text=error_text,\n envs=envs,\n current_env=env)\n\n return render_template('query.html',\n form=form,\n envs=envs,\n current_env=env)", "title": "" }, { "docid": "ba08aa1fcb16f0ebfd549bbbdb59bec7", "score": "0.54864115", "text": "def answerInlineQuery(\n self,\n inline_query_id: str,\n results: 
List[InlineQueryResult],\n cache_time: int = None,\n is_personal: bool = None,\n next_offset: str = None,\n switch_pm_text: str = None,\n switch_pm_parameter: str = None,\n ):\n kwargs = {k:v for k,v in locals().items() if k!='self' and v!=None}\n return self(\"answerInlineQuery\", kwargs)", "title": "" }, { "docid": "072ea70fb6d1fa1171354dbae434904e", "score": "0.5484513", "text": "def query(self) -> str:\n\n if len(self._raw) == 1: # string query\n return self._raw[0]\n\n # normal\n query_1, op, query_2 = self._raw\n\n return op.format(\n query_1.query,\n query_2.query if isinstance(query_2, Query) else query_1._callback(query_2),\n )", "title": "" }, { "docid": "285223688f08c60eef80defa013d16c7", "score": "0.5469835", "text": "def query(self,*args, **kwargs):\n\n return self.database.query(*args, **kwargs)", "title": "" }, { "docid": "72b0a2e8ef2dc87b780b0870d272aa00", "score": "0.5461389", "text": "def query(self, **options):\n\n return self._get(category='analysis', resource='query', subcategory='variant', **options)", "title": "" }, { "docid": "128842ea9e6116aafff11fa93a23b635", "score": "0.5443271", "text": "def query(request, *args,**kwargs):\n l = livestatus(request)\n return l.query(*args, **kwargs)", "title": "" }, { "docid": "5fbdc1c2f82cf790e0375704e7cf3fc1", "score": "0.54014784", "text": "def query(self,command):\n cmd=\"Q\"+command\n s=self.__query_string(cmd)\n return s", "title": "" }, { "docid": "7fd4be8558155a55f7c6be2c18f82835", "score": "0.5395507", "text": "def show_hike_result():\n \n hike_id = request.args.get(\"hikeId\")\n result = HikeResult.query.filter((HikeResult.hike_id == hike_id) &\n (HikeResult.canceled_by_user == False)).first()\n if result:\n trail = Hike.query.filter_by(hike_id=hike_id).first()\n trail_id = trail.trail_id\n trail_details=Trail.query.filter_by(trail_id=trail_id).first()\n ascent_from_enum = str(result.ascent_rating).split('.')[1]\n ascent_from_enum = ascent_from_enum.replace(\"_\", \" \")\n distance_from_enum = str(result.distance_rating).split('.')[1]\n distance_from_enum = distance_from_enum.replace(\"_\", \" \")\n challenge_from_enum = str(result.challenge_rating).split('.')[1]\n challenge_from_enum = challenge_from_enum.replace(\"_\", \" \")\n result_details = {'name': trail_details.trail_name,\n 'summary': trail_details.description,\n 'difficulty': trail_details.difficulty,\n 'loc': trail_details.location,\n 'lat': trail_details.latitude,\n 'lng': trail_details.longitude,\n 'len': trail_details.distance_in_miles,\n 'asc': trail_details.total_ascent,\n 'dsc': trail_details.total_descent,\n 'date': trail_details.status_at,\n 'status': trail_details.status,\n 'details': trail_details.status_details,\n 'assessment': result.assessment,\n 'distance': result.distance_in_miles,\n 'hikedOn': result.hiked_on.strftime(\"%A %B %d, %Y\"),\n 'ascentRating': ascent_from_enum.lower(),\n 'distanceRating': distance_from_enum.lower(),\n 'challengeRating': challenge_from_enum.lower(),\n 'hikeTime': result.hike_time}\n return jsonify(result_details)\n else:\n return 'Add Hike Result'", "title": "" }, { "docid": "f6e9b6b218585ae6671408ac5969262e", "score": "0.53820723", "text": "def echo(update, context):\r\n reply_markup = None\r\n text = update.message.text\r\n total = len(monster_names(text))\r\n if total == 0:\r\n message = '查無符合名稱的魔物!'\r\n elif total == 1:\r\n message = render_info(text, 'fuzzy')\r\n elif total <= 10:\r\n reply_markup = InlineKeyboardMarkup([\r\n [InlineKeyboardButton('模糊搜尋', callback_data = 'render {} fuzzy 0'.format(text))],\r\n 
[InlineKeyboardButton('精準搜尋', callback_data = 'query {} precise 0'.format(text))]\r\n ])\r\n message = '符合結果共 {}筆,請選擇搜尋方式:'.format(total)\r\n else:\r\n reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('精準搜尋', callback_data = 'query {} precise 0'.format(text))]])\r\n message = '符合筆數過多({}筆),只支援精準搜索:'.format(total)\r\n update.message.reply_text(message, reply_markup = reply_markup)", "title": "" }, { "docid": "e987cea1b2db337b0284c45269b6c83e", "score": "0.5381155", "text": "def show_query_results(query, format):\n print(\"\\n Results of:\" + query)\n\n for i in base.execute(query).fetchall() :\n print(format % i )\n print(\"-*\"*40)", "title": "" }, { "docid": "777b09e83a27fdd302c2937c0ef5f1f2", "score": "0.5365776", "text": "def _query_button_fired(self): \n\n # debug print\n logging.debug(\"QUERY button pressed\")\n self.lastlog_string = \"QUERY button pressed\"\n\n # get required prefixes\n prefixes = \"\"\n if self.query_rdf_prefix:\n prefixes += RDF_PREFIX\n if self.query_rdfs_prefix:\n prefixes += RDFS_PREFIX\n if self.query_owl_prefix:\n prefixes += OWL_PREFIX\n if self.query_ns_prefix:\n prefixes += NS_PREFIX\n\n # multilevel\n multilevel = self.query_multilevel\n\n if not multilevel:\n\n # read the required value\n l = self.query_level_int \n \n # retrieve URI related to the query\n # execute the sparql query\n uri_list = []\n if self.query_reification:\n uri_list = self.kp.custom_query(q_reification)\n else:\n if len(self.query_string) > 0:\n uri_list = self.kp.custom_query(prefixes + self.query_string)\n\n # move objects!\n self.redraw(uri_list, l)\n\n else:\n\n # retrieve URI related to the query\n # execute the sparql query\n uri_list = []\n if self.query_reification:\n print \"here\"\n uri_list = self.kp.custom_multilevel_query(q_reification)\n print \"here\"\n print uri_list\n else:\n if len(self.query_string) > 0:\n uri_list = self.kp.custom_multilevel_query(prefixes + self.query_string)\n \n # move objects!\n level_counter = 0\n for level in uri_list:\n\n level_counter += 1\n self.redraw(level, level_counter)", "title": "" }, { "docid": "1ea855d78f95a8cc7e491cc1dc42b888", "score": "0.53617865", "text": "async def showall(self, ctx: commands.Context):\n self.emojis = {str(e.id): e for e in self.bot.emojis}\n\n guild: discord.Guild = ctx.guild\n formatted = []\n async with self.config.guild(ctx.guild).wiggle() as wigglelist:\n for userid, emojiids in wigglelist.items():\n user: discord.Member = guild.get_member(int(userid))\n emojis: List[discord.Emoji] = [self.emojis[str(e)] for e in emojiids]\n line = f\"{' '.join([str(e) for e in emojis])} for {user.display_name}\"\n formatted.append(line)\n # await ctx.send(line)\n\n formatted = \"\\n\".join(formatted)\n pages = list(pagify(formatted))\n await menu(ctx, pages, DEFAULT_CONTROLS)\n # embedded_response = discord.Embed(\n # title=f\"Wiggle Emoji for {ctx.guild.name}\",\n # type=\"rich\",\n # description=formatted,\n # )\n # embedded_response = embed.randomize_colour(embedded_response)\n # await ctx.send(embed=embedded_response)", "title": "" }, { "docid": "99216a8ba8d19ebcdfc22209dac2f831", "score": "0.53615046", "text": "def query(self,input_query):\n reply = self.open.query(str(input_query))\n return str(reply)", "title": "" }, { "docid": "a250379d268919cf99c47c6399552818", "score": "0.5361407", "text": "def ng_query(self, request, *args, **kwargs):\n return self.build_json_response(self.get_query())", "title": "" }, { "docid": "68073b38e65b556a77f7eada990359e4", "score": "0.53519374", "text": "def query(self):\r\n 
raise RuntimeError(\"Must implement query!\")", "title": "" }, { "docid": "5a160dcd064bd666e48a4c031cf3f321", "score": "0.53504527", "text": "def query_data(instance_id, database_id):\n spanner_client = spanner.Client()\n instance = spanner_client.instance(instance_id)\n database = instance.database(database_id)\n\n results = database.execute_sql(\n 'SELECT SingerId, AlbumId, AlbumTitle FROM Albums')\n\n for row in results:\n print(u'SingerId: {}, AlbumId: {}, AlbumTitle: {}'.format(*row))", "title": "" }, { "docid": "edb4f6e9b28feb6432154e3cab9e2f0e", "score": "0.5350157", "text": "def query(self, query_path):\n\t\t\n\t\tresponse = ''\n\t\tclient = httplib2.Http()\n\t\t\n\t\ttry:\n\t\t\tif self.server_os_type == NIX_ARCH:\n\t\t\t\tquery = '%s/%s/video/embed?botId=%s&video=&botnet=&' % (self.api_url, self.api_token, query_path)\n\t\t\t\n\t\t\telif self.server_os_type == WIN_ARCH:\n\t\t\t\tquery = '%s/%s/video/embed?botId=&video=%s&botnet=&' % (self.api_url, self.api_token, query_path)\n\t\t\t\n\t\t\telse:\n\t\t\t\tquery = '%s/%s/video/embed?botId=%s&video=&botnet=&' % (self.api_url, self.api_token, query_path)\n\t\t\t\n\t\t\tresponse = client.request(query)\n\t\t\t\n\t\texcept BaseException as e:\n\t\t\tprint '[X] %s' % (e.message)\n\t\t\t\n\t\tif response:\n\t\t\tresponse_body = response[1]\n\t\t\tpattern = re.compile( r'.*src=\"(.*?)\"')\n\t\t\tmatched = pattern.match(response_body)\n\t\t\t\n\t\t\tif matched:\n\t\t\t\tdata = matched.group(1)\n\t\t\t\tindex = stricmp(data, self.emit_data)\n\t\t\t\t\n\t\t\t\tif self.server_os_type == WIN_ARCH:\n\t\t\t\t\tindex -= 1\n\t\t\t\t\t\n\t\t\t\treturn data[index:]\n\t\t\t\t\n\t\treturn False", "title": "" }, { "docid": "50cadc0ce961aa7c516ed6aa1a0011cb", "score": "0.53350765", "text": "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "50cadc0ce961aa7c516ed6aa1a0011cb", "score": "0.53350765", "text": "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "50cadc0ce961aa7c516ed6aa1a0011cb", "score": "0.53350765", "text": "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "50cadc0ce961aa7c516ed6aa1a0011cb", "score": "0.53350765", "text": "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "41c1df197145e76af49b99e344b3e5ac", "score": "0.5322119", "text": "def query_interface_run_query(\n *,\n db: Session = Depends(deps.get_db),\n resource_id: int,\n page: int = 0,\n page_size: int = 25,\n current_user: models.User = Depends(interface_read_validator),\n) -> List[Dict[str, Any]]:\n query = crud.query.get(db=db, id=resource_id)\n if not query:\n raise HTTPException(status_code=404, detail=\"Cannot find query.\")\n query_result = crud.query.run_query(\n db=db, id=resource_id, page=page, page_size=page_size\n )\n return jsonable_encoder(query_result)", "title": "" }, { "docid": "a2c70b6faf4c45d24435240deeca3867", "score": "0.53123814", "text": "def query(self):\n return self.__structure.query", "title": "" }, { "docid": "b8fdda858db01535049af578b9dd4a7b", "score": "0.5310502", "text": "def query_list(self) -> 'outputs.QueryListResponse':\n return pulumi.get(self, \"query_list\")", "title": "" }, { "docid": "b8fdda858db01535049af578b9dd4a7b", "score": "0.5310502", "text": "def query_list(self) -> 'outputs.QueryListResponse':\n return pulumi.get(self, \"query_list\")", "title": "" }, { "docid": "b8fdda858db01535049af578b9dd4a7b", "score": "0.5310502", 
"text": "def query_list(self) -> 'outputs.QueryListResponse':\n return pulumi.get(self, \"query_list\")", "title": "" }, { "docid": "b8fdda858db01535049af578b9dd4a7b", "score": "0.5310502", "text": "def query_list(self) -> 'outputs.QueryListResponse':\n return pulumi.get(self, \"query_list\")", "title": "" }, { "docid": "d524d922e5345b176ce3ff3cb559a9a9", "score": "0.53021187", "text": "def callback_query(call):\n chat_id = call.from_user.id\n weekdays = [\"Monday\", \"Tuesday\", \"Wednesday\",\n \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n\n if call.data == \"hafta_kun\":\n keyboard_l = types.InlineKeyboardMarkup()\n btn1 = types.InlineKeyboardButton(\"Dushanba\", callback_data=\"Monday\")\n btn2 = types.InlineKeyboardButton(\"Seshanba\", callback_data=\"Tuesday\")\n btn3 = types.InlineKeyboardButton(\"Chorshanba\", callback_data=\"Wednesday\")\n btn4 = types.InlineKeyboardButton(\"Payshanba\", callback_data=\"Thursday\")\n btn5 = types.InlineKeyboardButton(\"Juma\", callback_data=\"Friday\")\n btn6 = types.InlineKeyboardButton(\"Shanba\", callback_data=\"Saturday\")\n btn7 = types.InlineKeyboardButton(\"Yakshanba\", callback_data=\"Sunday\")\n btn8 = types.InlineKeyboardButton(\"🔜Keyingi bosqich\", callback_data=\"next\")\n back = types.InlineKeyboardButton(\"🔙Ortga\", callback_data=\"back\")\n keyboard_l.add(btn1, btn2)\n keyboard_l.add(btn3, btn4)\n keyboard_l.add(btn5, btn6, btn7)\n keyboard_l.add(back, btn8)\n msg = bot.send_message(chat_id,\n \"🗓 Xabarnoma jo\\'natilishi kerak bo\\'lgan hafta kuni(lari)ni belgilang.\",\n parse_mode='html', reply_markup=keyboard_l)\n bot.answer_callback_query(call.id, \"Endi xabarnomani jo\\'natish vaqtini belgilang\")\n bot.delete_message(call.message.chat.id, call.message.message_id)\n\n elif call.data == \"only_one_date\":\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n markup.row('🔙Ortga')\n msg = bot.send_message(chat_id,\n \"🎯 Xabarnomani jo\\'natish uchun sanani yuboring. \\nSanani namunadagi ko\\'rinishda yuboring:\\n✅ Namuna: <b>16</b>\",\n parse_mode='html',reply_markup=markup)\n bot.answer_callback_query(call.id, \"Endi xabarnomani jo\\'natish sanasini yuboring\")\n bot.delete_message(call.message.chat.id, call.message.message_id)\n bot.register_next_step_handler(msg, data_date_step)\n\n\n elif call.data == \"only_one\":\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n markup.row('🔙Ortga')\n msg = bot.send_message(chat_id,\n \"🎯 Xabarnomani jo\\'natish uchun sanani yuboring. 
\\nSanani namunadagi ko\\'rinishda yuboring:\\n✅ Namuna: <b>27.09.2020</b>\",\n parse_mode='html',reply_markup=markup)\n bot.answer_callback_query(call.id, \"Endi xabarnomani jo\\'natish sanasini yuboring\")\n bot.delete_message(call.message.chat.id, call.message.message_id)\n bot.register_next_step_handler(msg, data_sana_step) \n\n elif call.data == \"one_back\":\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n markup.row('📣 Yangi xabarnomani qo\\'shish')\n markup.row('🔕 O\\'rnatilgan xabarnomani o\\'chirish')\n bot.answer_callback_query(call.id, \"Ortga\")\n bot.send_message(chat_id, \"<i>Men </i>Groot<i>man 🌱</i>\",\n reply_markup=markup, parse_mode=\"html\")\n bot.delete_message(call.message.chat.id, call.message.message_id)\n\n\n\n elif call.data == \"back\":\n \n bot.answer_callback_query(call.id, \"Ortga\")\n keyboard_l = types.InlineKeyboardMarkup()\n btn1 = types.InlineKeyboardButton(\"🗓 Har hafta\", callback_data=\"hafta_kun\")\n btn2 = types.InlineKeyboardButton(\"📅 Har oyda\", callback_data=\"only_one_date\")\n btn3 = types.InlineKeyboardButton(\"🔂 Bir martalik\", callback_data=\"only_one\")\n back = types.InlineKeyboardButton(\"🔙Ortga\", callback_data=\"back\")\n keyboard_l.add(btn1)\n keyboard_l.add(btn2)\n keyboard_l.add(btn3)\n keyboard_l.add(back)\n msg = bot.send_message(call.from_user.id, \"🎯 Xabarnoma turini tanlang\",\n reply_markup=keyboard_l)\n bot.delete_message(call.message.chat.id, call.message.message_id)\n\n elif call.data in weekdays:\n if call.data == \"Monday\":\n bot.answer_callback_query(call.id, \"Dushanba kuni belgilandi\")\n if call.data == \"Tuesday\":\n bot.answer_callback_query(call.id, \"Seshanba kuni belgilandi\")\n if call.data == \"Wednesday\":\n bot.answer_callback_query(call.id, \"Chorshanba kuni belgilandi\")\n if call.data == \"Thursday\":\n bot.answer_callback_query(call.id, \"Payshanba kuni belgilandi\")\n if call.data == \"Friday\":\n bot.answer_callback_query(call.id, \"Juma kuni belgilandi\")\n if call.data == \"Saturday\":\n bot.answer_callback_query(call.id, \"Shanba kuni belgilandi\")\n if call.data == \"Sunday\":\n bot.answer_callback_query(call.id, \"Yakshanba kuni belgilandi\") \n \n\n try:\n session = Session()\n days = session.query(Rejim).filter(Rejim.user_id == chat_id).filter(Rejim.finish == 0).order_by(Rejim.id.desc()).first()\n new_day = days.day\n if new_day is None:\n up_id = session.query(Rejim).filter(Rejim.user_id == chat_id).filter(Rejim.finish == 0).order_by(Rejim.id.desc()).first().id\n session.query(Rejim).filter(Rejim.id == up_id).update({Rejim.day: call.data}, synchronize_session=False)\n \n else:\n new_day = new_day + ' ' + call.data\n up_id = session.query(Rejim).filter(Rejim.user_id == chat_id).filter(Rejim.finish == 0).order_by(Rejim.id.desc()).first().id\n session.query(Rejim).filter(Rejim.id == up_id).update({Rejim.day: new_day}, synchronize_session=False)\n \n \n session.commit()\n session.close()\n\n except Exception as e:\n bot.send_message(config.admin_id, e)\n\n elif call.data == \"next\":\n session = Session()\n days = session.query(Rejim).filter(Rejim.user_id == chat_id).filter(Rejim.finish == 0).order_by(Rejim.id.desc()).first()\n day = days.day\n need_day = day.split()\n kunlar=\"\"\n\n for i in need_day:\n if i == \"Monday\":\n kunlar = kunlar+\"Dushanba \"\n if i == \"Tuesday\":\n kunlar = kunlar+\"Seshanba \"\n if i == \"Wednesday\":\n kunlar = kunlar+\"Chorshanba \"\n if i == \"Thursday\":\n kunlar = kunlar+\"Payshanba \"\n if i == \"Friday\":\n kunlar = kunlar+\"Juma \"\n if i == 
\"Saturday\":\n kunlar = kunlar+\"Shanba \"\n if i == \"Sunday\":\n kunlar = kunlar+\"Yakshanba \"\n else:\n kunlar = kunlar+\"\"\n\n\n keyboard_l = types.InlineKeyboardMarkup()\n btn1 = types.InlineKeyboardButton(\"✅ Ha, vaqtni belgilash\", callback_data=\"yes_time\")\n btn2 = types.InlineKeyboardButton(\"🙅‍♂️ Yo\\'q, qayta tanlash\", callback_data=\"back_day\")\n keyboard_l.add(btn1)\n keyboard_l.add(btn2)\n bot.send_message(chat_id, \"<b>\" + kunlar + \"</b>\" + \"\\n\\nYuqoridagi kunlarga xabarnoma o'rnatmoqchimisiz?\",\n parse_mode=\"html\", reply_markup=keyboard_l)\n \n bot.delete_message(call.message.chat.id, call.message.message_id)\n\n session.close()\n \n\n elif call.data == \"back_day\":\n try:\n session = Session()\n up_id = session.query(Rejim).filter(Rejim.user_id == chat_id).filter(Rejim.finish == 0).order_by(Rejim.id.desc()).first().id\n session.query(Rejim).filter(Rejim.id == up_id).update({Rejim.day: \"\"}, synchronize_session=False)\n # session.query(Rejim).filter(Rejim.user_id == chat_id).filter(Rejim.finish == 0).update(\n # {Rejim.day: \"\"}, synchronize_session=False)\n session.commit()\n session.close()\n\n keyboard_l = types.InlineKeyboardMarkup()\n btn1 = types.InlineKeyboardButton(\"Dushanba\", callback_data=\"Monday\")\n btn2 = types.InlineKeyboardButton(\"Seshanba\", callback_data=\"Tuesday\")\n btn3 = types.InlineKeyboardButton(\"Chorshanba\", callback_data=\"Wednesday\")\n btn4 = types.InlineKeyboardButton(\"Payshanba\", callback_data=\"Thursday\")\n btn5 = types.InlineKeyboardButton(\"Juma\", callback_data=\"Friday\")\n btn6 = types.InlineKeyboardButton(\"Shanba\", callback_data=\"Saturday\")\n btn7 = types.InlineKeyboardButton(\"Yakshanba\", callback_data=\"Sunday\")\n btn8 = types.InlineKeyboardButton(\"🔜Keyingi bosqich\", callback_data=\"next\")\n back = types.InlineKeyboardButton(\"🔙Ortga\", callback_data=\"back\")\n keyboard_l.add(btn1, btn2)\n keyboard_l.add(btn3, btn4)\n keyboard_l.add(btn5, btn6, btn7)\n keyboard_l.add(back, btn8)\n msg = bot.send_message(chat_id,\n \"Xabarnomani jo\\'natish kerak bo\\'lgan hafta kunini belgilang.\",\n parse_mode='html', reply_markup=keyboard_l)\n bot.answer_callback_query(call.id, \"Endi xabarnomani jo\\'natish kunini belgilang\")\n\n except Exception as e:\n bot.send_message(config.admin_id, e)\n\n elif call.data == \"yes_time\":\n bot.answer_callback_query(call.id, \"Endi xabarnomani jo\\'natish vaqtini belgilang\")\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n markup.row('🔙Ortga')\n msg = bot.send_message(chat_id,\n \"🕙 Xabarnomani jo\\'natish kerak bo\\'lgan vaqtni namunadagi ko\\'rinishda yuboring\\n\\n<b>✅ Namuna: </b>16:00\",\n parse_mode='html',reply_markup=markup)\n bot.register_next_step_handler(msg, data_time_step)\n bot.delete_message(call.message.chat.id, call.message.message_id)\n\n elif call.data == \"yes_text\":\n bot.answer_callback_query(call.id, \"Endi xabarnoma uchun matnni yuboring\")\n msg = bot.send_message(chat_id,\n \"🎯 Xabarnomada ko'rsatilishi zarur bo\\'lgan xabar matnini yuboring.\",\n parse_mode='html')\n bot.register_next_step_handler(msg, data_text_step)\n bot.delete_message(call.message.chat.id, call.message.message_id)\n\n elif call.data == \"save\":\n session = Session()\n days = session.query(Rejim).filter(Rejim.user_id == chat_id).filter(Rejim.finish == 0).order_by(Rejim.id.desc()).first()\n need_str = days.day\n need = need_str.split(\" \")\n for j in need:\n second = Second_db()\n second.user_id = days.user_id\n second.group_id = days.group_id\n second.day = j\n 
second.days_id = days.id\n second.time = days.time\n second.text = days.text\n second.finish = 0\n session.add(second)\n\n\n up_id = session.query(Rejim).filter(Rejim.user_id == chat_id).filter(Rejim.finish == 0).order_by(Rejim.id.desc()).first().id\n session.query(Rejim).filter(Rejim.id == up_id).update({Rejim.finish: 1}, synchronize_session=False) \n \n\n session.commit()\n session.close()\n bot.answer_callback_query(call.id, \"🟢 Xabarnoma tartibi o\\'rnatildi\")\n bot.delete_message(call.message.chat.id, call.message.message_id)\n \n\n if (call.data)[0:3] == \"del\":\n try:\n \n bot.answer_callback_query(call.id,\"O\\'chirildi\")\n del_id = int((call.data)[3:])\n session = Session()\n x = session.query(Rejim).filter(Rejim.id == del_id).one()\n session.delete(x)\n session.commit()\n session.close()\n bot.send_message(chat_id,\"Tanlangan xabarnoma o\\'chirildi\")\n bot.delete_message(call.message.chat.id, call.message.message_id)\n except Exception as e:\n bot.send_message(config.admin_id, e)", "title": "" }, { "docid": "e8b139de96f810f0391ec7815df955df", "score": "0.53020424", "text": "def show(**kwargs):\n run_show(**kwargs)", "title": "" }, { "docid": "7b570292a810bdaa78131b43ae860948", "score": "0.52817607", "text": "def demo_aql_search(self):\n logger.info(\"\\n+++++++++++++++++++++++++++++++AQL Search Start+++++++++++++++++++++++++++++++\")\n\n query_doc = ResourceUtility.generate_query_document(types=['dm_sysobject'], columns=['object_name'],\n sorts=[Sort('object_name', True, 'en', True)],\n expression_set=ExpSet('AND', FtExp('emc or rest')))\n logger.info('AQL search for keyword emc and parameters items-per-page=3,page=2,inline=true...')\n\n results = []\n try:\n results = self.client.aql_search(query_doc.dump(), {'items-per-page': '2', 'page': '1', 'inline': 'true'})\n except IOError:\n logger.info('Fail to search with AQL.')\n\n logger.info('Object names in page %d...', 2)\n for result in results.get_entries():\n logger.info(result.get('content').get('properties').get('object_name'))\n\n logger.info('Navigate to next page...')\n results = self.client.next_page(results)\n\n if results is None:\n logger.info('Next page does not exist.')\n else:\n logger.info('Object names in page %d...', 3)\n for result in results.get_entries():\n logger.info(result.get('content').get('properties').get('object_name'))\n\n # add facet definitions in search criteria\n self.step_separator('Facet against attribute {}'.format('r_object_type'))\n query_doc.facet_definitions = [\n FacetDefinition(facet_id='facet_r_object_type', attributes=['r_object_type'])]\n\n try:\n results = self.client.aql_search(query_doc.dump(), {'items-per-page': '2', 'page': '1', 'inline': 'true'})\n except IOError:\n logger.info('Fail to search with AQL.')\n\n logger.info('Facet results:')\n for facet in results.get('facets')[0].get('facet-value'):\n logger.info('group for %s has %s results and the navigation link is %s',\n facet.get('facet-value-constraint'), facet.get('facet-value-count'),\n facet.get('link').get('href'))\n\n logger.info(\"\\n+++++++++++++++++++++++++++++++AQL Search End+++++++++++++++++++++++++++++++\")", "title": "" }, { "docid": "9c0745b7ee32511fc3371acf9e6fcc48", "score": "0.5273036", "text": "def run_query(self, request):\n ok, fields = forms.fetch(request, ['msg0_id', 'msg0_sent', 'msg0_locked', 'msg1_id',\n 'msg1_sent', 'msg1_locked', 'mis0_id', 'mis0_status', 'mis1_id', 'mis1_status'],\n blanks=['msg0_id', 'msg0_sent', 'msg0_locked', 'msg1_id', 'msg1_sent', 'msg1_locked',\n 'mis0_id', 'mis0_status', 
'mis1_id', 'mis1_status'])\n if not ok:\n return {'error': 'Bad parameters.'}\n # For each message with a non-empty ID, create a subquery that returns only the count of that message type\n # with the given parameters.\n # e.g., (select count(*) from messages where messages.user_id=users.user_id and messages.msg_type=\"MSG_OBELISK01a\") as msg0\n # We'll simultaneously build a criteria_list that will be used to check the outputs of each subquery.\n # e.g., 'msg0=1 and msg1=1'\n subquery_list = ''\n criteria_list = ''\n for i in range(2):\n msg_type = fields['msg%d_id' % i]\n if msg_type != '':\n if subquery_list != '':\n subquery_list += ',\\n'\n criteria_list += ' and '\n subquery_list += '(select count(*) from messages where messages.user_id=users.user_id and messages.msg_type=\"%s\"' % (msg_type)\n if fields['msg%d_locked' % i] == 'TRUE':\n subquery_list += ' and messages.locked=1'\n elif fields['msg%d_locked' % i] == 'FALSE':\n subquery_list += ' and messages.locked=0'\n if fields['msg%d_sent' % i] == 'TRUE':\n criteria_list += 'msg%d=1' % (i)\n else:\n criteria_list += 'msg%d=0' % (i)\n subquery_list += ') as msg%d' % (i)\n # Append subqueries and subquery criteria for missions.\n for i in range(2):\n mis_def = fields['mis%d_id' % i]\n if mis_def != '':\n if subquery_list != '':\n subquery_list += ',\\n'\n criteria_list += ' and '\n subquery_list += '(select count(*) from missions where missions.user_id=users.user_id and missions.mission_definition=\"%s\"' % (mis_def)\n if fields['mis%d_status' % i] == 'STARTED':\n subquery_list += ' and missions.done=0'\n criteria_list += 'mis%d=1' % (i)\n elif fields['mis%d_status' % i] == 'DONE':\n subquery_list += ' and missions.done=1'\n criteria_list += 'mis%d=1' % (i)\n else:\n criteria_list += 'mis%d=0' % (i)\n subquery_list += ') as mis%d' % (i)\n\n if subquery_list == '':\n return {'error': 'All inputs were left blank.'}\n \n # Put it all together into the final query.\n query = 'select * from (select users.email,\\n%s\\nfrom users) as tbl_1 where %s' % (subquery_list, criteria_list)\n return { 'sql_query': query }", "title": "" }, { "docid": "7e15842a32bd766570b11cab79ee767e", "score": "0.52478856", "text": "def query(self, **kwargs):\n\n raise NotImplemented()", "title": "" }, { "docid": "9b8f6dc4db64cc552388f88fbc6a5590", "score": "0.5245417", "text": "def get_query_text(ibs, cm, aid2, truth, **kwargs):\n text_list = []\n if cm is not None:\n qaid = cm.qaid\n score = cm.get_annot_scores([aid2])[0]\n rawscore = cm.get_annot_scores([aid2])[0]\n aid2_raw_rank = cm.get_annot_ranks([aid2])[0]\n else:\n qaid = kwargs.get('qaid', None)\n score = kwargs.get('score', None)\n rawscore = kwargs.get('rawscore', None)\n aid2_raw_rank = kwargs.get('aid2_raw_rank', None)\n if kwargs.get('show_truth', False):\n truth_str = '*%s*' % ibs.const.EVIDENCE_DECISION.INT_TO_NICE.get(truth, None)\n text_list.append(truth_str)\n if kwargs.get('show_rank', aid2_raw_rank is not None or cm is not None):\n try:\n # aid2_raw_rank = cm.get_annot_ranks([aid2])[0]\n aid2_rank = aid2_raw_rank + 1 if aid2_raw_rank is not None else None\n rank_str = 'rank=%s' % str(aid2_rank)\n except Exception as ex:\n ut.printex(ex)\n # ut.embed()\n raise\n text_list.append(rank_str)\n if kwargs.get('show_rawscore', rawscore is not None or cm is not None):\n rawscore_str = 'rawscore=' + ut.num_fmt(rawscore)\n if len(text_list) > 0:\n rawscore_str = '\\n' + rawscore_str\n text_list.append(rawscore_str)\n if kwargs.get('show_score', score is not None or cm is not None):\n score_str = 'score=' 
+ ut.num_fmt(score)\n if len(text_list) > 0:\n score_str = '\\n' + score_str\n text_list.append(score_str)\n name_score = kwargs.get('name_score', None)\n name_rank = kwargs.get('name_rank', None)\n if kwargs.get('show_name_score', True):\n if name_score is not None:\n text_list.append('name_score=' + ut.num_fmt(name_score))\n if kwargs.get('show_name_rank', True):\n if name_rank is not None:\n # Make display one based\n text_list.append('name_rank=#{}'.format(str(name_rank + 1)))\n # with ut.embed_on_exception_context:\n if kwargs.get('show_timedelta', True):\n assert qaid is not None, 'qaid cannot be None'\n # TODO: fixme\n if isinstance(aid2, list):\n aid2_ = aid2[0]\n else:\n aid2_ = aid2\n timedelta_str = '\\n' + get_timedelta_str(ibs, qaid, aid2_)\n text_list.append(timedelta_str)\n query_text = ', '.join(text_list)\n return query_text", "title": "" }, { "docid": "326393a01b23c0ac92356371e1cef062", "score": "0.5244869", "text": "def _on_query_changed(self, evt):\n \n self.ShowArticles()", "title": "" }, { "docid": "b08a3bf2f7a4bc3b4882fa333a7c1c70", "score": "0.5242554", "text": "def my_query(request):\n userprofile = UserProfileS.objects.filter(user=request.user.id)\n allquery = QueryS.objects.filter(user=request.user.id).order_by('-created_at')\n popular_query = QueryS.objects.order_by('-views')[:5]\n branch = ['CSE', 'IT', 'ECE', 'ME', 'CE', 'EN']\n title = request.user.username + ' Queries'\n context_dict = {\n 'userprofile': userprofile,\n 'allquery': allquery,\n 'popular_query': popular_query,\n 'branch': branch,\n 'title': title}\n return render(request, 'vtr/index.html', context_dict)", "title": "" }, { "docid": "91e33ecbfd8017e374d99f625eb42f11", "score": "0.52389663", "text": "async def meta_query(self, meta, **kwargs: Any):\n if meta == 'siteinfo':\n async for json in self.query(meta='siteinfo', **kwargs):\n assert 'batchcomplete' in json\n assert 'continue' not in json\n return json['query']\n async for json in self.query(meta=meta, **kwargs):\n if meta == 'filerepoinfo':\n meta = 'repos'\n assert json['batchcomplete'] is True\n return json['query'][meta]", "title": "" }, { "docid": "0544fb9d95ba5b0d4802b2c0376536c0", "score": "0.5228882", "text": "def handle_query():\n act_type = get(\"action\")\n req_type = get(\"type\")\n obj_type = OBJECT_TYPES.get(req_type)\n obj_name = get(\"name\")\n\n log.debug(\"responding to: %s, %s, %s\", act_type, req_type, obj_name)\n\n res = cached_special_query(act_type, req_type, obj_name)\n if res is not None:\n return res\n\n ## queries for each object type available\n if obj_type:\n if act_type == \"submit\":\n handle_submit(obj_type)\n return handle_response(obj_type, req_type)\n \n else:\n return json.dumps({\n \"error\" : \"unknown request: %s.\" % str((req_type, act_type, obj_type)),\n })", "title": "" }, { "docid": "0e2eb312d23f77f5cb782026b1209edc", "score": "0.5227536", "text": "async def wiki(self, ctx, *, query: str):\n params = urllib.parse.urlencode({\"action\": \"opensearch\", \"search\": query})\n url = BASE_URL_WIKIPEDIA_API.format(params)\n async with ctx.bot.session.get(url) as response:\n if response.status == 200:\n data = await response.json()\n if not data[1]:\n await ctx.send(\"No results found. :<\")\n return\n embed = discord.Embed()\n for index in range(0, min(3, len(data[1]))):\n description = f\"{data[3][index]}\\n{data[2][index]}\"\n embed.add_field(name=data[1][index], value=description, inline=False)\n await ctx.send(embed=embed)\n else:\n message = \"Couldn't reach Wikipedia. 
x.x\"\n await ctx.send(message)", "title": "" }, { "docid": "5f2a1b4c8f4c515834619a01f21118c5", "score": "0.52245384", "text": "def query(self,text_input,prefix='answer:'):\n raise NotImplementedError", "title": "" }, { "docid": "d93c62512cc0748b0c2ca0f4f0552a9b", "score": "0.5218709", "text": "def clik_on_run(self):\n qry = self.q_box_manual.get(1.0, END)\n if(qry==\"\"):\n messagebox.showerror(\"Query Error\",\"qurey cant be empty\")\n return\n s_flag=self.semanticFlag.get() == 1\n stem_flag=self.stem_flag.get() == 1\n city_choise=self.get_choosen_cites()\n resualt_set = self.control.rum_custom_query(qry,s_flag,city_choise,stem_flag,self.res_path)\n display_flag=self.save_file_check.get() == 1\n if not display_flag:\n self.display_new_window(self.display(resualt_set))", "title": "" }, { "docid": "a4cffdb07965339329bcaf9487a3bde1", "score": "0.52054185", "text": "def show_qry(title, querystr, fld_join='\\t', row_separator=None):\n\toutput_line('\\n\\n')\n\toutput_line('--- %s ---' % title)\n\trs = select_qry(querystr)\n\tif rs:\n\t\tfor row in rs:\n\t\t\toutput_line(fld_join.join([fld2str(s) for s in row]))\n\t\t\tif row_separator:\n\t\t\t\toutput_line(row_separator)\n\telse:\n\t\toutput_line(' -- NO DATA --')", "title": "" }, { "docid": "d49b6e12bde82cfca34005cba62e522f", "score": "0.52050996", "text": "def _run_query(self, q):\n\n\t\tactions = ActionChains(self.driver)\n\n\t\tactions.move_to_element(self.driver.find_element_by_css_selector('div.ace_text-layer'))\n\t\tactions.click()\n\t\tactions.send_keys(q)\n\t\tactions.perform()\n\n\t\tget_answer_button = self.driver.find_element_by_css_selector('button.RunButton')\n\t\tself.click_and_wait(get_answer_button, secs=15)\n\n\t\trow_count = 0\n\n\t\ttry:\n\t\t\tfor _ in self.driver.find_element_by_css_selector('div.ShownRowCount').text.strip().split():\n\t\t\t\tif _.replace(',','').isdigit():\n\t\t\t\t\trow_count = int(_.replace(',',''))\n\t\t\t\t\tbreak\n\t\texcept:\n\t\t\tprint('cannot find row count!')\n\n\t\tprint(f'got {row_count} rows')\n\n\t\tself.rows_per_week.append(row_count)\n\n\t\tdownload_full_results = self.driver.find_element_by_css_selector('svg.Icon-downarrow')\n\t\tself.click_and_wait(download_full_results)\n\n\t\tcsv_option = self._find_by_text('button', 'Button', 'csv')\n\t\tself.click_and_wait(csv_option)\n\n\t\treturn self", "title": "" }, { "docid": "ffd3374cf0e077d0a7abc12aa8930a83", "score": "0.52003056", "text": "def query(self):\n return self.__p['query']", "title": "" }, { "docid": "97514463f2e417c4215abbc7960e63d4", "score": "0.5198253", "text": "def query_block(self):\n pass", "title": "" }, { "docid": "fa9256ea7a6899afdc57cf9d1da6e080", "score": "0.5195169", "text": "def work(self):\n if self.options.client:\n client = Client.objects.get(slug=self.options.client)\n show = Show.objects.get(\n client=client, slug=self.options.show)\n else:\n show = Show.objects.get(slug=self.options.show)\n\n self.one_show(show)\n\n return", "title": "" }, { "docid": "6a5dffec0ee4134b95cd5185ce9fc8cd", "score": "0.518749", "text": "async def wikiss13(self, ctx: commands.Context, *, query: str):\n async with ctx.typing():\n payload = self.generate_payload(query)\n async with aiohttp.ClientSession() as session:\n async with session.get(\n self.API_URL,\n params=payload,\n headers=self.HEADERS\n ) as res:\n result = await res.json(content_type=None)\n\n embed_tasks = []\n if \"query\" in result and \"pages\" in result[\"query\"]:\n result[\"query\"][\"pages\"].sort(\n key=lambda unsorted_page: unsorted_page[\"index\"]\n )\n pages 
= result[\"query\"][\"pages\"]\n if 'redirects' in result['query']:\n for redirect in result[\"query\"][\"redirects\"]:\n skip = False\n for page in pages:\n if page['title'] == redirect['to'] and not 'tofragment' in redirect:\n skip = True\n break\n if skip:\n continue\n page = {\n 'title': redirect['from'],\n 'fullurl': self.WIKI_URL + '/' + self.fix_fragment_urls(redirect['to']),\n 'redirect_title': redirect['to'],\n }\n if 'tofragment' in redirect:\n page['fullurl'] += '#' + self.fix_fragment_urls(redirect['tofragment'])\n page['tofragment'] = self.fix_fragment_urls(redirect['tofragment'])\n pages.append(page)\n pages.sort(key=lambda page: -self.similarity(page['title'], query))\n for page in pages:\n if (\n \"categories\" in page\n and page[\"categories\"]\n and \"title\" in page[\"categories\"][0]\n and page[\"categories\"][0][\"title\"]\n == self.DISAMBIGUATION_CAT\n ):\n continue # Skip disambiguation pages\n if not ctx.channel.permissions_for(ctx.me).embed_links:\n # No embeds here :(\n await ctx.send(\n warning(\n f\"I'm not allowed to do embeds here, so here's the first result:\\n{page['fullurl']}\"\n )\n )\n return\n embed_tasks.append(self.generate_embed(page, session))\n if not ctx.channel.permissions_for(ctx.me).add_reactions:\n break # Menu can't function so only show first result\n embeds = await asyncio.gather(*embed_tasks, return_exceptions=True)\n\n\n if not embeds:\n await ctx.send(\n error(f\"I'm sorry, I couldn't find \\\"{query}\\\" on SS13 Wiki\")\n )\n elif len(embeds) == 1:\n embeds[0].set_author(name=f\"Result 1 of 1\")\n await ctx.send(embed=embeds[0])\n else:\n count = 0\n for embed in embeds:\n count += 1\n embed.set_author(name=f\"Result {count} of {len(embeds)}\")\n await menu(ctx, embeds, DEFAULT_CONTROLS, timeout=60.0)", "title": "" }, { "docid": "9dd6b2c67977d1a001e0c57836420ba6", "score": "0.51872844", "text": "def query(self, *, sparql: str) -> Result:\n return self.graph.query(sparql)", "title": "" }, { "docid": "b34b1b51a131b8e7a1bab23b2c0e8f15", "score": "0.51871115", "text": "def query(self, qry, *args, **kwargs):\n self.qry = qry\n return QueryJob(self.qry)", "title": "" }, { "docid": "1a79f98eec625064787b2b67aca25b09", "score": "0.51815903", "text": "def on_chosen_inline_result(self, msg):\n print(msg)\n print('inline_message_id' in msg)\n result_id, from_id, query_string = telepot.glance(msg, flavor='chosen_inline_result')\n print(from_id, \",\", self.id, ':', 'Chosen Inline Result:', result_id, from_id, query_string)\n print(\"Message sent to \" + str(from_id))", "title": "" }, { "docid": "d98123f0b06761d789419be45e9f0218", "score": "0.5178854", "text": "def query(self, q):\n \n # create a query and get the results from the storage engine\n q = ReconcileQuery(q)\n results = self.storage.search(q)\n \n # sort the results by score\n if getattr(results, \"sort\", None):\n results.sort(key=lambda x: x.score, reverse=True)\n \n # if there's a limit on results only return those results\n if q.limit:\n results = results[0:q.limit]\n \n # prepare each result in the JSON return format\n for i in results:\n \n # check if it's an exact match\n match = q.query.lower()==i[self.search_field].lower() or i.score==100\n \n q.add_result({\n \"id\":i[self.id_field],\n \"name\":i[self.search_field],\n \"type\":[{\n \"id\": \"/\" + self.type,\n \"name\": self.type\n }],\n \"score\":i.score,\n \"match\":match,\n })\n \n # if we've got an exact match then just return it\n if match:\n return q.results\n \n return q.results", "title": "" }, { "docid": 
"d0e2cbffb46459295f2c7eeee95138c4", "score": "0.5178549", "text": "def embed_query(self, text: str) -> List[float]:\n \n return self._embedding_func([text])[0]", "title": "" }, { "docid": "278ec3aa046e257429e5901f73f801d1", "score": "0.51777095", "text": "def GetQuery(self):\n return self.dbHandlerModule.Query(self.dbHandler)", "title": "" }, { "docid": "1748ef626855120f097b0e30bd2920d3", "score": "0.51754", "text": "def getAskResult(self,smw,ask,limit=20):\n #PrintRequest.debug=self.debug\n result=smw.query(ask,limit=limit)\n if self.debug:\n print (result) \n return result;", "title": "" }, { "docid": "7b95a4831b75125b2f65b30a67b26d87", "score": "0.5165777", "text": "def do_run_sql(self):\n query, results = self._get_sql_results()\n if query and results:\n res = f\"QUERY\\n{'-' * 20}\\n{query}\\n{'-' * 20}\\nRESULT\\n\"\n res += tabulate(\n [[str(column) for column in row] for row in results],\n tablefmt=\"grid\",\n floatfmt=\".3f\",\n )\n res += \"\\n\"\n print(res)", "title": "" }, { "docid": "8074e3c01f9caa090ff13f53c55c4f74", "score": "0.5154938", "text": "def show_search(self, search_q: str, user: discord.Member) -> None:\n\n # Get the search result from the client\n search_result: list = self.youtube_client.search_videos(search_q, self.max_result)\n\n # Store the user search\n user_name = user.name + \"#\" + user.discriminator\n self.user_search[user_name] = search_result\n\n # Create the search message\n search_message: str = user.display_name + \" here is the result for \\\"\" + search_q + \"\\\" :\\n\"\n search_message += \"```\\n\"\n for i in range(len(search_result)):\n search_message += str(i + 1) + \". \"\n search_message += search_result[i][\"title\"] + \" - \" + search_result[i][\"channel_title\"]\n search_message += \" [\" + search_result[i][\"duration\"] + \"]\\n\"\n search_message += \"```\"\n search_message += \"Type `!choose <ID>` to add a song to the queue\"\n\n # Send the message\n self.send_message(search_message)", "title": "" }, { "docid": "82c9dd1acb59e6a9ea741b29dba1ac68", "score": "0.5141633", "text": "def __query(self, q, r=False):\n\t\tdb = self.bot_name\n\t\tconn = sql.connect(db)\n\n\t\ttry:\n\t\t\tc = conn.cursor()\n\t\t\tc.execute(q)\n\t\t\tconn.commit()\n\n\t\t\tif r:\n\t\t\t\tresults = c.fetchone()\n\t\t\t\tc.close()\n\t\t\t\treturn results\n\t\t\telse:\n\t\t\t\tc.close()\n\t\t# todo - add a proper exception message\n\t\texcept:\n\t\t\tpass", "title": "" }, { "docid": "36f18eab68af72c63d02748e7e6ca45a", "score": "0.5134174", "text": "def query(self, query_statement, args, context=None):\n pass", "title": "" }, { "docid": "536f5d3334f0c1eb958848a230f061a1", "score": "0.5112002", "text": "async def _arun(self, query: str) -> str:\n return (await self.wrapper.aresults(query, self.num_results)).__str__()", "title": "" }, { "docid": "7673e7e77975371fbfe0c2781b18c623", "score": "0.51080906", "text": "def show_results(self, result, delta_wealth):\n\n pass", "title": "" }, { "docid": "e5122663690f0d6072babaa92fb622fa", "score": "0.51037693", "text": "def query(self,comando):\n \n time.sleep(0.5) \n try:\n return self.interface.query(comando)\n except Exception as e:\n raise Exception", "title": "" }, { "docid": "5640beb1a24194b190ca94012dfcaa1e", "score": "0.5101341", "text": "def coreQuery(self):\n\n\t\twebquery = {'query':self.query, 'resulttype': 'core', 'format': 'json'}\n\t\tr = requests.get(self.epmc_basequeryurl, params=webquery)\n\t\tself.rawresults = r.json()\n\t\tif len(self.rawresults['resultList']['result']) == 1:\n\t\t\tself.singleresult = 
self.rawresults['resultList']['result'][0]", "title": "" }, { "docid": "3f6692069622122f8fb3aef2715dc7f4", "score": "0.50959855", "text": "def show_all_hike_results():\n \n user_id = session.get(\"current_user\", None)\n if user_id:\n results = HikeResult.query.filter((HikeResult.user_id == user_id) &\n (HikeResult.canceled_by_user == False)).all()\n return jsonify(results)\n return 'No active results found'", "title": "" }, { "docid": "09ae8fb01a55bbca6d78397cb8f8d5ae", "score": "0.509199", "text": "def query(self):\n return self._query", "title": "" }, { "docid": "48baeafa1018463686baa54df6b7e729", "score": "0.50915104", "text": "async def run_query(*, github_access_token, query):\n endpoint = \"https://api.github.com/graphql\"\n query = json.dumps({\"query\": query})\n client = ClientWrapper()\n resp = await client.post(\n endpoint,\n data=query,\n headers={\"Authorization\": f\"Bearer {github_access_token}\"},\n )\n resp.raise_for_status()\n return resp.json()", "title": "" }, { "docid": "19d6dfa72b6d737429d27175753e6844", "score": "0.5082662", "text": "def get_query():\n return CiscoC2900Query", "title": "" }, { "docid": "e4182878b215aec5cddbf4560f8ce2fe", "score": "0.5071671", "text": "def set_queries(self) -> None:", "title": "" }, { "docid": "b18885c94f02e4278b04b96808e81f43", "score": "0.5064763", "text": "def query_function():\n args = get_args(\n request_args=request.args,\n allowed_str_args=['text', 'entry_name'],\n allowed_int_args=['limit']\n )\n\n return jsonify(query.function(**args))", "title": "" } ]
e5d0c52e17e4e5e5e0f1ed744b6195bc
Compute the probability of the different services contained in the data using the given classifier.
[ { "docid": "31822774ceb7b6860b32167010e772e7", "score": "0.7801408", "text": "def services(self, data, classifier):\n values = self.__generate_values(data, classifier)\n max_value = 0\n # Search max index in probabilities (number of categories)\n for val in values:\n if val[0] > max_value:\n max_value = val[0]\n # Add element to the array (start at 0)\n max_value += 1\n # List of probabilities for each category\n probabilities = [0] * max_value\n for val in values:\n if (val[1]*100) > probabilities[val[0]]:\n probabilities[val[0]] = (100*val[1])\n\n return probabilities", "title": "" } ]
[ { "docid": "c13a61aa62c3aa58c2f8926ba1620388", "score": "0.6992532", "text": "def probability(self, data, classifier):\n values = self.__generate_values(data, classifier)\n max_value = 0\n # Search max index in probabilities (number of categories)\n for val in values:\n if val[0] > max_value:\n max_value = val[0]\n # Add element to the array (start at 0)\n max_value += 1\n # List of probabilities for each category\n probabilities = [0] * max_value\n for val in values:\n if val[1] > probabilities[val[0]]:\n probabilities[val[0]] = val[1]\n data = 0\n for prob in probabilities:\n if (prob*100) > data:\n data = (prob*100)\n return data", "title": "" }, { "docid": "2fbbf9f79d55bc7330e697f898cbadc7", "score": "0.66785395", "text": "def _calculate_probablility(self, trained_data: dict, classification: str, tokens: list, delta=0.01):\n if classification not in trained_data:\n return 0\n\n token_counts = trained_data[classification]\n total_words_in_category = sum(val for val in token_counts.values())\n vocab_size = len(token_counts.keys())\n probability = Decimal('1')\n for token in tokens:\n counts = Decimal(token_counts[token] + delta)\n token_probability = counts / Decimal(total_words_in_category + (delta * vocab_size))\n probability = probability * token_probability\n\n return probability", "title": "" }, { "docid": "9ea3ee3534840321fda2898c5961e22c", "score": "0.63413054", "text": "def PredictProbModel(classifier,X_test):\n Y_test_pred_proba = classifier.predict_proba(X_test)\n \n return Y_test_pred_proba*100", "title": "" }, { "docid": "6a4f1563ce9514e09a59d80b7ac71e50", "score": "0.61598575", "text": "def __calc_probabilities__(self, dataset):\n prior_prob = defaultdict(float)\n # Format for this dict is: First dict key refers to attribute name, second dict key refers\n # attribute value and third dict key refers to class name. E.g. 
P[0]['20-29']['recurrence-events\\n']\n posterior_prob = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n\n # Calculating the prior probabilities by dividing by the number of instances\n for class_name, value in self.prior_counts.items():\n prior_prob[class_name] = value/dataset.get_num_rows()\n\n # Now calculate the posterior probabilities by dividing the fractional counts\n # by the fractional class counts\n for attr_name, val_dict in self.posterior_counts.items():\n for attr_val, class_dict in self.posterior_counts[attr_name].items():\n for class_name, count in self.posterior_counts[attr_name][attr_val].items():\n numerator = count\n # Subtract the fractional counts from the total if this attribute of\n # the instance is missing\n denominator = self.prior_counts[class_name] - self.missing_counts[attr_name][class_name]\n posterior_prob[attr_name][attr_val][class_name] = numerator/denominator\n\n return prior_prob, posterior_prob", "title": "" }, { "docid": "17616a3c345c08625da43fde4d5fadfc", "score": "0.61435264", "text": "def __generate_values(self, data, classifier):\n # Load default parameters\n parameters = self.__load_default()\n\n # Load classifiers\n classifiers = []\n z = 0\n while True:\n try:\n classifier.classify([], '_'.join(parameters) + '_' + str(z))\n z += 1\n except FileNotFoundError:\n break\n\n # The classifier is not trained\n if z < 2:\n raise FileNotFoundError\n\n # OpenCV Selective Search has a bug which generates bounding boxes outside the border of the image.\n # Therefore, a big border is needed to solve the problem.\n bordersize = 200\n # List for all the Histograms\n all = []\n # Load vocabulary generated in the training process\n self.vocabulary = np.loadtxt(os.path.join(self.feature_directory, 'default_' + '_'.join(parameters), 'voc.out'))\n # Load standard scaler generated in the training process\n self.std_slr = pickle.load(\n open(os.path.join(self.feature_directory, 'default_' + '_'.join(parameters), \"std.out\"), \"rb\"))\n # Loop over all images\n for img in data:\n probabilities = [0] * z\n # Apply white border\n border = cv2.copyMakeBorder(cv2.imread(img), top=bordersize,\n bottom=bordersize, left=bordersize, right=bordersize,\n borderType=cv2.BORDER_CONSTANT, value=[255, 255, 255])\n # Create Selective Search Segmentation Object using default parameters\n ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()\n # Set input image on which we will run segmentation\n ss.setBaseImage(border)\n # Use quality Search\n ss.switchToSelectiveSearchQuality()\n # Run selective search segmentation on input image\n rects = ss.process()\n # Iterate over all the region proposals\n for j, rect in enumerate(rects):\n # Compare rectangle for region proposal till numShowRects\n if j < 500: # 500 is the best value found in the Bachelor Thesis Logos recognition for website services\n # Get rectangle coordinates\n x, y, w, h = rect\n probs = []\n # Loop over the different logos classes to compute the histogram\n for i in range(z):\n hist = self.__compute_hist(border[y:y + h, x:x + w])\n if hist is not None:\n # Compute probability that the histogram is the logo i\n prob = classifier.classify([hist], '_'.join(parameters) + '_' + str(i))[0]\n # Save probability and logo type\n probs.append((prob[0], i))\n # Sort the probabilities to have the highest as the first element\n probs.sort(key=lambda tup: tup[0], reverse=True)\n # loop over all probabilities (one for each class) and take the highest until now\n for i in range(len(probs)):\n # if prob is 
higher than the probability until now replace it\n if probs[i][0] > probabilities[probs[i][1]]:\n probabilities[probs[i][1]] = probs[i][0]\n # print image with random name\n break\n else:\n break\n for i in range(z):\n all.append((i, probabilities[i]))\n return all", "title": "" }, { "docid": "87e2b3d985277858a46ca84a0ae6e629", "score": "0.6140995", "text": "def model_probs(self, model):\n if not model:\n model = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True), n_jobs=-1)\n model.fit(self.X_train, self.y_train).decision_function(self.X_test)\n predictions = model.predict_proba(self.X)\n return predictions", "title": "" }, { "docid": "3592a7f9f754fbed554ed77430bbdff0", "score": "0.60972387", "text": "def calculate_prob(input_data,feature_values):\n m = input_data.shape[0] #total sample size\n n = input_data.shape[1] #total feature(attribute) number\n \n total_feature_value_number = len(feature_values)\n probs = np.zeros((total_feature_value_number,n))\n #if all values are the same --> Assuming we have two labels only!!!\n if check_all_values_same(input_data,feature_values):\n for i in range(total_feature_value_number):\n if input_data[0] == feature_values[i]:\n probs[i,0] = 1.0\n else: \n for i in range(n):\n current_attribute_data = input_data[:,i]\n total_prob = 0\n for j in range(total_feature_value_number):\n # Calculate how many times we have specific value for attribute\n total = current_attribute_data[current_attribute_data == feature_values[j]].shape[0]\n probs[j,i] = total/m\n total_prob = total_prob + probs[j,i]\n #print(\"Total prob: \", total_prob)\n\n # Total probability must be equal to 1.0\n epsilon = 0.001\n if (total_prob - 1.0) > epsilon:\n print(\"Total prob. is not equal to 1.0!\")\n \n return probs", "title": "" }, { "docid": "2aca7cabb48c22dd66b562aad61417c6", "score": "0.60691977", "text": "def train_classifier(self):\n for c_class in self.classes:\n self.class_prob[c_class] = math.log(float(len(self.result_set[c_class])) / self.total_docs, 2)\n #print \"Class: %s, prob: %d\" %(c_class, self.class_prob[c_class])\n\n \"\"\"Find the probability of each document\"\"\"\n \n \"\"\"count the total number of unique terms\"\"\"\n words_map = {}\n unique_word = 0\n for c_class in self.classes:\n data = self.result_set[c_class]\n words = re.split(r'\\W+', data)\n for word in words:\n if word not in words_map:\n unique_word += 1\n words_map[word] = 1\n\n for c_class in self.classes:\n data = self.result_set[c_class]\n words = re.split(r'\\W+', data)\n local_word_count = {}\n for word in words:\n if word not in local_word_count:\n local_word_count[word] = 0\n local_word_count[word] += 1\n\n num_words = len(words)\n for word in local_word_count:\n if word not in self.word_prob:\n self.word_prob[word] = {}\n self.word_prob[word][c_class] = math.log((float)(local_word_count[word] + 1) / (num_words + unique_word), 2)\n self.nan[c_class] = math.log((float) (1)/ (num_words + unique_word), 2)\n #print c_class, num_words, unique_word\n \n \n \n \"\"\"\n for word in self.word_prob:\n print \"%s, %s\" %(word, self.word_prob[word])\n \"\"\"", "title": "" }, { "docid": "decf7de5aead797d88d7a1ed119b6e9f", "score": "0.60660297", "text": "def classify(templates, data, index, classifier):\n\n features = featurize(data, index, templates)\n return classifier.predict_proba(features)", "title": "" }, { "docid": "5fa8ed84e08b343a2e3f4369e63f35ed", "score": "0.60084236", "text": "def _conditional_prob(self, X_splitted):\n y_unique = list(X_splitted.keys()) # number of classes\n n = 
X_splitted[y_unique[0]].shape[1] # number of features\n counts = []\n totals = []\n for y in X_splitted.keys():\n temp_count = np.sum(X_splitted[y], axis=0).tolist()\n total = np.sum(X_splitted[y])\n counts.append(temp_count)\n totals.append(total)\n counts = np.array(counts)\n totals = np.array(totals).reshape(len(y_unique), 1)\n self.conditional = (counts + self.smooth) / (totals + n)\n # print(counts)\n # print(totals)", "title": "" }, { "docid": "e5dc0690d17b77de3a5dbfcdd340bb7b", "score": "0.600323", "text": "def predictClassifier(classifierObj, data):\n\n \n result = classifierObj.predict(data)\n return result", "title": "" }, { "docid": "cbd7e308ddd32509828f1895583b6f35", "score": "0.5993264", "text": "def dice_coef_metric_per_classes(\n probabilities: np.ndarray,\n truth: np.ndarray,\n eps: float = 1e-9,\n classes: list = ['WT', 'TC', 'ET']) -> np.ndarray:\n scores = {key: list() for key in classes}\n num_classes = probabilities.shape[0]\n predictions = probabilities\n assert (predictions.shape == truth.shape)\n\n for class_ in range(num_classes):\n prediction = predictions[class_]\n truth_ = truth[class_]\n intersection = 2.0 * (truth_ * prediction).sum()\n union = truth_.sum() + prediction.sum()\n if truth_.sum() == 0 and prediction.sum() == 0:\n scores[classes[class_]].append(1.0)\n else:\n scores[classes[class_]].append((intersection + eps) / union)\n\n return scores", "title": "" }, { "docid": "ebb06930723730d60b31fb22d5423a15", "score": "0.59763265", "text": "def predict_probability(*args, **kwargs):\n return _algorithms.svm_01_predict_probability(*args, **kwargs)", "title": "" }, { "docid": "f777190f58a73947ac4e55fa827bf172", "score": "0.5939167", "text": "def compute_priors(data, protected_index, prediction_index):\n counts = np.zeros((2, 2))\n for batch in list(data):\n _, labels = batch[0], batch[1]\n\n for label in labels:\n prot_value = label[protected_index]\n pred_value = label[prediction_index]\n counts[prot_value][pred_value] += 1\n total = sum(sum(counts))\n\n prot_rate = np.round(counts[1][1]/sum(counts[1]), 4)\n unprot_rate = np.round(counts[0][1]/sum(counts[0]), 4)\n\n print('Prob. protected class:', np.round(sum(counts[1])/total, 4))\n print('Prob. positive outcome:', np.round(sum(counts[:, 1])/total, 4))\n print('Prob. positive outcome given protected class', prot_rate)\n print('Prob. 
positive outcome given unprotected class', unprot_rate)", "title": "" }, { "docid": "4bdd63fcea34210611355677a63afc30", "score": "0.5928846", "text": "def prob(dataset):\n\n # bin by the number of different values per feature\n num_rows, num_columns = dataset.shape\n bins = [len(np.unique(dataset[:, ci])) for ci in range(num_columns)]\n\n prob, _ = np.histogramdd(dataset, bins)\n return prob / np.sum(prob)", "title": "" }, { "docid": "bf18f129e3421257faf98d462e441f20", "score": "0.59250057", "text": "def svc_cl_pred(self, x):\n \n # Validate\n pred_proba = self.svc_classif.predict_proba(x)\n probabilities = np.zeros((len(x), self.class_count), float)\n\n rownum = 0\n for row in pred_proba: \n c = 0\n for i in row:\n col = int(self.svc_classif.classes_[c]-1)\n probabilities[rownum, col] = i\n c += 1\n rownum += 1\n \n # print probabilities\n # print \"Found following classes:\"\n # print self.svc_classif.classes_\n \n pred_class = self.svc_classif.predict(x)\n \n return pred_class, probabilities", "title": "" }, { "docid": "5f6366f94af59a869bca88914a52aaa4", "score": "0.5883085", "text": "def compute_initial_distribution(training_data, order):\n\tlength = len(training_data)\n\ttags_list = [training_data[idx][1] for idx in range(0, length)]\n\n\tif order == 2:\n\t\tcount_dict = defaultdict(int)\n\t\tpi_dict = defaultdict(float)\n\t\t# Count the tags that appear in the first of the sentence.\n\t\tcount_dict[tags_list[0]] += 1\n\t\tfor idx1 in range(0, length-1):\n\t\t\tif tags_list[idx1] == \".\":\n\t\t\t\tif tags_list[idx1+1] != \".\":\n\t\t\t\t\tcount_dict[tags_list[idx1+1]] += 1\n\t\ttotal_counts = sum(count_dict.values())\n\t\t# Calculate the probability.\n\t\tfor tag_i in count_dict.keys():\n\t\t\tpi_dict[tag_i] += float(count_dict[tag_i]) / total_counts\n\n\t\treturn pi_dict\n\n\telif order == 3:\n\t\tcount_dict = defaultdict(lambda: defaultdict(int))\n\t\tpi_dict = defaultdict(lambda: defaultdict(float))\n\t\t# Count the tags that appear in the first of the sentence.\n\t\tcount_dict[tags_list[0]][tags_list[1]] += 1\n\t\tfor idx1 in range(0, length-2):\n\t\t\tif tags_list[idx1] == \".\":\n\t\t\t\tif tags_list[idx1+1] != \".\":\n\t\t\t\t\tif tags_list[idx1+2] != \".\":\n\t\t\t\t\t\tcount_dict[tags_list[idx1+1]][tags_list[idx1+2]] += 1\n\t\ttotal_counts = 0\n\t\tfor tag1 in count_dict.keys():\n\t\t\ttotal_counts += sum(count_dict[tag1].values())\n\t\t# Calculate the probability.\n\t\tfor tag_i in count_dict.keys():\n\t\t\tfor tag_j in count_dict[tag_i].keys():\n\t\t\t\tpi_dict[tag_i][tag_j] += float(count_dict[tag_i][tag_j]) / total_counts\n\n\t\treturn pi_dict\n\n\telse:\n\t\traise Exception('The order must be 2 or 3.')", "title": "" }, { "docid": "6677aae65df136f925718fd2011bd0fb", "score": "0.58715874", "text": "def _predict_probability_from_features(self, features):\n return LogisticRegression.predict_proba(self.target_model, features)", "title": "" }, { "docid": "5e1ee0fbc236b0f6cbdb165d1f07866f", "score": "0.5856548", "text": "def test_clf(clf, train_data, train_target, test_data, test_target):\r\n clf.fit(train_data, train_target)\r\n pred = clf.predict(test_data)\r\n acc = np.sum(pred==test_target) / test_data.shape[0]\r\n return acc", "title": "" }, { "docid": "3ba28f4c74c8d744f0eabc62627a8342", "score": "0.58552", "text": "def computeProbabilities(self):\n pass", "title": "" }, { "docid": "02012cd35cba409248a09e2143d4c53d", "score": "0.5841066", "text": "def evaluate(model, ds):\n results = ds.map(lambda x: (model.predict(x.features), x.label))\n truePositives = 
results.filter(lambda x: x[0] == x[1])\n result = float(truePositives.count()) / results.count()\n truePositives.unpersist()\n results.unpersist()\n return result", "title": "" }, { "docid": "80c9442d72caae7dab26e06dd6269350", "score": "0.5815622", "text": "def train(self):\n total = 0\n classes = {} \n counts = {}\n # determine size of a training vector\n size = len(self.data[0])\n #\n # iterate through training instances\n for instance in self.data:\n total += 1\n category = instance[0]\n classes.setdefault(category, 0)\n counts.setdefault(category, {})\n classes[category] += 1\n # now process each column in instance\n col = 0\n for columnValue in instance[1:]:\n col += 1\n tmp = {}\n if col in counts[category]:\n tmp = counts[category][col]\n if columnValue in tmp:\n tmp[columnValue] += 1\n else:\n tmp[columnValue] = 1\n counts[category][col] = tmp\n ## print(classes) {'i500': 9, 'i100': 6}\n ## print(counts)\n #############################\n ## counts数据结构: 但该DS有一个不足就是:如果数据量特别大的时候,数据结构的构造非常麻烦。\n # {'i500': {1: {'appearance': 3, 'health': 4, 'both': 2}, 2: {'active': 4, 'sedentary': 2, 'moderate': 3}, 3: {'aggressive': 6, 'moderate': 3}, 4: {'yes': 6, 'no': 3}},\n # 'i100': {1: {'both': 3, 'health': 1, 'appearance': 2}, 2: {'active': 2, 'sedentary': 3, 'moderate': 1}, 3: {'aggressive': 1, 'moderate': 5}, 4: {'yes': 2, 'no': 4}}}\n #############################\n\n\n # ok. done counting. now compute probabilities\n #\n # first prior probabilities\n # \n for (category, count) in classes.items():\n self.prior[category] = count / total\n # now compute conditional probabilities 算法改进\n for (category, columns) in counts.items():\n tmp = {}\n for (col, valueCounts) in columns.items():\n tmp2 = {}\n for (value, count) in valueCounts.items():\n tmp2[value] = count / classes[category]\n tmp[col] = tmp2\n #convert tmp to vector\n tmp3 = []\n for i in range(1, size):\n tmp3.append(tmp[i])\n self.conditional[category] = tmp3\n ## print(self.conditional)\n #############################\n ## conditional数据结构\n # {'i500': [{'health': 0.4444444444444444, 'appearance': 0.3333333333333333, 'both': 0.2222222222222222}, \n # {'active': 0.4444444444444444, 'sedentary': 0.2222222222222222, 'moderate': 0.3333333333333333}, \n # {'aggressive': 0.6666666666666666, 'moderate': 0.3333333333333333}, \n # {'yes': 0.6666666666666666, 'no': 0.3333333333333333}], \n # 'i100': [{'both': 0.5, 'health': 0.16666666666666666, 'appearance': 0.3333333333333333}, \n # {'active': 0.3333333333333333, 'sedentary': 0.5, 'moderate': 0.16666666666666666}, \n # {'aggressive': 0.16666666666666666, 'moderate': 0.8333333333333334}, \n # {'yes': 0.3333333333333333, 'no': 0.6666666666666666}]}\n #############################", "title": "" }, { "docid": "38baada5f959da0f875b11f902fbf0e5", "score": "0.5800794", "text": "def learn_discrete(dataset,virtual_count=1,domain=None):\r\n res = dict()\r\n # print \"DATA:\", dataset, \"Domain: \", domain\r\n if(domain==None):\r\n domain = set(dataset)\r\n for elem in domain:\r\n res[elem] = 0\r\n #TODO: generate a distribution over the domain from the data and\r\n #taking into account the virtual counts\r\n denom = len(dataset) + (virtual_count*len(domain))\r\n for elem in dataset:\r\n res[elem] = res.get(elem, 0) + 1\r\n for k,v in res.iteritems():\r\n res[k] = (v+virtual_count)/float(denom)\r\n # print res\r\n return res\r\n # NOTE: currently assumes that same virtual count applies to every entry in domain\r", "title": "" }, { "docid": "a2c7d609a9ec2aa39efbbe7b6b90e708", "score": "0.57777405", "text": 
"def classifier(train_pca, val_pca, test_pca):\n \n clf = RandomForestClassifier(n_estimators=100, \n max_depth=None, \n min_samples_split=2, \n random_state=0)\n clf.fit(train_pca, train_labels.values.ravel())\n \n accuracy = clf.score(val_pca, val_labels)\n val_predictions = clf.predict(val_pca)\n negative, positive = class_sample_count(val_predictions)\n \n print(\"-\"*40)\n print(\"Classifier Accuracy: {0}\".format(accuracy) + \"\\n\")\n print(\"Validation Set Predictions\")\n print(\"Number of Data Points for Class -1: {0}\".format(negative))\n print(\"Number of Data Points for Class 1: {0}\".format(positive))\n print(\"-\"*40)\n \n test_labels = clf.predict(test_pca)\n # Convert label numpy array to dataframe\n test_labels_df = pd.DataFrame(test_labels)\n \n negative, positive = class_sample_count(test_labels) \n print(\"-\"*40)\n print(\"Test Set Predictions\")\n print(\"Number of Data Points for Class -1: {0}\".format(negative))\n print(\"Number of Data Points for Class 1: {0}\".format(positive))\n print(\"-\"*40)\n\n return test_labels_df", "title": "" }, { "docid": "38671734fca341e79cc0306a45615b6c", "score": "0.57676584", "text": "def classify(self, features):\n spamProb = self.document_probability(features, 'spam')\n return spamProb", "title": "" }, { "docid": "e0e9596c5bdfbd01f70f5487710dbb2f", "score": "0.576644", "text": "def classifier_test(classifier, feature_vector, mu_ft, std_ft):\n \n # Normalize feature_vector\n x = (feature_vector - mu_ft) / std_ft \n y_hat = classifier.predict(x)\n #y_hat = None\n return y_hat", "title": "" }, { "docid": "6d76a51b453f1eabe97406be2f29c232", "score": "0.5761093", "text": "def compute_distributions(data, labels, discrete_features, continuous_features):\n\t# obtain counts for discrete features given needs work\n\tdiscrete_positive = [discrete_conditional(data, labels, feature, class_=1) \n\t\t\t\t\t\t\t\tfor feature in discrete_features]\n\t# obtain counts for discrete features given not needs work\n\tdiscrete_negative = [discrete_conditional(data, labels, feature, class_=-1) \n\t\t\t\t\t\t\t\tfor feature in discrete_features]\n\t# obtain probability distribution for continuous features given needs work\n\tcontinuous_positive = []\n\tfor feature in continuous_features:\n\t\tif feature != 'writing':\n\t\t\tcontinuous_positive.append(\n\t\t\t\tcontinuous_conditional(data, labels, feature, class_=1, distribution='normal')\n\t\t\t\t)\n\t\telse:\n\t\t\tcontinuous_positive.append(\n\t\t\t\tcontinuous_conditional(data, labels, feature, class_=1, distribution='negativeBinomial')\n\t\t\t\t)\t\n\t# obtain probability distribution for continuous features given not needs work\n\tcontinuous_negative = []\n\tfor feature in continuous_features:\n\t\tif feature != 'writing':\n\t\t\tcontinuous_negative.append(\n\t\t\t\tcontinuous_conditional(data, labels, feature, class_=-1, distribution='normal')\n\t\t\t\t)\n\t\telse:\n\t\t\tcontinuous_negative.append(\n\t\t\t\tcontinuous_conditional(data, labels, feature, class_=-1, distribution='negativeBinomial')\n\t\t\t\t)\t\n\treturn (discrete_positive, discrete_negative, continuous_positive, continuous_negative)", "title": "" }, { "docid": "a91e0570619171ca46e928259fe4d179", "score": "0.5758035", "text": "def learn_naive_bayes(class_key,feature_keys,\r\n dataset,\r\n class_prior_count=1,feature_posterior_count=1,\r\n class_domain=None,feature_domains=None):\r\n if class_domain == None:\r\n #compute the set of values that the class can take on\r\n class_domain = set([instance[class_key] for instance in 
dataset])\r\n if feature_domains == None:\r\n #compute the set of values that the features can take on\r\n feature_domains = dict()\r\n for f in feature_keys:\r\n if f not in feature_domains:\r\n feature_domains[f] = set([instance[f] for instance in dataset])\r\n\r\n #create a uniform class prior\r\n # PCuniform = uniform(class_domain)\r\n # PFuniform = dict()\r\n # #create uniform feature priors\r\n # for f in feature_keys:\r\n # PFf = dict()\r\n # #for all values v in the class domain, PFf[v] is a distribution over f's\r\n # #domain\r\n # for class_v in class_domain:\r\n # PFf[class_v] = uniform(feature_domains[f])\r\n # PFuniform[f] = PFf\r\n # return (PCuniform,PFuniform)\r\n\r\n\r\n PClearned = learn_discrete([instance[class_key] for instance in dataset],\r\n class_prior_count,\r\n class_domain)\r\n PFlearned = dict()\r\n # TODO what here?\r\n for f in feature_keys:\r\n PFf = dict()\r\n for class_v in class_domain:\r\n # print \"CurrClass: \", class_v, \"CurrFeat: \", f\r\n PFf[class_v] = learn_discrete([instance[f] for instance in dataset if instance[class_key]==class_v], \r\n feature_posterior_count,\r\n feature_domains[f])\r\n PFlearned[f] = PFf\r\n return (PClearned,PFlearned)", "title": "" }, { "docid": "1575a8533dfcc28a99e132a3d52dc571", "score": "0.5757652", "text": "def Probability(self, doc, dclass=\"\"):\r\n if dclass:\r\n sum_dclass = self.sum_words_in_class(dclass)\r\n prob = 0\r\n\r\n d = Document(self.__vocabulary)\r\n d.add_text_to_doc(doc)\r\n\r\n for j in self.__document_classes:\r\n sum_j = self.sum_words_in_class(j)\r\n prod = 1\r\n for i in d.Words():\r\n wf_dclass = 1 + self.__document_classes[dclass].WordFreq(i)\r\n wf = 1 + self.__document_classes[j].WordFreq(i)\r\n r = wf * sum_dclass / (wf_dclass * sum_j)\r\n prod *= r\r\n prob += prod * self.__document_classes[j].NumberOfDocuments() / self.__document_classes[\r\n dclass].NumberOfDocuments()\r\n if prob != 0:\r\n return 1 / prob\r\n else:\r\n return -1\r\n else:\r\n prob_list = []\r\n for dclass in self.__document_classes:\r\n prob = self.Probability(doc, dclass)\r\n prob_list.append([dclass, prob])\r\n prob_list.sort(key=lambda x: x[1], reverse=True)\r\n return prob_list", "title": "" }, { "docid": "d289107e62fe2a6e2eaf298e9cfbf64f", "score": "0.57522213", "text": "def n_validator(data, p, classifier, *args):\n\n # if args are passed it is synthetic data\n if len(args) > 0:\n # merge the two synthetic data\n data = data + args[0]\n\n # randomized the data\n random.shuffle(data)\n #split the data in p groups\n chunked_data = chunks(data, p)\n\n n = len(chunked_data)\n training = chunked_data[-1] # p-1 part of data is training data\n score = 0\n for i in range(n - 1):\n lt = len(chunked_data[i])\n labels_list = []\n test_list = []\n\n for j in range(lt):\n # separate test data from its label\n labels_list.append(chunked_data[i][j][1])\n test_list.append(chunked_data[i][j][0])\n #classifier is nn_classifier, method passed as param\n labels = classifier(training, test_list)\n\n #compared the label with the one from classifier\n for t in range(len(labels)):\n if labels[t] == labels_list[t]:\n score += 1\n # return performance of classifier\n return score / len(data)", "title": "" }, { "docid": "400d8c8b9042bb42d3de1ad1494b24cf", "score": "0.5744106", "text": "def predict(self, feature_matrix):\r\n classifications = []\r\n for propability in self.predict_probability(feature_matrix):\r\n if propability > self.cut_off:\r\n classifications.append(1)\r\n else:\r\n classifications.append(0)\r\n return classifications", 
"title": "" }, { "docid": "7226a09c09ad1e3e450129fcd34d40da", "score": "0.5730646", "text": "def predict(self, cls, point, examples):\n value = point[self.name]\n if cls not in self.probs:\n self.probs[cls] = dict()\n if value in self.probs[cls]:\n return self.probs[cls][value]\n else:\n examples = examples[self.name]\n self.probs[cls][value] = self._prob(point[self.name], examples.value_counts(), len(examples))\n return self.probs[cls][value]", "title": "" }, { "docid": "cb6e42dc00c6b20245025a3aacc2d513", "score": "0.5726601", "text": "def predict_probability(*args, **kwargs):\n return _algorithms.svm_dense_predict_probability(*args, **kwargs)", "title": "" }, { "docid": "289f021bc06b62ddd06a3a349305f0fd", "score": "0.5713281", "text": "def classify(self, document, discrete=True):\n v = self._vector(document)[1]\n i = self._iteration or 1\n i = float(i)\n p = defaultdict(float)\n for type, w in self._weight.iteritems():\n #p[type] = sum(w[f][0] for f in v if f in w) # Without averaging.\n s = 0\n for f in v:\n if f in w:\n w0, w1, j = w[f]\n s += ((i-j) * w0 + w1) / i\n p[type] = s\n # Normalize probability estimates.\n m = min(chain(p.itervalues(), (0,)))\n s = sum(x-m for x in p.itervalues()) or 1\n for type in p:\n p[type] -= m\n p[type] /= s\n if not discrete:\n return p\n try:\n # Ties are broken in favor of the majority class\n # (random winner for majority ties).\n m = max(p.itervalues())\n p = sorted((self._classes[type], type) for type, w in p.iteritems() if w == m > 0)\n p = [type for frequency, type in p if frequency == p[0][0]]\n return choice(p)\n except:\n return self.baseline", "title": "" }, { "docid": "a1b91b7bfc69fe5737dc4d0594d44e2e", "score": "0.5709207", "text": "def __init__(self, pred_class, pred_class_count, total_count): \n self.pred_class = pred_class\n self.pred_class_count = pred_class_count\n self.total_count = total_count\n self.prob = pred_class_count / total_count # probability of having the class label", "title": "" }, { "docid": "53f242ef2b343e378dbcfa0ade0b4655", "score": "0.5703653", "text": "def cprob(self, f, cat):\n clf = self.fprob(f, cat)\n if clf == 0: return 0.0 # else testOneCategory() fails\n freqsum = sum([self.fprob(f, c) for c in self.categories()])\n p = clf/freqsum\n return p", "title": "" }, { "docid": "b95793b24ab5a41a5e8357914ec8581f", "score": "0.56769687", "text": "def Naive_Bayes_classifier(headline, word_list, training_set, training_label, m, p):\n #headline = headline.split()\n \n #calculate P(fake) and P(real)\n n = len(train_real) + len(train_fake)\n count_real = len(train_real)\n count_fake = len(train_fake)\n prob_fake = len(train_fake) / float(n)\n prob_real = 1.0 - prob_fake\n \n prob_word_real = []\n prob_word_fake = [] \n for i in word_list.keys():\n #P(word_i|real)\n P_word_i_real = (word_list[i][0]+m*p)/float(count_real + 1)\n #P(word_i|fake)\n P_word_i_fake = (word_list[i][1]+m*p)/float(count_fake + 1)\n \n if i in headline:\n prob_word_real.append(P_word_i_real)\n prob_word_fake.append(P_word_i_fake) \n elif i not in headline:\n prob_word_real.append(1. - P_word_i_real)\n prob_word_fake.append(1. 
- P_word_i_fake) \n \n #conditional independence is assumed by Naive Bayes\n #do multiplication to get P(words|real) and P(words|fake)\n multi_real = 0\n for p in prob_word_real:\n multi_real += math.log(p)\n multi_real = math.exp(multi_real)\n \n multi_fake = 0\n for p in prob_word_fake:\n multi_fake += math.log(p)\n multi_fake = math.exp(multi_fake)\n \n #compute P(class)*P(words|class)\n prob_real_words = prob_real * multi_real\n prob_fake_words = prob_fake * multi_fake\n \n #compute P(class)*(1 - P(words|class)) for part 3\n prob_real_not_words = prob_real * (1. - multi_real)\n prob_fake_not_words = prob_fake * (1. - multi_fake) \n \n #probability that the given headline is fake, P(fake|words)\n prob = prob_fake_words/ (prob_fake_words + prob_real_words)\n \n #probability that the headline is fake when the word absence, P(fake|~words), for part 3\n prob_absence = prob_fake_not_words/ (prob_fake_not_words + prob_real_not_words) \n \n result = \"real\"\n if prob > 0.5:\n result = \"fake\"\n \n return result, prob, prob_absence", "title": "" }, { "docid": "78c2f9405143d8564f0cdf7334a50859", "score": "0.5674112", "text": "def classifier(point):\n kernel_v = np.array([kernel_func(point, v) \\\n for v in support_vectors])\n predict_class = sum(alpha_y_nz*kernel_v)\n if predict_class>0:\n return 1\n else:\n return 0", "title": "" }, { "docid": "581d7009f10ab1d61f3b55649aa91631", "score": "0.5670493", "text": "def classify(self, instances):\n\n priors = [0.0] * len(self.estimators)\n probabilities = [[0.0 for x in range(len(priors))] for y in range(len(instances))]\n\n # For all class labels estimate the prior of the class\n for i in range(len(self.estimators)):\n priors[i] = len(self.estimators[i].getSamples())\n\n # Now we iterate over all instances to determine the kernel densities\n for i in range(len(instances)):\n densities = [0] * len(self.estimators)\n\n # Then estimate the kernel density based on the stored instances with that label\n # TODO: variable kernel, incorporate bandwidth!\n for j in range(len(self.estimators)):\n densities[j] = self.estimators[j].estimateDensity(instances[i], self.bandwidths[j])\n # Compute probabilities as product of estimated prior and density\n for j in range(len(densities)):\n probabilities[i][j] = priors[j]*densities[j]\n\n # Normalize probabilities\n if(sum(probabilities[i]) == 0):\n # Choose class with highest prior\n probabilities[i][np.argmax(priors)] = 1.0\n else:\n probabilities[i] /= sum(probabilities[i])\n\n return probabilities", "title": "" }, { "docid": "c41e9f48666405dda26fc24296745627", "score": "0.56640023", "text": "def calculate_accuracies(X_train, X_test, y_train, y_test,\r\n classifier, num_features=None):\r\n X_train, y_train = X_train, y_train\r\n X_test, y_test = X_test, y_test\r\n pipe = Pipeline([('vectorizer', TfidfVectorizer(max_features=num_features)),\r\n ('classifier', classifier)])\r\n pipe.fit(X_train, y_train)\r\n y_pred = pipe.predict(X_test)\r\n y_pred_train = pipe.predict(X_train)\r\n return accuracy_score(y_pred, y_test), accuracy_score(y_pred_train,\r\n y_train)", "title": "" }, { "docid": "06da4f3891a26ec39c0070ffb55c2699", "score": "0.5661928", "text": "def predict(algo):\n\n X_train, X_test = vectorize.ngrams_vectorize(train_sentences=train_passages,\n test_sentences=test_passages,\n ngram_range=NGRAMS,\n max_features=MAX_FEATURES)\n\n clf = algo.fit(X_train, Y_train)\n assert clf.classes_.tolist()[0] == 'fic' # make sure that the class ordering is ['fic' 'non']\n preds_with_probs = clf.predict_proba(X_test) # for 
AUROC & AUPRC\n\n print(\"Train: {} & {} | Test: {}\".format(X_train.shape, Y_train.shape, X_test.shape))\n print(\"Ordering:\", clf.classes_)\n print(\"Y test shape:\", preds_with_probs.shape)\n\n return preds_with_probs", "title": "" }, { "docid": "30b5c1154a113024c63eb05c5fd02d90", "score": "0.5658066", "text": "def _classify(self, document, probability=False):\n if self._model is None:\n return None\n M = self._model[0]\n H1 = self._model[1]\n H2 = self._model[2]\n H3 = self._model[3]\n n = len(H1)\n v = self._vector(document)[1]\n v = dict(map(lambda (i, k): (H1.get(k, n+i+1), v[k]), enumerate(v)))\n # For linear SVC, use LIBLINEAR which is 10x faster.\n # For kernel SVC, use LIBSVM.\n if self.extension == LIBLINEAR:\n f = self._svm.liblinearutil.predict\n o = \"-b 0 -q\"\n else:\n f = self._svm.libsvmutil.svm_predict\n o = \"-b %s -q\" % int(probability)\n p = f([0], [v], M, o)\n # Note: LIBLINEAR does not currently support probabilities for classification.\n if self._type == CLASSIFICATION and probability is True and self.extension == LIBLINEAR:\n return {}\n if self._type == CLASSIFICATION and probability is True:\n return defaultdict(float, ((H3[i], w) for i, w in enumerate(p[2][0])))\n if self._type == CLASSIFICATION:\n return H3.get(int(p[0][0]))\n if self._type == REGRESSION:\n return p[0][0]\n if self._type == DETECTION:\n return p[0][0] > 0 # -1 = outlier => return False\n return p[0][0]", "title": "" }, { "docid": "6198bb977c74adcc47a3968702fea1d2", "score": "0.56548846", "text": "def n_validator(data, p, classifier, *args):\n\n # if args aren't passed k = 1\n k = 1\n dist_type = 1\n if args:\n k = args[0]\n\n # if second args is passed\n if len(args) > 1:\n dist_type = args[1]\n \"\"\"if a 3er args and its type is array\n is passed merge the two synthetic data\"\"\"\n if len(args) > 2 and isinstance(args[2], list):\n data = data + args[2]\n\n # randomized the data\n random.shuffle(data)\n # split the data in p groups\n chunked_data = chunks(data, p)\n\n n = len(chunked_data)\n training = chunked_data[-1] # p-1 part of data is training data\n score = 0\n for i in range(n - 1):\n lt = len(chunked_data[i])\n labels_list = []\n test_list = []\n\n for j in range(lt):\n # separate test data from its label\n labels_list.append(chunked_data[i][j][1])\n test_list.append(chunked_data[i][j][0])\n # classifier is knn_classifier, method passed as param\n labels = classifier(training, test_list, k, dist_type)\n\n # compared the label with the one from classifier\n for t in range(len(labels)):\n if labels[t] == labels_list[t]:\n score += 1\n # return performance of classifier\n return score / len(data)", "title": "" }, { "docid": "9458ddc564a1ec6563d8466014fe899d", "score": "0.56402564", "text": "def classify(data, labels, discrete_features, continuous_features, alpha):\n\t# compute total number of projects labelled \"needs work\"\n\tnum_needsWork = num_positive(labels)\n\t# obtain all the necessary probability distributions\n\tdis_pos, dis_neg, cont_pos, cont_neg = compute_distributions(data, labels, \n\t\t\t\t\t\t\t\t\t\t\t\tdiscrete_features, continuous_features)\n\t# initialize numpy array to store predicted labels\n\tpredicted_labels = np.zeros(len(labels), dtype=int)\n\n\t# iterate over rows in data set\n\tfor i in range(len(labels)):\n\t\t# compute number of training instances labelled positive (needs work)\n\t\tif labels[i]==1: num_pos = num_needsWork-1\n\t\telse: num_pos = num_needsWork\n\n\t\t# compute number of training instances labelled negative (not needs work)\n\t\tnum_neg 
= len(labels)-1-num_pos\n\n\t\t# compute unconditional probability of positive class (needs work)\n\t\tP_needsWork = num_pos/(len(labels)-1)\n\n\t\t# initialize scores for positive class (needs work) and negative class (not needs work)\n\t\tscore_pos, score_neg = math.log(P_needsWork), math.log(1-P_needsWork)\n\t\t\n\t\t# for each discrete feature\n\t\tfor j in range(len(discrete_features)):\n\t\t\t# access feature value\n\t\t\tfeature_value = data[discrete_features[j]][i]\n\t\t\t# obtain conditional count for that feature value given positive\n\t\t\tcount_pos = dis_pos[j].get(feature_value, 0) \n\t\t\t# if test case had true label positive, subtract 1 from this count\n\t\t\tif labels[i]==1: count_pos -= 1 \n\t\t\t# find conditional probability, applying Laplace smoothing\n\t\t\tprob_pos = (count_pos + alpha)/(num_pos + alpha*len(dis_pos[j]))\n\t\t\t# update score for positive class\n\t\t\tscore_pos += math.log(prob_pos)\n\t\t\t# obtain conditional count for feature value given negative\n\t\t\tcount_neg = dis_neg[j].get(feature_value, 0)\n\t\t\t# if test case had true label negative, subtract 1 from this count\n\t\t\tif labels[i]==-1: count_neg -= 1 \n\t\t\t# find conditional probability, applying Laplace smoothing\n\t\t\tprob_neg = (count_neg + alpha)/(num_neg + alpha*len(dis_neg[j]))\n\t\t\t# update score for negative class\n\t\t\tscore_neg += math.log(prob_neg)\n\n\t\t# for each continuous feature\n\t\tfor k in range(len(continuous_features)):\n\t\t\t# access feature value\n\t\t\tfeature_value = data[continuous_features[k]][i]\n\t\t\t# obtain conditional probability for feature value given positive\n\t\t\tprob_pos = cont_pos[k](feature_value)\n\t\t\t# update score for positive class\n\t\t\tscore_pos += math.log(prob_pos)\n\t\t\t# obtain conditional probability for feature value given negative\n\t\t\tprob_neg = cont_neg[k](feature_value)\n\t\t\t# update score for negative class\n\t\t\tscore_neg += math.log(prob_neg)\n\t\t# classify current row\n\t\tif score_pos > score_neg: predicted_labels[i] = 1\n\t\telse: predicted_labels[i] = -1\n\n\t# return vector of predicted labels\n\treturn(predicted_labels)", "title": "" }, { "docid": "b2dc41e1b63211fa19eb9cf38c8b1fca", "score": "0.56321377", "text": "def test_generate_classifier(templates_path_pattern, persist_clf_filename):\n global HYPERPARAMS\n clf, X_test, y_test = generate_classifier(templates_path_pattern, hyperparams=HYPERPARAMS)\n print('Test Accuracy of SVC = ', round(clf.score(X_test, y_test), 4))\n n_predict = 10\n print('My SVC predicts: ', clf.predict(X_test[0:n_predict]))\n print('For these', n_predict, 'labels: ', y_test[0:n_predict])\n if persist_clf_filename is not None:\n persist_classifier(clf, X_test, y_test, pickle_file=persist_clf_filename)", "title": "" }, { "docid": "ca7b8dc0ae7e1f130cb05d3dad94749a", "score": "0.56172806", "text": "def evaluate(filename, repeat=1, train_percent=.7):\n total = 0\n correct = 0\n \n for i in range(repeat):\n data = load_data(filename, num_train_per_cat)\n \n classifier = ParagraphClassifier(training_data, feature_extractor)\n \n for (data, bio_type) in test_data:\n total += 1\n if classifier.classify(data) == bio_type:\n correct += 1\n\n print \"Paragraph classifier on all data, results over \" + str(repeat) + \" runs: \" + str(float(correct)/total)\n\n classifier.classifier.show_most_informative_features(50)\n return classifier", "title": "" }, { "docid": "0cff0535ced2fcbf3f8caae6a6e1c15e", "score": "0.5617273", "text": "def score(self, feature_values):\n fv_vector = 
vectorize_values(feature_values)\n scaled_fv_vector = self.apply_scaling(fv_vector)\n\n prediction = self.estimator.predict([scaled_fv_vector])[0]\n labels = self.estimator.classes_\n probas = self.estimator.predict_proba([scaled_fv_vector])[0]\n probability = {label: proba for label, proba in zip(labels, probas)}\n\n doc = {'prediction': prediction, 'probability': probability}\n return util.normalize_json(doc)", "title": "" }, { "docid": "8c809498a68eb1cb05dde0d9819959e0", "score": "0.5604324", "text": "def calc_cond_prob(self):\n for key in self.class_labels:\n denominator = self.class_labels[key] + 2*config.COND_PROB_DELTA\n for feature in config.FEATURE_DICT:\n numerator = config.COND_PROB_DELTA + config.FEATURE_DICT[feature][key]\n if feature not in self.cond_probs:\n self.cond_probs[feature] = {}\n self.cond_probs[feature][key] = numerator/denominator", "title": "" }, { "docid": "368d346da4ebf3646c4694348e12f926", "score": "0.5603966", "text": "def compute_score(train, test):\n return sum([classify(sample, train) == sample[4] for sample in test]) / len(test)", "title": "" }, { "docid": "2a9a25934594045c684eca4608a715c4", "score": "0.56034255", "text": "def predict_knn(classifier, X_test):\n\n pred_classes = classifier.predict(X_test)\n pred_probs = classifier.predict_proba(X_test)\n\n return pred_classes, pred_probs", "title": "" }, { "docid": "e9129226f50ffdeef2d4096665f9f82c", "score": "0.5603181", "text": "def feature_probability(self, feature, label):\n total = self.features.get(feature, 0)\n if total:\n return Decimal(self.feature_counts.get(feature, {}).get(label, 0))/total\n\n return total", "title": "" }, { "docid": "438ac9a8928da99882b42b34d9a40fde", "score": "0.5600129", "text": "def calculateProbabilityDensityFunc(self):\n\n trainErrorSetosa = 0\n trainErrorVirginica = 0\n trainErrorVersicolor = 0\n\n testErrorSetosa = 0\n testErrorVirginica = 0\n testErrorVersicolor = 0\n\n i = 1/(((2*pi)**(self.numOfFeatures/2))*sqrt(linalg.det(self.sigma)))\n z = linalg.inv(self.sigma)\n\n iTwo = 1/(((2*pi)**(self.numOfFeatures/2))*sqrt(linalg.det(self.sigmaTwo)))\n zTwo = linalg.inv(self.sigmaTwo)\n\n iThree = 1/(((2*pi)**(self.numOfFeatures/2))*sqrt(linalg.det(self.sigmaThree)))\n zThree = linalg.inv(self.sigmaThree)\n\n (m, n) = self.testData.shape\n for x in range(m):\n diff = subtract(self.testData[x], self.muOne) # This will give us a 4x1\n diff = diff.reshape((self.numOfFeatures, 1))\n diffTransposed = transpose(diff)\n\n k = exp(-0.5 * dot(dot(diffTransposed, z), diff))\n probSetosa = i * k\n\n diffTwo = subtract(self.testData[x], self.muTwo) # This will give us a 4x1\n diffTwo = diffTwo.reshape((self.numOfFeatures, 1))\n diffTwoTransposed = transpose(diffTwo)\n\n k = exp(-0.5 * dot(dot(diffTwoTransposed, zTwo), diffTwo))\n probVersicolor = iTwo * k\n\n diffThree = subtract(self.testData[x], self.muThree) # This will give us a 4x1\n diffThree = diffThree.reshape((self.numOfFeatures, 1))\n diffThreeTransposed = transpose(diffThree)\n\n k = exp(-0.5 * dot(dot(diffThreeTransposed, zThree), diffThree))\n probVirginica = iThree * k\n\n maxVal = max([probSetosa[0], probVersicolor[0], probVirginica[0]])\n\n answer = \"\"\n if maxVal == probSetosa:\n answer = \"Iris-setosa\"\n elif maxVal == probVersicolor:\n answer = \"Iris-versicolor\"\n elif maxVal == probVirginica:\n answer = \"Iris-virginica\"\n trueVal = self.data[\"class\"][x].decode(\"utf-8\")\n if answer != trueVal:\n if trueVal == \"Iris-setosa\":\n if x >= 40 and x < 50:\n testErrorSetosa += 1\n else:\n trainErrorSetosa += 
1\n elif trueVal == \"Iris-versicolor\":\n if x >= 90 and x < 100:\n testErrorVersicolor += 1\n else:\n trainErrorVersicolor += 1\n elif trueVal == \"Iris-virginica\":\n if x >= 140 and x < 150:\n testErrorVirginica += 1\n else:\n trainErrorVirginica += 1\n\n trainError = (trainErrorVirginica + trainErrorVersicolor + trainErrorSetosa) / 120\n testError = (testErrorSetosa + testErrorVersicolor + testErrorVirginica) / 30\n\n testErrorSetosa /= 10\n testErrorVirginica /= 10\n testErrorVersicolor /= 10\n\n trainErrorSetosa /= 40\n trainErrorVirginica /= 40\n trainErrorVersicolor /= 40\n print(\"The test error rate was {}\".format(testError))\n print(\"The training error rate was {}\".format(trainError))\n\n print(\"The Setosa test error rate was {}\".format(testErrorSetosa))\n print(\"The Versicolor test error rate was {}\".format(testErrorVersicolor))\n print(\"The Virginica test error rate was {}\".format(testErrorVirginica))\n\n print(\"The Setosa training error rate was {}\".format(trainErrorSetosa))\n print(\"The Versicolor training error rate was {}\".format(trainErrorVersicolor))\n print(\"The Virginica training error rate was {}\".format(trainErrorVirginica))", "title": "" }, { "docid": "6e9595d14ac8cdedb47b916dac4887fd", "score": "0.5560794", "text": "def train(self, corpus): \n\n for sentence in corpus.corpus:\n last_token = None\n for datum in sentence.data:\n token = datum.word\n if not self.counts[token]:\n self.v += 1\n self.counts[token] += 1\n if last_token:\n self.counts[(last_token, token)] += 1\n last_token = token\n\n for sentence in corpus.corpus:\n last_token = None\n for datum in sentence.data:\n token = datum.word\n if last_token:\n self.probs[(last_token, token)] = 1.0 * (self.counts[(last_token, token)] + 1) / (self.counts[last_token] + self.v)\n last_token = token", "title": "" }, { "docid": "8132db5ee03171a9ace1ca3d49cf69cf", "score": "0.55428374", "text": "def classify(self, document, discrete=True):\n # Given red & round, what is the likelihood that it is an apple?\n # p = p(red|apple) * p(round|apple) * p(apple) / (p(red) * p(round))\n # The multiplication can cause underflow so we use log() instead.\n # For unknown features, we smoothen with an alpha value.\n v = self._vector(document)[1]\n m = self._method\n a = self._alpha\n n = self._classes.itervalues()\n n = float(sum(n))\n p = defaultdict(float)\n for type in self._classes:\n if m == MULTINOMIAL:\n if not type in self._cache: # 10x faster\n self._cache[type] = float(sum(self._likelihood[type].itervalues()))\n d = self._cache[type]\n if m == BINOMIAL \\\n or m == BERNOUILLI:\n d = float(self._classes[type])\n L = self._likelihood[type]\n g = sum(log((L[f] if f in L else a) / d) for f in v)\n g = exp(g) * self._classes[type] / n # prior\n p[type] = g\n # Normalize probability estimates.\n s = sum(p.itervalues()) or 1\n for type in p:\n p[type] /= s\n if not discrete:\n return p\n try:\n # Ties are broken in favor of the majority class\n # (random winner for majority ties).\n m = max(p.itervalues())\n p = sorted((self._classes[type], type) for type, g in p.iteritems() if g == m > 0)\n p = [type for frequency, type in p if frequency == p[0][0]]\n return choice(p)\n except:\n return self.baseline", "title": "" }, { "docid": "20425f88f95749ba95750439cbdfff12", "score": "0.5542399", "text": "def predict_probability(self, estimator, x_tr, weight):\n sample_distribution = []\n for (i, tree_in_rf) in enumerate(estimator.estimators_):\n tr_samples_class_dist = tree_in_rf.predict_proba(x_tr) * weight[i]\n 
sample_distribution.append(tr_samples_class_dist)\n\n sample_distribution = np.asarray(sample_distribution)\n\n return np.sum(sample_distribution, axis=0)", "title": "" }, { "docid": "1610dac215487016b2c510dbd91adf32", "score": "0.5533235", "text": "def fit_predict(X_train, label_nums, test_data, classes_name, vectorizer):\n clf = MLPClassifier(verbose=True, early_stopping=True, activation='tanh', learning_rate='adaptive')\n clf.fit(X_train, label_nums)\n pred = clf.predict(vectorizer.transform(test_data))\n generate_results(pred, classes_name)\n return pred", "title": "" }, { "docid": "dfe33ed5fed89d18f2253588947d74c8", "score": "0.553313", "text": "def classify(self, document, discrete=True):\n # Distance is calculated between the document vector and all training instances.\n # This will make KNN slow in higher dimensions.\n classes = {}\n v1 = self._vector(document)[1]\n D = ((distance(v1, v2, method=self.distance), type) for type, v2 in self._vectors)\n D = ((d, type) for d, type in D if d < 1) # Nothing in common if distance=1.0.\n D = heapq.nsmallest(self.k, D) # k-least distant.\n # Normalize probability estimates.\n s = sum(1 - d for d, type in D) or 1\n p = defaultdict(float)\n for d, type in D:\n p[type] += (1 - d) / s\n if not discrete:\n return p\n try:\n # Ties are broken in favor of the majority class\n # (random winner for majority ties).\n m = max(p.itervalues())\n p = sorted((self._classes[type], type) for type, w in p.iteritems() if w == m > 0)\n p = [type for frequency, type in p if frequency == p[0][0]]\n return choice(p)\n except:\n return self.baseline", "title": "" }, { "docid": "195ae6ab0f3e031c15c1b349bdad2fb1", "score": "0.55322266", "text": "def svcClassifier(self):\r\n name = 'SVC'\r\n svc = SVC()\r\n svc.fit(self.X_train, self.y_train)\r\n score = svc.score(self.X_train, self.y_train)\r\n\r\n print(\"***** Accuracy Score ******* \\n\")\r\n print(\"Score: \", score)\r\n\r\n ## Cross-validaton using 10-fold cross validation\r\n cv_scores = cross_val_score(svc, self.X_train, self.y_train, cv=10)\r\n print(\"Cross-Validation average score: %.2f \\n\" % cv_scores.mean())\r\n\r\n ypredicted = svc.predict(self.X_test)\r\n\r\n cm = confusion_matrix(self.y_test, ypredicted)\r\n print(\"***** Confusion Matrix ******* \\n\")\r\n print(cm)\r\n\r\n # Save the model\r\n self.saveModel(svc, name)\r\n \r\n # Append value to the results\r\n self.results.append((name, cv_scores.mean()))\r\n \r\n return cv_scores", "title": "" }, { "docid": "7756fcd46151bd2dc30f161704ae759f", "score": "0.5531835", "text": "def predict_proba(self, test_data):\r\n\r\n try:\r\n getattr(self, \"model\")\r\n except AttributeError:\r\n raise RuntimeError(\"Please train your classifier/model\")\r\n\r\n obj_mu=self.model.get('obj_mu')\r\n obj_sigma=self.model.get('obj_sigma')\r\n\r\n bkg_mu=self.model.get('bkg_mu')\r\n bkg_sigma=self.model.get('bkg_sigma')\r\n\r\n obj_prob=np.zeros(len(test_data[:,0]))\r\n bkg_prob=np.zeros(len(test_data[:,0]))\r\n\r\n for n in range(len(test_data[:,0])): # n data samples\r\n for m in range(len(test_data[0,:])): # m features\r\n # print (normpdf(test_data[n,m], obj_mu[m], obj_sigma[m]))\r\n obj_prob[n]=obj_prob[n]+math.log(self.normpdf(test_data[n,m], obj_mu[m], obj_sigma[m])+10e-10)\r\n bkg_prob[n]=bkg_prob[n]+math.log(self.normpdf(test_data[n,m], bkg_mu[m], bkg_sigma[m])+10e-10)\r\n\r\n # calculate the log-likelihood ratio and normalize to [0 1] probabilities\r\n prob=obj_prob-bkg_prob\r\n prob=(prob-prob.min())/(prob.max()-prob.min())\r\n\r\n # for two classes\r\n 
prob_k=[]\r\n for i in range(len(prob)):\r\n prob_k.append([1-prob[i], prob[i]])\r\n return np.array(prob_k)", "title": "" }, { "docid": "da70da2a4a87c84c155ab0a5e3bc3d0e", "score": "0.55207586", "text": "def accuracy_on_data_set(self, dataset):\n tp_or_fn = 0.0\n fp_or_tn = 0.0\n for example, tag in dataset:\n pred = self.predict_on_single_example(example)\n if pred == tag:\n tp_or_fn += 1\n else:\n fp_or_tn += 1\n return tp_or_fn / (tp_or_fn + fp_or_tn)", "title": "" }, { "docid": "6022f8892c67757ca24fc44e13922164", "score": "0.5519891", "text": "def teach_all_frequency(classifier):\n N = classifier.input_dimension\n positive_sample_count = 1000\n X = np.random.randn(2 * positive_sample_count, N)\n Y = np.random.randint(low=0, high=1, size=[2 * positive_sample_count])\n scores = []\n base_labels = ['cos_', 'sin_']\n for base_label in base_labels:\n for f in range(N):\n label = base_label + str(f)\n wave = np.sin([f * i * (2 * math.pi) / N for i in range(N)])\n X[:positive_sample_count, :] = np.array([wave, ] * positive_sample_count)\n Y[:positive_sample_count] = np.ones(shape=positive_sample_count)\n x_train, x_test, y_train, y_test = train_test_split(X, Y)\n classifier.fit(x_train, y_train, label)\n scores.append(classifier.score(x_test, y_test, label))\n return np.mean(scores)", "title": "" }, { "docid": "4e59fc88ac51c000a88c31b72f40d195", "score": "0.55097884", "text": "def evaluate_classifier_accuracy(self):\n correct = 0.0\n total = 0.0\n\n with open(\"data/SemEval/test2.txt\") as tsv:\n for tweet in csv.reader(tsv, dialect=\"excel-tab\"):\n self.tokenize_and_update_model(tweet[1], tweet[0])\n content = tweet[1]\n bow = self.tokenize_doc(content)\n label = tweet[0]\n if self.classify(bow) == label:\n correct += 1.0\n total += 1.0\n return 100 * correct / total", "title": "" }, { "docid": "dcfcfc8383b159ab986e3a7a597563d9", "score": "0.55000794", "text": "def probability(*args, **kwargs):", "title": "" }, { "docid": "3ce1dd5d5b783c81b185630028a4d16a", "score": "0.5499486", "text": "def naive_bayes(train_label, train_word_dict, test_words, m, p_hat):\n real_count = train_label.count(1)\n fake_count = train_label.count(0)\n total_count = len(train_label)\n\n # Get priors\n p_real = float(real_count) / float(total_count)\n p_fake = float(fake_count) / float(total_count)\n real_probs, fake_probs = [], []\n\n for word, word_count in train_word_dict.iteritems():\n if word in test_words:\n real_probs.append((float(word_count[0]) + m * p_hat) / float(real_count + m))\n fake_probs.append((float(word_count[1]) + m * p_hat) / float(fake_count + m))\n else:\n real_probs.append(1. - (float(word_count[0]) + m * p_hat) / float(real_count + m))\n fake_probs.append(1. 
- (float(word_count[1]) + m * p_hat) / float(fake_count + m))\n\n # Get the likelihoods and calculate the probability of test being real and fake\n p_real_likelihood = small_product(real_probs)\n p_real_prob = p_real_likelihood * p_real\n\n p_fake_likelihood = small_product(fake_probs)\n p_fake_prob = p_fake_likelihood * p_fake\n\n\n # prediction = np.argmax([p_real_prob, p_fake_prob])\n # print \"prediction:\", prediction\n if p_real_prob >= p_fake_prob:\n return 1\n else:\n return 0", "title": "" }, { "docid": "9bc94eda4dd798ba4ec3a631bf2d3099", "score": "0.54976195", "text": "def _predict_proba(self, X):\n dists = np.zeros((X.shape[0], self.n_classes_))\n\n # Call predict proba on each classifier, multiply the probabilities by the\n # classifiers weight then add them to the current HC2 probabilities\n dists = np.add(\n dists,\n self._stc.predict_proba(X) * (np.ones(self.n_classes_) * self.stc_weight_),\n )\n dists = np.add(\n dists,\n self._drcif.predict_proba(X)\n * (np.ones(self.n_classes_) * self.drcif_weight_),\n )\n dists = np.add(\n dists,\n self._arsenal.predict_proba(X)\n * (np.ones(self.n_classes_) * self.arsenal_weight_),\n )\n dists = np.add(\n dists,\n self._tde.predict_proba(X) * (np.ones(self.n_classes_) * self.tde_weight_),\n )\n\n # Make each instances probability array sum to 1 and return\n return dists / dists.sum(axis=1, keepdims=True)", "title": "" }, { "docid": "dcf9c035d27f1f816bcbbcbfa03fe8e9", "score": "0.5491038", "text": "def test_separable_100perc():\n\n separable_ds = make_fully_separable_classes(max_class_size=100,\n max_dim=np.random.randint(2,\n max_dim))\n separable_ds.description = 'fully_separable_dataset'\n out_path_sep = os.path.join(out_dir, 'two_separable_classes.pkl')\n out_dir_sep = os.path.join(out_dir, 'fully_separable_test')\n os.makedirs(out_dir_sep, exist_ok=True)\n separable_ds.save(out_path_sep)\n\n nrep = 10\n gsl = 'none' # to speed up the process\n for clf_name in cfg.classifier_choices:\n for fs_name in cfg.all_dim_red_methods:\n\n cli_str = 'neuropredict -y {} -t {} -n {} -c {} -g {} -o {} -e {} -dr ' \\\n '{}' \\\n ''.format(out_path_sep, train_perc, nrep, 1, gsl, out_dir_sep,\n clf_name, fs_name)\n sys.argv = shlex.split(cli_str)\n cli()\n\n cv_results = neuropredict.reports.load_results_from_folder(out_dir_sep)\n for sg, result in cv_results.items():\n raise_if_mean_differs_from(result['accuracy_balanced'],\n result['target_sizes'],\n reference_level=1.0,\n # comparing to perfect\n eps_chance_acc=0.5,\n method_descr='{} {}'.format(fs_name,\n clf_name))", "title": "" }, { "docid": "ac34b7e589eb620b50a778efe1736150", "score": "0.5487508", "text": "def test_naive_bayes(self, test_set, classes, class_probabilities, class_feature_probs):\n\n print('[ INFO ]: Testing with Naive Bayes Classifier...')\n\n class_results = {}\n scores = {}\n true_samples_total = 0\n n_samples = 0\n\n for cl in classes:\n\n # Create new column for class predictions\n feature_set = test_set.drop(classes, axis=1)\n feature_set['pred_class'] = 0\n true_class = test_set[cl]\n\n for row in range(len(feature_set)):\n\n # Initialize probability sums for each class\n true_probs_sum = 1\n false_probs_sum = 1\n true_conditional_prob_sum = 1\n false_conditional_prob_sum = 1\n\n for col in feature_set.columns:\n\n if col != 'pred_class':\n\n # Calculate probabilities assuming the class is present or 1\n if feature_set[col].iloc[row] == 1:\n\n # Compute conditional feature probabilities based on\n # wether or not the feature is present (1 or 0)\n true_prob = 
class_feature_probs[cl][0].get(col)\n false_prob = 1 - class_feature_probs[cl][1].get(col)\n\n else:\n\n # Calculate probabilities assuming the class is not present or 0\n true_prob = 1 - class_feature_probs[cl][0].get(col)\n false_prob = class_feature_probs[cl][1].get(col)\n\n # Multiply all feature probabilities together for each record\n true_probs_sum = true_probs_sum * true_prob\n false_probs_sum = false_probs_sum * false_prob\n\n # Multiply class conditional probabilities by conditional feature probabilities\n true_conditional_prob_sum = class_probabilities[cl] * true_probs_sum\n false_conditional_prob_sum = (1 - class_probabilities[cl]) * false_probs_sum\n\n # Determine which probability is highest - highest one is selected as the prediction value\n if true_conditional_prob_sum > false_conditional_prob_sum:\n feature_set['pred_class'].iloc[row] = 1\n\n # Place the results into a data frame for comparison\n results = pd.concat([feature_set['pred_class'], true_class], axis=1)\n results.columns = ['pred_class', 'true_class']\n class_results[cl] = results\n\n # Calculate the number of TP, TN, FP, FN\n true_positives = len(results.loc[(results['true_class'] == 1) & (results['pred_class'] == 1)])\n true_negatives = len(results.loc[(results['true_class'] == 0) & (results['pred_class'] == 0)])\n false_positives = len(results.loc[(results['true_class'] == 0) & (results['pred_class'] == 1)])\n false_negatives = len(results.loc[(results['true_class'] == 1) & (results['pred_class'] == 0)])\n\n scores[cl] = {\n 'TP' : true_positives,\n 'TN' : true_negatives,\n 'FP' : false_positives,\n 'FN' : false_negatives\n }\n\n true_samples_total = true_samples_total + true_positives + true_negatives\n n_samples = n_samples + len(results)\n\n classification_accuracy = true_samples_total / n_samples\n\n return class_results, scores, classification_accuracy", "title": "" }, { "docid": "266b5a9615ff6cb439920131bac93377", "score": "0.54754114", "text": "def calculateAccuracy(self, classifier, data):\r\n #initialize\r\n accuracy = 0 \r\n \r\n if data.SETS == 2: \r\n #calculate the accuracy\r\n testSamplesCount = data.testX.shape[0]\r\n for sampleIndex in range(testSamplesCount):\r\n label = classifier.predictClass(data.testX[sampleIndex, :])\r\n if label == data.testy[sampleIndex]:\r\n accuracy += 1\r\n accuracy = float(accuracy)/testSamplesCount\r\n print (\"accuracy\", accuracy)", "title": "" }, { "docid": "07a07bf9350de95a31a97ab204c89ccf", "score": "0.5475121", "text": "def evaluate_classifier(classifier, X_validation, y_validation):\n return classifier.score(X_validation, y_validation)", "title": "" }, { "docid": "7ac7789c1631599150ad25cc618b734a", "score": "0.54631627", "text": "def predict (trained_classifier, test_vectors):\n t0 = time.time()\n prediction=trained_classifier.predict(test_vectors)\n t1 = time.time()\n time_predict=t1-t0\n print(\"Prediction Results for \"+str(type(trained_classifier).__name__))\n print(\"Prediction time: %fs\" % (time_predict))\n return prediction", "title": "" }, { "docid": "2573483d8d2ace4dfc9807c81769ff7c", "score": "0.545273", "text": "def calculate_probabilities(words_occurrence_dict):\n total = sum(words_occurrence_dict.values())\n probability_dict = {}\n for word, occurrence in words_occurrence_dict.items():\n prob = occurrence/total\n# print(f'occurrence: {occurrence}, total: {total}, prob: {prob}')\n probability_dict[word] = prob\n return probability_dict", "title": "" }, { "docid": "679b99ac5c3a70755f2c710ea9d00e20", "score": "0.5444537", "text": "def 
prob_cal(self,price):\n prob=0\n for i in range(self.components):\n # extract the weights and the distribution parameters for each mixed Guassian component\n weight=self.GMM.weights_[i]\n mu=self.GMM.means_[i][0]\n std=np.sqrt(self.GMM.covariances_[i][0][0])\n # calculate the cumulative probability by composing all the Guassian components\n prob=prob+weight*norm.cdf(price,loc=mu,scale=std)\n \n return prob", "title": "" }, { "docid": "ee29484fd54feada76ebbbe85912c0e7", "score": "0.5443583", "text": "def predict_classes(probas, cut):\n N = len(probas)\n res = np.zeros(N)\n for i in range(N):\n if probas[i] < cut:\n res[i] = 0\n else:\n res[i] = 1\n return res", "title": "" }, { "docid": "1c99b44867a0750ded56c3a547ed278f", "score": "0.54432774", "text": "def predict(data):\n ids = [x['id'] for x in data]\n\n # get feats\n X1 = np.concatenate([get_feats1(x) for x in data])\n X2 = np.concatenate([get_feats2(x) for x in data])\n\n # classifiers\n svc1 = joblib.load('pan20/auth/svc1.model')\n rf1 = joblib.load('pan20/auth/rf1.model')\n nb1 = joblib.load('pan20/auth/nb1.model')\n svc2 = joblib.load('pan20/auth/svc2.model')\n rf2 = joblib.load('pan20/auth/rf2.model')\n nb2 = joblib.load('pan20/auth/nb2.model')\n bst = joblib.load('pan20/auth/bst.model')\n\n # preds\n p_svc1 = get_preds(svc1, X1)\n p_rf1 = get_preds(rf1, X1)\n p_nb1 = get_preds(nb1, X1)\n p_svc2 = get_preds(svc2, X2)\n p_rf2 = get_preds(rf2, X2)\n p_nb2 = get_preds(nb2, X2)\n\n # to text\n to_txt(p_svc1, p_rf1, p_nb1, p_svc2, p_rf2, p_nb2)\n\n # xgboost\n dmatrix = xgb.DMatrix(xgb_in_path)\n preds = bst.predict(dmatrix)\n\n # form into expected dictionary and return\n preds = [{'id': ids[i], 'value': float(preds[i])}\n for i in range(len(preds))]\n\n return preds", "title": "" }, { "docid": "b446c7ff4ba0427f516ab2fbfbc753bd", "score": "0.54369456", "text": "def calc_priors(self):\n label_counts = sum(self.class_labels.values())\n denominator = label_counts + len(self.class_labels)*config.CLASS_PRIOR_DELTA\n for key in self.class_labels:\n numerator = config.CLASS_PRIOR_DELTA + self.class_labels[key]\n self.priors[key] = np.math.log(numerator/denominator, 10)", "title": "" }, { "docid": "9f7ffc324186c52ea11fd39989974cc2", "score": "0.5436108", "text": "def train(self, corpus): \n # TODO your code here\n for sentence in corpus.corpus: # iterate over sentences in the corpus\n for datum in sentence.data: # iterate over datums in the sentence\n word = datum.word # get the word\n self.tokens.append(word)\n self.words.add(word)\n\n self.freqs = Counter(self.tokens) # create dict with help of\n\n for key in self.freqs:\n self.probs[key] = (self.freqs[key] + 1) / (len(self.tokens) + len(self.words))\n\n pass", "title": "" }, { "docid": "ca5552cd9d6e65e22b9cea04f1fc8c7a", "score": "0.54291344", "text": "def specificity_metric_per_classes(\n probabilities: np.ndarray,\n truth: np.ndarray,\n eps: float = 1e-9,\n classes: list = ['WT', 'TC', 'ET']) -> np.ndarray:\n scores = {key: list() for key in classes}\n num_classes = probabilities.shape[0]\n predictions = probabilities\n assert (predictions.shape == truth.shape)\n\n for class_ in range(num_classes):\n prediction = predictions[class_]\n truth_ = truth[class_]\n tp = l_and(prediction, truth_).sum()\n tn = l_and(l_not(prediction), l_not(truth_)).sum()\n fp = l_and(prediction, l_not(truth_)).sum()\n if truth_.sum() == 0 and prediction.sum() == 0:\n scores[classes[class_]].append(tn / (tn + fp))\n else:\n scores[classes[class_]].append(tn / (tn + fp))\n\n return scores", "title": "" }, { 
"docid": "7d769740f8f0bc9cc0c9ecd3843ff808", "score": "0.54272044", "text": "def construct_probability_vector():\n # We count the labels\n counter = Counter()\n for label in self.y:\n counter.update(label)\n\n # We divide the count of each label by the total number of data points in the set\n len_dataset = float(len(self.x))\n probability_vector = [count / len_dataset for count in counter.values()]\n\n return probability_vector", "title": "" }, { "docid": "208e6b16bf1435b5a36663dc38ddd395", "score": "0.5425743", "text": "def cal_cpt_4(train, f_a, f_b, features):\n n = len(features[f_a])\n cpt = train.groupby([f_a, f_b, 'class']).size()\n den = train.groupby([f_b,'class']).size() \n for v_a in features[f_a]:\n for v_b in features[f_b]:\n for label in features['class']:\n if (v_a,v_b,label) in cpt.index:\n cpt.loc[(v_a,v_b,label)] = (cpt.loc[(v_a,v_b,label)]+1)/\\\n (den.loc[v_b,label]+n)\n elif (v_a,v_b,label) not in cpt.index and (v_b,label) in den.index:\n cpt.loc[(v_a,v_b,label)] = 1/(den.loc[v_b,label]+n)\n elif (v_b,label) not in den.index:\n cpt.loc[(v_a,v_b,label)] = 1/n\n else:\n pass\n return cpt", "title": "" }, { "docid": "4445274b7ea2ab0a5eb91d3fee726feb", "score": "0.54226005", "text": "def compute_probabilities():\r\n\tword_tag_probabilities = {} # {(word,tag): p(tag|word)}\r\n\t# calculate the probability for each word-tag pair\r\n\tfor word_tag in word_tag_count:\r\n\t\tword_tag_probabilities[word_tag] = word_tag_count[word_tag] / word_count[word_tag[WORD_INDEX]]\r\n\treturn word_tag_probabilities", "title": "" }, { "docid": "a6ce1fb894b977dd66b27ad46c75e62a", "score": "0.54222006", "text": "def evaluate_multilabel_classifier(model: Model, loss_fn: Callable, dataset: Dataset) -> Dict[str, float]:\n samples = iter(OneShotDataLoader(dataset)).next()\n with torch.no_grad():\n features = Variable(samples['features'])\n labels = Variable(samples['label'])\n model_output = model(features)\n loss = loss_fn(model_output, labels).data.item()\n\n predictions = functional.sigmoid(model_output)\n precision = average_precision_score(labels.data.numpy(), predictions.data.numpy())\n\n return {'loss': loss, 'average_precision_score': precision}", "title": "" }, { "docid": "9a1ca06ccac294102c9dbaead2625a51", "score": "0.5422043", "text": "def classify_multi(classifiers, X_train, y_train, X_test, y_test=None, groups=None):\n \n num_samples = X_test.shape[0]\n num_classes = 9\n num_classifiers = len(classifiers)\n print(num_samples, num_classes, num_classifiers)\n probabilities = np.zeros((num_samples, num_classes, num_classifiers))\n predictions = np.zeros((num_samples, num_classifiers))\n scores = np.zeros(num_classifiers)\n \n for i in range(num_classifiers):\n pred, score, clf, proba = classify(classifiers[i], X_train, y_train, X_test, y_test, groups)\n predictions[:, i] = pred\n scores[i] = score\n probabilities[:, :, i] = proba\n \n return predictions, scores, classifiers, probabilities", "title": "" }, { "docid": "5935d2daf450034d127e3890c274ea8b", "score": "0.54180753", "text": "def predict_prob(self, X_test):\n # res = np.dot(X_test, self.feat_prob.T) * self.posteriori\n res = self.predict(X_test)\n return res / res.sum(axis=1)[:, None]", "title": "" }, { "docid": "95c99810a8f972c4e6a847c3f39fb3a3", "score": "0.54173476", "text": "def predict_probability(self, feature_matrix):\r\n scores = Core.dot_product_matrix_vector(feature_matrix, self.coefficients)\r\n # Compute P(y_i = +1 | x_i, w)\r\n probabilities = [1.0 / (1 + math.exp(-score)) for score in scores]\r\n return probabilities", 
"title": "" }, { "docid": "3e2b2a52404aedcad93b65006335380b", "score": "0.5416358", "text": "def _probability_for_class(self, x, k):\n p = self._p_y[k] / self._n\n for i, j in enumerate(x):\n p_xj_y = (self._p_x[k][i][j] + 1) / (self._p_y[k] + 2)\n p *= p_xj_y\n return p", "title": "" }, { "docid": "c34923162b35f3714d4842f9be1fee41", "score": "0.54104936", "text": "def sensitivity_metric_per_classes(\n probabilities: np.ndarray,\n truth: np.ndarray,\n eps: float = 1e-9,\n classes: list = ['WT', 'TC', 'ET']) -> np.ndarray:\n scores = {key: list() for key in classes}\n num_classes = probabilities.shape[0]\n predictions = probabilities\n assert (predictions.shape == truth.shape)\n\n for class_ in range(num_classes):\n prediction = predictions[class_]\n truth_ = truth[class_]\n intersection = (truth_ * prediction).sum()\n union = truth_.sum()\n if truth_.sum() == 0 and prediction.sum() == 0:\n scores[classes[class_]].append(np.nan)\n else:\n scores[classes[class_]].append((intersection + eps) / union)\n\n return scores", "title": "" }, { "docid": "1495bf4d29d683ee01294a592713ad47", "score": "0.53951234", "text": "def calculate_prob_scores(test_labels, probabilities, min_label=1):\n results = {}\n\n thres = max(int(0.2*len(test_labels)), 1)\n results['p_top20'] = float(np.sum(test_labels[:thres] == min_label)/thres)\n results['brier'] = float(np.mean((probabilities - test_labels)**2))\n results['log_loss'] = float(log_loss(test_labels, probabilities))\n results['auc'] = float(roc_auc_score(test_labels, probabilities))\n\n return results", "title": "" }, { "docid": "9e3f2071fc970a3bb6ada0f5c481befa", "score": "0.53816223", "text": "def get_classwise_dice(predict, soft_y):\n y_vol = torch.sum(soft_y, dim = 0)\n p_vol = torch.sum(predict, dim = 0)\n intersect = torch.sum(soft_y * predict, dim = 0)\n dice_score = (2.0 * intersect + 1e-5)/ (y_vol + p_vol + 1e-5)\n return dice_score", "title": "" }, { "docid": "3c60ab4b1b906da32cef154414a8545f", "score": "0.537611", "text": "def classify(self, traindata, trainlabels, testdata, testlabels):\n # Put your code below\n # running the above 3 classifiers turns out that the SVC is the best classifier\n classifier = SVC(C=8, gamma=0.125)\n classifier.fit(traindata, trainlabels)\n testError = 1 - classifier.score(testdata, testlabels)\n # Do not change this sequence!\n return (classifier, testError)", "title": "" }, { "docid": "497b269c8825ee3be2bf4529c9b9eefd", "score": "0.53722775", "text": "def SVMPrediciton(data_trn,label_trn,data_tst,label_tst):\n if all([label_trn[i] == label_trn[0] for i in range(len(label_trn))]):\n label_pred = label_trn[0] * np.ones(data_tst.shape[0])\n pred_accuracy = sum([1 for i in range(len(label_pred)) if label_pred[i] == label_tst[i]]) / len(label_pred)\n return pred_accuracy\n clf = svm.SVC()\n clf.fit(data_trn, label_trn)\n label_pred = clf.predict(data_tst)\n pred_accuracy = sum([1 for i in range(len(label_pred)) if label_pred[i] == label_tst[i]])/len(label_pred)\n return pred_accuracy", "title": "" }, { "docid": "140ffaef8dbd9bf60453941ba7e427f3", "score": "0.5365233", "text": "def train_classifier(classifier, vectorizer, base, language):\n print(\"============ {} =============\".format(base))\n with open(os.path.join(base, \"seeds.json\"), 'r') as json_posts:\n posts = json.load(json_posts)\n data = [(item, 1.0) for item in posts['political']]\n data.extend([(item, 0.0) for item in posts['not_political']])\n print(\"num seeds: {}\".format(len(data)))\n data.extend(load_ads_from_psql(language))\n print(\"num unique 
samples: {}\".format(len(data)))\n train, test = train_test_split(data, test_size=0.1)\n x_train, y_train = zip(*train)\n x_test, y_test = zip(*test)\n x_train = vectorizer.transform(x_train)\n x_test = vectorizer.transform(x_test)\n x_train, y_train = equalize_classes(x_train, y_train)\n print(\"final size of training data: %s\" % x_train.shape[0])\n classifier.fit(x_train, y_train)\n print(classification_report(y_test, classifier.predict(x_test)))\n return classifier", "title": "" }, { "docid": "ad65553346206b25cf45602f6ae0c392", "score": "0.53630364", "text": "def predict_proba(self, X):\n check_is_fitted(self, [\"classes_\", \"calibrators_\"])\n\n # Y[i, j] gives the probability that sample i has the label j.\n Y = np.array([c.predict_proba(\n np.column_stack([np.sum(np.delete(X, obj=i, axis=1), axis=1), X[:, self.classes_[i]]]))[:, 1] for i, c in\n enumerate(self.calibrators_)]).T\n\n if len(self.calibrators_) == 1:\n # Only one estimator, but we still want to return probabilities for two classes.\n Y = np.concatenate(((1 - Y), Y), axis=1)\n\n # Pad with zeros for classes not in training data\n if np.shape(Y)[1] != np.shape(X)[1]:\n p_pred = np.zeros(np.shape(X))\n p_pred[:, self.classes_] = Y\n Y = p_pred\n\n # Normalize probabilities to 1.\n Y = sklearn.preprocessing.normalize(Y, norm='l1', axis=1, copy=True, return_norm=False)\n return np.clip(Y, a_min=0, a_max=1)", "title": "" }, { "docid": "7a6c1a57f3169cd3c3cf6dedb12d7ea4", "score": "0.53597957", "text": "def fit(self, X_train, y_train):\n # dictionary where the key is the class label and value is the probability\n self.X_train = X_train\n self.y_train = y_train\n self.priors = {}\n # dictionary where key is the class label and value is the number of occurences\n class_dict = myutils.create_dictionary(y_train)\n total_instances = len(y_train)\n\n # copy class_dict into priors\n self.priors = copy.deepcopy(class_dict)\n # divide value by total instances to get percent \n for item in self.priors:\n self.priors[item] = self.priors[item]/total_instances\n #print(self.priors)\n # value of nested dictionary is probability of class label, parallel to labels\n labels = self.priors.keys()\n self.posteriors = {}\n\n # create a dictionary of attributes and how often they appear \n # attributes = {att0 : {1 : 5, 2 : 10}, att1 : {3 : 5, 2 : 6, 1: 4}}\n attribute_totals = {}\n for i in range(len(X_train[0])):\n name = \"att{}\".format(i)\n attribute_totals[name] = {}\n for j in range(len(X_train)):\n if X_train[j][i] in attribute_totals[name]:\n attribute_totals[name][X_train[j][i]] += 1\n else:\n attribute_totals[name][X_train[j][i]] = 1\n\n # create a dictionary with posterior probabilities\n # self.posteriors = {att0 : {1 : {\"yes\" : 0, \"no\" : 0}, 2 : {\"yes\" : 0, \"no\" : 0}}\n self.posteriors = copy.deepcopy(attribute_totals)\n for attribute in self.posteriors:\n for item in self.posteriors[attribute]:\n self.posteriors[attribute][item] = {}\n for label in labels:\n self.posteriors[attribute][item][label] = 0\n \n \n for i in range(len(X_train[0])): # iterate through attributes\n for item in self.posteriors[\"att{}\".format(i)]: # iterate through dictionary at attribute\n for row in range(len(X_train)): # iterate down rows\n if X_train[row][i] == item:\n self.posteriors[\"att{}\".format(i)][item][y_train[row]] += 1\n for label in labels:\n self.posteriors[\"att{}\".format(i)][item][label] = self.posteriors[\"att{}\".format(i)][item][label] / (self.priors[label] * total_instances)\n\n #print(self.posteriors)", "title": "" }, { 
"docid": "10b0a2a48190bab8a3687ae37a4f91ea", "score": "0.53541857", "text": "def predict(self, cls, point, examples):\n value = point[self.name]\n if cls not in self.stats:\n self.stats[cls] = dict()\n if value not in self.stats[cls]:\n examples = examples[self.name]\n self.stats[cls][value] = (examples.mean(), examples.std())\n mean, std = self.stats[cls][value]\n return self.normal_prob(mean, std, value)", "title": "" }, { "docid": "c78816f37c5f5234a2bed3d095c6ba3a", "score": "0.5350316", "text": "def decision_function(self, dataset, parallel=True):\n # handle NaNs\n if np.any(np.isnan(dataset)):\n dataset = np.nan_to_num(dataset, copy=False)\n\n # set up variables\n if self.batch_size > 0 and parallel:\n return self.decision_function_batch(dataset, self.batch_size)\n else:\n pairs_proba = np.empty((len(dataset), len(self.root_nodes)), float) # indexes of data points\n\n # get all clusters for all points in all trees\n for d_idx, d in enumerate(dataset):\n # traverse all trees\n for t_idx, tree in enumerate(self.root_nodes):\n d_mean, d_pct, d_pdf_mean, d_cov_det, d_cov_inv = descend_density_tree(d, tree)\n if d_pct > self.thresh_traverse:\n if self.method == 'normal':\n pairs_proba[d_idx, t_idx] = d_pct * my_normal(d, d_mean, d_cov_det, d_cov_inv)\n if self.standardize:\n pairs_proba[d_idx, t_idx] /= d_pdf_mean # standardize by max. probability\n else:\n pairs_proba[d_idx, t_idx] = euclidean(d_mean, d)\n if self.standardize:\n pairs_proba[d_idx, t_idx] /= d_pdf_mean # standardize by max. probability\n else:\n pairs_proba[d_idx, t_idx] = np.nan\n self.scores = np.log(np.nanmean(pairs_proba, axis=-1))\n return self.scores", "title": "" }, { "docid": "638bbbef4f3367c6f80c60b165e119db", "score": "0.53485787", "text": "def rfc_cl_pred(self, x, feat_imp_plot_filename = None):\n \n pred_proba = self.rfc_classif.predict_proba(x)\n probabilities = np.zeros((len(x), self.class_count), float)\n\n rownum = 0\n for row in pred_proba: \n c = 0\n for i in row:\n col = int(self.rfc_classif.classes_[c]-1)\n probabilities[rownum, col] = i\n c += 1\n rownum += 1\n \n # print probabilities\n # print \"Found following classes:\"\n # print self.rfc_classif.classes_\n \n pred_class = self.rfc_classif.predict(x)\n \n return pred_class, probabilities", "title": "" }, { "docid": "4585456eeed51ac5b766eb71aaad02e0", "score": "0.5348072", "text": "def __calculate_score(y_pred_class, y_pred_prob):\n if y_pred_class == 0:\n MAX = 0.5\n scaled_percentage = (y_pred_prob * MAX) / 100\n return MAX - scaled_percentage\n else:\n MAX = 1\n scaled_percentage = (y_pred_prob * MAX) / 100\n return scaled_percentage", "title": "" }, { "docid": "1582336a87bffa99b2e0c2a4f6b4d1bd", "score": "0.5342883", "text": "def _compute_diversity(self, targets, prediction_matrix):\n diversity = np.zeros(self.n_classifiers)\n\n for clf_index in range(self.n_classifiers):\n for clf_index2 in range(clf_index + 1, self.n_classifiers):\n this_diversity = self.diversity_func(targets,\n prediction_matrix[:, clf_index],\n prediction_matrix[:, clf_index2])\n\n diversity[clf_index] += this_diversity\n diversity[clf_index2] += this_diversity\n return diversity", "title": "" }, { "docid": "4e9dab93431fef193aec5cb2871f82ce", "score": "0.534066", "text": "def feature_scalars(d, features_list, test_size, random_state=42):\n data = featureFormat(d, features_list, sort_keys=True)\n # Split between labels (poi) and the rest of features\n target, features = targetFeatureSplit(data)\n\n # Create both training and test sets through split_data()\n features_train, 
features_test, labels_train, labels_test = train_test_split(\n        features,\n        target,\n        test_size=test_size,\n        random_state=random_state)\n    \n    classifier = [\"ADA\", \"SVC\"]\n    for c in classifier:\n        if c == \"ADA\":\n            clf = AdaBoostClassifier()\n        elif c == \"SVC\":\n            clf = SVC(kernel='linear', max_iter=1000)\n\n        result = []\n        clf.fit(features_train, labels_train)\n        pred = clf.predict(features_test)\n        importances = clf.feature_importances_\n\n        for i in range(len(importances)):\n            t = [features_list[i], importances[i]]\n            result.append(t)\n\n        result = sorted(result, key=lambda x: x[1], reverse=True)\n\n        print (result)\n\n\n    return None", "title": "" }, { "docid": "8478eb9e8dcb911cc9ba1f1e50446d8e", "score": "0.53401184", "text": "def implement_classifiers(self, X_train, X_test, y_train, y_test):\n        clf_LR = LogisticRegression(random_state=1, max_iter=2000)\n        clf_LR.fit(X_train, y_train)\n        clf_RF = RandomForestClassifier(n_estimators=40, max_depth=7, random_state=2)\n        clf_RF.fit(X_train, y_train)\n        prob_train_LR = clf_LR.predict_proba(X_train)\n        prob_test_LR = clf_LR.predict_proba(X_test)\n        prob_train_RF = clf_RF.predict_proba(X_train)\n        prob_test_RF = clf_RF.predict_proba(X_test)\n        probs_LR = [prob_train_LR, prob_test_LR]\n        probs_RF = [prob_train_RF, prob_test_RF]\n        return probs_LR, probs_RF", "title": "" } ]
42f2bfcea28db02ee805f37e98d23681
String that represents the Libro object
[ { "docid": "b7ae2c10f2a195a56f527606368b4323", "score": "0.0", "text": "def __str__(self):\n\t\treturn self.titulo", "title": "" } ]
[ { "docid": "ee0aec15a332c3d07dd070f692de3b44", "score": "0.7063576", "text": "def __str__(self):\n\t\treturn '%s (%s)' % (self.id, self.libro.titulo)\n\t\t# return '{0} ({1})'.format(self.id,self.libro.titulo) \n\t\t# return f'{self.id} ({self.libro.titulo})'", "title": "" }, { "docid": "804ceac8112125375ffb2c8117f49d72", "score": "0.68884933", "text": "def get_str(self):", "title": "" }, { "docid": "0e4809fc4236c658bcefae9c00068020", "score": "0.67911744", "text": "def get_string_object(self):\n data = self.get_object()\n\n try:\n return data.decode(\"utf-8\")\n except Exception as e:\n raise TypeError(\n \"The object behind this OSPar cannot be converted to a \"\n \"string. Error is: %s\" % str(e))", "title": "" }, { "docid": "6cf2e1516e0d96f58dbeacefba6f285c", "score": "0.67404175", "text": "def toString(self) -> unicode:\n ...", "title": "" }, { "docid": "509bb99c682cc4b30a85474a84fecaf5", "score": "0.66948855", "text": "def __str__(my):\n\t\treturn my.getlua()", "title": "" }, { "docid": "bdd18e0146f546e4b06c4151cbd10def", "score": "0.6681714", "text": "def __str__(self):\n return self.__javaObject.toString()", "title": "" }, { "docid": "1d616911ca8296252d3960b05ca1bc26", "score": "0.66631716", "text": "def __str__(self):\n return _pcbnew.string___str__(self)", "title": "" }, { "docid": "2ae6bf2b97e098d50654220f14b6a48a", "score": "0.6587463", "text": "def toString(self):", "title": "" }, { "docid": "454f6509d5fdd139bd24f809219fe7fb", "score": "0.6566757", "text": "def __str__(self):\n return self.get_string()", "title": "" }, { "docid": "454f6509d5fdd139bd24f809219fe7fb", "score": "0.6566757", "text": "def __str__(self):\n return self.get_string()", "title": "" }, { "docid": "7d7046dd26485533451bfedb8aa2cfdb", "score": "0.6531341", "text": "def createString(self):\n return self.name + \" \" + self.type", "title": "" }, { "docid": "eaad3e4dbc6970f87eb083457ca3ddfa", "score": "0.65206707", "text": "def __str__(self):\n return self.as_raw_string()", "title": "" }, { "docid": "d2c68874bcf0375474690196e7019295", "score": "0.65144557", "text": "def __str__(self):\n return self.string", "title": "" }, { "docid": "d2c68874bcf0375474690196e7019295", "score": "0.65144557", "text": "def __str__(self):\n return self.string", "title": "" }, { "docid": "f7a9bfb8e7e2b97929dc03a821e81f0f", "score": "0.6499038", "text": "def __str__(self):\n string = str(self.get_str())\n return string", "title": "" }, { "docid": "34aaf8fe0eead0e6f9f4ee3303ac9b9a", "score": "0.64654696", "text": "def __str__(self):\r\n return self.asbytes()", "title": "" }, { "docid": "c0dbf4e146a5bd492134a7e451ba093d", "score": "0.64364135", "text": "def __str__(self) -> str:", "title": "" }, { "docid": "6fd591d12fae87f62dd7dd628a556d17", "score": "0.6433937", "text": "def to_str(self) -> str:\n ...", "title": "" }, { "docid": "6fd591d12fae87f62dd7dd628a556d17", "score": "0.6433937", "text": "def to_str(self) -> str:\n ...", "title": "" }, { "docid": "6fd591d12fae87f62dd7dd628a556d17", "score": "0.6433937", "text": "def to_str(self) -> str:\n ...", "title": "" }, { "docid": "6fd591d12fae87f62dd7dd628a556d17", "score": "0.6433937", "text": "def to_str(self) -> str:\n ...", "title": "" }, { "docid": "6fd591d12fae87f62dd7dd628a556d17", "score": "0.6433937", "text": "def to_str(self) -> str:\n ...", "title": "" }, { "docid": "6fd591d12fae87f62dd7dd628a556d17", "score": "0.6433937", "text": "def to_str(self) -> str:\n ...", "title": "" }, { "docid": "6fd591d12fae87f62dd7dd628a556d17", "score": "0.6433937", "text": "def to_str(self) -> 
str:\n ...", "title": "" }, { "docid": "7a703d001e7a087bb79c489f7897c480", "score": "0.6430306", "text": "def __str__(self) -> str:\n return stringify_object(self)", "title": "" }, { "docid": "c729df1ff0501b2312f326b0a5b4b55a", "score": "0.6362368", "text": "def get_string_object(self, key):\n data = self.get_object(key)\n\n try:\n return data.decode(\"utf-8\")\n except Exception as e:\n raise TypeError(\n \"The object behind this OSPar cannot be converted to a \"\n \"string. Error is: %s\" % str(e))", "title": "" }, { "docid": "f9dd22b654d20903c9261fce4b021bc2", "score": "0.6360529", "text": "def __str__(self) -> str:\n return str(self.ref())", "title": "" }, { "docid": "9a92b6437f5ad4fc507e748466fbde8d", "score": "0.6353938", "text": "def toString(self):\n pass", "title": "" }, { "docid": "7673fd39b363aea8d531d482302127db", "score": "0.6349691", "text": "def asString(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "7673fd39b363aea8d531d482302127db", "score": "0.6349691", "text": "def asString(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "3d3f0c58bea00b6435ffa2e266a1605f", "score": "0.6331875", "text": "def toString(self):\r\n\r\n raise NotImplementedError", "title": "" }, { "docid": "e381e7aa67bd9df1be8294d5a7d4ea82", "score": "0.6320581", "text": "def type_str(self):\n return self.__str__(self)", "title": "" }, { "docid": "5542d7842fbddee26d7c72b90b0db47b", "score": "0.6312442", "text": "def __str__(self) -> str:\n raise NotImplementedError", "title": "" }, { "docid": "424cc6f7fb9e40934d3068a03f507d97", "score": "0.62814695", "text": "def __repr__(self):\n return '<%s cromossomo=\"%s\" aptidao=%s>' % \\\n (self.__class__.__name__,\n ''.join(map(str,self.cromossomo)),self.aptidao)", "title": "" }, { "docid": "3312674bf095a28cffb1a7db6295aee3", "score": "0.62651455", "text": "def __str__(self):\n return \"<\" + self.__class__.__name__ + \">\"", "title": "" }, { "docid": "5c65cef57f6617d22670a31b3ee2869c", "score": "0.6252754", "text": "def __str__(self):\n return self.toString()", "title": "" }, { "docid": "6e29806902714fb68c69d996988cfef9", "score": "0.6235899", "text": "def __str__(self):\n raise NotImplementedError", "title": "" }, { "docid": "6e29806902714fb68c69d996988cfef9", "score": "0.6235899", "text": "def __str__(self):\n raise NotImplementedError", "title": "" }, { "docid": "6e29806902714fb68c69d996988cfef9", "score": "0.6235899", "text": "def __str__(self):\n raise NotImplementedError", "title": "" }, { "docid": "9f98c069ccdce4ee85b2e42bfb5b004b", "score": "0.6229918", "text": "def as_str(self):\n raise NotImplementedError", "title": "" }, { "docid": "a1c2a9a032659d9f122fa592781488f9", "score": "0.6225189", "text": "def __str__(self):\r\n return f'{self.elemento}'", "title": "" }, { "docid": "b59b68c6e572be721dce844cbd87fc29", "score": "0.6192753", "text": "def __str__(self):\n return str(self)", "title": "" }, { "docid": "f5a71e98c6bd7ff33432689f1f8186f5", "score": "0.6192573", "text": "def __str__(self):\n string = \"\\tid: \" + str(self.getID()) + \"\\n\" + \\\n \"\\ttype: \" + str(self.getType()) + \"\\n\" + \\\n \"\\tpath: \" + str(self.getPath()) + \"\\n\" + \\\n \"\\tarchitecture: \" + str(self.getArchitecture()) + \"\\n\" + \\\n \"\\tosType: \" + str(self.getOsType()) + \"\\n\" + \\\n \"\\tversion: \" + str(self.getVersion()) + \"\\n\" + \\\n \"\\tmustClone: \" + str(self.getMustClone()) + \"\\n\" + \\\n \"\\tmd5Signature: \" + str(self.getMd5Signature()) + \"\\n\" + \\\n \"\\tdescription: \" + str(self.getDescription()) + \"\\n\" + \\\n \"\\towner: 
\" + str(self.getOwner()) + \"\\n\" + \\\n \"\\tstatus: \" + str(self.getStatus()) + \"\\n\" \n\n # process attributes \n string += \"\\tattributes: \"\n attributes = self.getAttributes()\n if (attributes == None):\n string += \"None\\n\"\n else:\n string += \"\\n\"\n for key in attributes.keys():\n string += \"\\t\\tname: \" + key + \" value: \" + attributes[key] + \"\\n\"\n\n return string", "title": "" }, { "docid": "1de255d34042869684bc03c1c8b99e18", "score": "0.61917824", "text": "def to_string(self):\n raise NotImplementedError()", "title": "" }, { "docid": "81c99298d476b85692a91855c5a7e51f", "score": "0.6190968", "text": "def __str__(self):\n return getattr(self, self.STR_FIELD)", "title": "" }, { "docid": "5f4f6cb41976132a9a08fa5c332893b8", "score": "0.61786526", "text": "def to_string(self):\n pass", "title": "" }, { "docid": "43b4ec4044302e607e035e697ee5f7b0", "score": "0.61761224", "text": "def __str__(self):\n return self._real", "title": "" }, { "docid": "4484023eaba3222917952ff179d72531", "score": "0.616267", "text": "def __repr__(self):\n return self.get_string()", "title": "" }, { "docid": "4484023eaba3222917952ff179d72531", "score": "0.616267", "text": "def __repr__(self):\n return self.get_string()", "title": "" }, { "docid": "c417aa764007b1bbbb4ca81e890b096d", "score": "0.61615163", "text": "def __str__(self):\n\t\treturn self.idioma", "title": "" }, { "docid": "2a9ed1d9f00f4f0ae5262ebc402e3193", "score": "0.6160581", "text": "def __repr__(self) -> str:\n str_text = f\"Library Name: {self.library_name}\\n\"\n str_text += f\"Library Phone: {self.library_phone}\"\n str_text += f\"\\nMember ID: {self.member_id}\"\n\n return str_text", "title": "" }, { "docid": "ef3c347c64b4adce2ed4756d5a9fed4e", "score": "0.6148283", "text": "def _basic_str(obj):\n return obj.__class__.__name__ + ': ' + obj.__repr__()", "title": "" }, { "docid": "b9005c9aa5a23b6f5a218792dcaff4ce", "score": "0.61476195", "text": "def __str__(self):\n return self.to_string()", "title": "" }, { "docid": "4d8decbb44c9b63ed991627219409061", "score": "0.61458707", "text": "def __str__(self):\r\n return self._str()", "title": "" }, { "docid": "3c297d6ea2fd189852bb61514ed435d5", "score": "0.6141665", "text": "def __str__(self):\n return \"{}\".format(self.nom_)", "title": "" }, { "docid": "76d75f324b1ba2eac9cbed17e9e020ba", "score": "0.613978", "text": "def __str__(self):\n return self.__class__.__name__", "title": "" }, { "docid": "76d75f324b1ba2eac9cbed17e9e020ba", "score": "0.613978", "text": "def __str__(self):\n return self.__class__.__name__", "title": "" }, { "docid": "76d75f324b1ba2eac9cbed17e9e020ba", "score": "0.613978", "text": "def __str__(self):\n return self.__class__.__name__", "title": "" }, { "docid": "1667a6e68ed05df376de8f21ddb836a8", "score": "0.613933", "text": "def __str__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1667a6e68ed05df376de8f21ddb836a8", "score": "0.613933", "text": "def __str__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1667a6e68ed05df376de8f21ddb836a8", "score": "0.613933", "text": "def __str__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1667a6e68ed05df376de8f21ddb836a8", "score": "0.613933", "text": "def __str__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "7ab51f80e98e84eefbbe86b99fcc8266", "score": "0.6129854", "text": "def __str__(self):\n info = \"Television Object\\n\"\n info += \"Current channel: \" + str(self.__channel) + \"\\n\"\n info += \"Current volume: \" + str(self.__volume) + 
\"\\n\"\n return info", "title": "" }, { "docid": "28ec6260b29e5c30a87d77af406c683f", "score": "0.61234945", "text": "def __str__(self):\n return self.format() % { \"filename\": self.__filename,\n \"linenr\": self.__linenr,\n \"type\": self.__type,\n \"message\": self.__message }", "title": "" }, { "docid": "5ad3bef4bd48917c498acd10a5b29772", "score": "0.60934037", "text": "def __str__(self):\n tostring = \"Datos del piloto: \\nIdentificador del piloto: \" + str(self.idPiloto)\n\n if self.nombre is not None:\n tostring += \"\\nNombre completo: \" + self.nombre\n if self.apellidos is not None:\n tostring += \" \" + self.apellidos\n if self.equipo is not None:\n tostring += \"\\nEquipo: \" + self.equipo\n if self.equipoAnterior is not None:\n tostring += \"\\nEquipo Anterior: \" + self.equipoAnterior\n if self.nacionalidad is not None:\n tostring += \"\\nNacionalidad: \" + self.nacionalidad\n if self.fechanac is not None:\n tostring += \"\\nFecha de Nacimiento: \" + self.fechanac\n\n return tostring + \"\\n\"", "title": "" }, { "docid": "dd6cbe5fb2257f75e6a5f4bbb09b0cdf", "score": "0.6088494", "text": "def to_string_custom(self): # Abstract method to be overridden in sub-classes\n raise NotImplementedError", "title": "" }, { "docid": "ecff840906ad540b2a4e3d7b565a818f", "score": "0.60827667", "text": "def __str__(self):\n return str(self.data)", "title": "" }, { "docid": "2b2548a395c87af3ed8f19f5d2d57abb", "score": "0.6080913", "text": "def __str__ (self):\r\n return str(self.dato)", "title": "" }, { "docid": "fecca1bb4f265974e9649edb3b9b449c", "score": "0.6076322", "text": "def __str__(self):\n return '%s(%s)' % (type(self).__name__, self.stream)", "title": "" }, { "docid": "2cdd1406bda7ff593871a6b1c759d5fc", "score": "0.6074355", "text": "def toString(self):\n raise NotImplementedError(\"method 'toString' not implemented\")", "title": "" }, { "docid": "6e7136d8b7fbb7a9f6e0111c9ac40c12", "score": "0.60707307", "text": "def __str__(self):\r\n #TODO, NOTEST\r", "title": "" }, { "docid": "6f43a8ea70ff8393823099168f926951", "score": "0.6062711", "text": "def __str__(self):\n return my_str(self)", "title": "" }, { "docid": "0a0914a9519b86e46f2acf64a6175247", "score": "0.60597503", "text": "def __str__(self):\n return self.string(False)", "title": "" }, { "docid": "3aeed2482ac1304ea042fbe9280268e6", "score": "0.60583043", "text": "def __str__(self):\n return type(self).__name__", "title": "" }, { "docid": "36ba47e028ac7784f4439f520d56283c", "score": "0.60561854", "text": "def __str__(self):\n return str(self.asarray())", "title": "" }, { "docid": "b6ae6de9a7cafc9dc64dfdaae287e583", "score": "0.6038015", "text": "def __str__(self):\r\n return self.__repr__()", "title": "" }, { "docid": "3c2f3cce4b1fade751558836a9e6dc1b", "score": "0.60341203", "text": "def __str__(self):\n return self._entry() + \\\n self._name() + \\\n self._classname() + \\\n self._sysname() + \\\n self._reaction() + \\\n self._substrate() + \\\n self._product() + \\\n self._inhibitor() + \\\n self._cofactor() + \\\n self._effector() + \\\n self._comment() + \\\n self._pathway() + \\\n self._genes() + \\\n self._disease() + \\\n self._structures() + \\\n self._dblinks() + \\\n \"///\"", "title": "" }, { "docid": "7ee25085c25a3d2b2705f90daff534ca", "score": "0.60324925", "text": "def __str__(self) -> str:\n\t\treturn self.name", "title": "" }, { "docid": "bb635072cb5b7404f5a44af705de6acb", "score": "0.60288095", "text": "def __str__(self):\n pass", "title": "" }, { "docid": "bb635072cb5b7404f5a44af705de6acb", "score": 
"0.60288095", "text": "def __str__(self):\n pass", "title": "" }, { "docid": "bb635072cb5b7404f5a44af705de6acb", "score": "0.60288095", "text": "def __str__(self):\n pass", "title": "" }, { "docid": "bb635072cb5b7404f5a44af705de6acb", "score": "0.60288095", "text": "def __str__(self):\n pass", "title": "" }, { "docid": "ab513006bef349665d10f5550b9e2ac9", "score": "0.60280454", "text": "def __str__(self):\n raise NotImplementedError(self.__str__)", "title": "" }, { "docid": "c1cd4f716a4bf5e723c274bbaf6ab603", "score": "0.602036", "text": "def to_string(self):\n return self.__str__()", "title": "" }, { "docid": "874c5576c99ef409975d1f18a7c5b78e", "score": "0.6020168", "text": "def __str__(self):\n return str(self.__dict__)", "title": "" }, { "docid": "874c5576c99ef409975d1f18a7c5b78e", "score": "0.6020168", "text": "def __str__(self):\n return str(self.__dict__)", "title": "" }, { "docid": "874c5576c99ef409975d1f18a7c5b78e", "score": "0.6020168", "text": "def __str__(self):\n return str(self.__dict__)", "title": "" }, { "docid": "874c5576c99ef409975d1f18a7c5b78e", "score": "0.6020168", "text": "def __str__(self):\n return str(self.__dict__)", "title": "" }, { "docid": "874c5576c99ef409975d1f18a7c5b78e", "score": "0.6020168", "text": "def __str__(self):\n return str(self.__dict__)", "title": "" }, { "docid": "329fc951a120cb5a58702928f6c1ff68", "score": "0.6019881", "text": "def to_string_custom(self):\n raise NotImplementedError", "title": "" }, { "docid": "0c2ec75aa83777faa4c989beda029875", "score": "0.60185874", "text": "def getString(self):\n return str(Data.execute('text($)',self))", "title": "" }, { "docid": "9ec7b3e814c5c36919c03da99a7c9d3f", "score": "0.60181403", "text": "def __str__(self):\n\t\treturn f'{self.name}'", "title": "" }, { "docid": "d0258fbbd660640e885ebcad645d2058", "score": "0.6017249", "text": "def __str__(self):\n string = \"\\id: \" + str(self.getID()) + \"\\n\" + \\\n \"\\type: \" + str(self.getType()) + \"\\n\" + \\\n \"\\tvendor: \" + str(self.getNetbootable()) + \"\\n\" + \\\n \"\\tmodel: \" + str(self.getInfrastructure()) + \"\\n\" + \\\n \"\\tdescription: \" + str(self.getVendor()) + \"\\n\" + \\\n \"\\tbuilding: \" + str(self.getModel()) + \"\\n\" + \\\n \"\\tfloor: \" + str(self.getModel()) + \"\\n\" + \\\n \"\\troom: \" + str(self.getPhysicalLocation()) + \"\\n\" + \\\n \"\\tstatus: \" + str(self.getStatus()) + \"\\n\" \n \n # process attributes \n string += \"\\tattributes: \"\n attributes = self.getAttributes()\n if (attributes == None):\n string += \"None\\n\"\n else:\n string += \"\\n\"\n for key in attributes.keys():\n string += \"\\t\\tname: \" + key + \" value: \" + attributes[key] + \"\\n\"\n\n return string", "title": "" }, { "docid": "91b6a25e0ef1ba8830a8f13a058d9f25", "score": "0.6017077", "text": "def __str__(self):\n return unicode(self.value())", "title": "" }, { "docid": "9f78d4535829b2141e4dbe899a78c6b9", "score": "0.6015789", "text": "def get_string( obj ):\n if isinstance(obj, type): return obj.__name__\n return str( obj )", "title": "" }, { "docid": "4d0c2ac175b2603715cbcc7827c9ff6b", "score": "0.6012824", "text": "def to_str(self) -> str:\n return self.__str__()", "title": "" }, { "docid": "f4ae0335cb7f5c8e2daf5a7a8811b1c8", "score": "0.60065883", "text": "def __repr__(self):\n\n # Intro.\n text = \"Class containing all the residue specific data.\\n\"\n\n # Objects.\n text = text + \"\\n\"\n text = text + \"Objects:\\n\"\n for name in dir(self):\n # Spin systems.\n if name == 'spin':\n text = text + \" spin: The list of spin systems of 
the residues\\n\"\n continue\n\n # Skip the ResidueContainer methods.\n if name == 'is_empty':\n continue\n\n # Skip special objects.\n if match(\"^_\", name):\n continue\n\n # Add the object's attribute to the text string.\n text = text + \" \" + name + \": \" + repr(getattr(self, name)) + \"\\n\"\n\n return text", "title": "" }, { "docid": "e2bc2b7b1f131773396f6896e98a1068", "score": "0.60027885", "text": "def __str__(self):\n return \"Object text: \" + self.quoteText + '\\n' +\\\n \"Links: \" + str(self.link) + '\\n' +\\\n \"Names: \" + str(self.name) + '\\n' +\\\n \"Description \" + str(self.description) + '\\n' +\\\n \"labels: \" + str(self.labels) \n # \"tag_count: \" + str(self.tag_count) ", "title": "" }, { "docid": "20b4ff38f50899321b6492d707f169c1", "score": "0.59947354", "text": "def __str__(self):\n return f'Cachorro: {self.nome}'", "title": "" }, { "docid": "7a5e588174633cfad7ce3fae5e72efc6", "score": "0.5987516", "text": "def str_class(self):\r\n return self._str(self.__class__.__dict__.keys())", "title": "" }, { "docid": "819a974ccfbfa6cba44b462e1ef34619", "score": "0.59863085", "text": "def __str__(self):\n return codecs.decode(self.__bytes__(), encoding=\"ascii\", errors=\"ignore\")", "title": "" } ]
7a6c02e370059d1e75397d934e382c1b
Compatibility wrapper for advancing an iterator.
[ { "docid": "b1c96ea935867c12aba7d0143c07edb3", "score": "0.6273643", "text": "def next(iter):\r\n return iter.next()", "title": "" } ]
[ { "docid": "4054233d5049ece5e6898be151e34502", "score": "0.73166263", "text": "def __add__(self, other: Iterator[T]):\n try:\n return self.chain(other)\n except Exception:\n pass\n return NotImplemented", "title": "" }, { "docid": "2edfa18bd364406c37307f0465c0a31d", "score": "0.6890491", "text": "def __radd__(self, other: Iterator[T]):\n if isinstance(other, FIt):\n return other.chain(self)\n else:\n try:\n return FIt(other).chain(self)\n except TypeError:\n pass\n return NotImplemented", "title": "" }, { "docid": "fc3a830235749ee56a4de290928d01e9", "score": "0.6333624", "text": "def next(iterator, default=None): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "624aac4d84ef868fda20f8e7354f1f3c", "score": "0.63039947", "text": "def __iter__(self):\n iterator = (self[i] for i in range(len(self)))\n return iterator", "title": "" }, { "docid": "3ccd7eef02f17dfd1e0cb8e679036f02", "score": "0.62874675", "text": "def iterator(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "3ccd7eef02f17dfd1e0cb8e679036f02", "score": "0.62874675", "text": "def iterator(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "435d5c73878fba3a8e8146a8fa0ee5c7", "score": "0.62762266", "text": "def __iter__(self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.element()\n cursor = self.after(cursor)", "title": "" }, { "docid": "3c9f1f2bf9d8a10651c1814502f35c83", "score": "0.62630713", "text": "def __next__(self):\n try:\n value = self.iterable[self.index]\n self.index += 1\n return value\n except IndexError:\n raise StopIteration", "title": "" }, { "docid": "e05c60147d03e3aeae08cc835886c43e", "score": "0.6243981", "text": "def __next__(self):\n if self._pointer >= self._end:\n raise StopIteration\n result = self._pointer\n self._pointer += self._step\n return result", "title": "" }, { "docid": "300ce0c3d9d08d38e59d16220f63f79a", "score": "0.6213829", "text": "def __iter__(self):\n iter = ProxyIterator()\n iter.Begin()\n return iter", "title": "" }, { "docid": "acb5c4d362424468e7893a7608323191", "score": "0.62063974", "text": "def __iter__(self):\n return self.__next__()", "title": "" }, { "docid": "8c39fcd76e35f446cc8df69ad71c5ac5", "score": "0.6198102", "text": "def __iter__(self):\n\t\tcursor = self.first()\n\t\twhile cursor is not None:\n\t\t\tyield cursor.element()\n\t\t\tcursor = self.after(cursor)", "title": "" }, { "docid": "2cc03af7095ffa5f31ae4a77047c78ef", "score": "0.618874", "text": "def coiterate(self, iterator, doneDeferred=None):\n if doneDeferred is None:\n doneDeferred = defer.Deferred()\n if self._stopped:\n doneDeferred.errback(SchedulerStopped())\n return doneDeferred\n self.iterators.append((iterator, doneDeferred))\n self._reschedule()\n return doneDeferred", "title": "" }, { "docid": "d47f4f02bd291db38df848f9d3142f74", "score": "0.61870456", "text": "def __call__(self, iterator, **params):\n raise NotImplementedError", "title": "" }, { "docid": "a1b21c402dce54f0fb49b9c204e029da", "score": "0.6148958", "text": "def accumulate(iterable, f):\n it = iter(iterable)\n start = 0 if f is add else 1\n for i in it:\n start = f(start, i)\n yield start", "title": "" }, { "docid": "6aea711bd32ec9348a00bdee8274132a", "score": "0.6114392", "text": "def next_explicit(self, playlist, iter):\n return self.next(playlist, iter)", "title": "" }, { "docid": "3476e2f14ae84fb6b3d6b9fb2c13e5ee", "score": "0.6100374", "text": "def __next__(self):\n if self.index >= len(self.data):\n raise StopIteration()\n rslt = self.data[self.index]\n self.index += 1\n 
return rslt", "title": "" }, { "docid": "8122e7692c36648f5f2a5d08b6bc3862", "score": "0.60587806", "text": "def append_it(iterator, last_val):\n for x in iterator:\n yield x\n yield last_val", "title": "" }, { "docid": "b89b87bee3ce1e1ca892c1c023b5111e", "score": "0.60305375", "text": "def __next__(self):\n if self._current is None:\n raise StopIteration()\n else:\n answer = self._current\n self._advance()\n return answer", "title": "" }, { "docid": "3875f203df9c1b572a4aabbea97ff158", "score": "0.6028944", "text": "def __iter__(self):\n return GenericIterator(self)", "title": "" }, { "docid": "3875f203df9c1b572a4aabbea97ff158", "score": "0.6028944", "text": "def __iter__(self):\n return GenericIterator(self)", "title": "" }, { "docid": "3875f203df9c1b572a4aabbea97ff158", "score": "0.6028944", "text": "def __iter__(self):\n return GenericIterator(self)", "title": "" }, { "docid": "590296a52e21b601ed012d6f326df1b4", "score": "0.6001304", "text": "def consume(iterator):\n # Inspired by this: https://docs.python.org/3/library/itertools.html#itertools-recipes\n collections.deque(iterator, maxlen=0)", "title": "" }, { "docid": "f41c96e11cbce632fdf757d615e797a1", "score": "0.5979671", "text": "def __iter__(self):\n i = 0\n while True:\n # Other code may have iterated between yields,\n # so always check the cache.\n if i < len(self):\n yield self[i]\n else:\n # Throws StopIteration when done.\n # Prevent StopIteration bubbling from generator, following https://www.python.org/dev/peps/pep-0479/\n try:\n yield next(self)\n except StopIteration:\n return\n i += 1", "title": "" }, { "docid": "d7612bd7fb1a8a7717c65a245b7e5a8e", "score": "0.59609455", "text": "def iterator_at(self, element=None): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "27eed1fdd6e5b7c069eac3ffbc05d218", "score": "0.5959506", "text": "def __iter__(self):\n return iter(self.scan_iter())", "title": "" }, { "docid": "1effaf15a6ae74ee9c07cfb14aca238f", "score": "0.5950056", "text": "def test1IterateWithBaseIterator( self ):\n\n if FIXCLING: # failure b/c of temporaries\n return\n\n gROOT.LoadMacro( \"CoralAttributeList.C+\" )\n\n a = coral_pyroot_regression.AttributeList()\n\n a.extend( \"i\", \"int\" )\n self.assertEqual( a.size(), 1 )\n self.assertEqual( a.begin(), a.begin() )\n self.assertNotEqual( a.begin(), a.end() )\n\n b = a.begin()\n e = a.end()\n self.assertNotEqual( a, e )\n\n b.__preinc__()\n self.assertEqual( b, e )\n self.assertNotEqual( b, a.begin() )", "title": "" }, { "docid": "2da3f1242ce67ab82ebe15bd644b9d1e", "score": "0.594175", "text": "def __iter__(self):\n self._iter_index = 0\n return self", "title": "" }, { "docid": "f9ed820fa7741114fb5680f3d709b15e", "score": "0.5931594", "text": "def __iter__(self):\r\n \r\n # Py3K requires iter() here\r\n return iter(getattr(self._data(), '_sa_iterator')())", "title": "" }, { "docid": "57f3ca9b8f78afaabde548e953a6b67c", "score": "0.59275997", "text": "def next(self):\n return next(self.it)", "title": "" }, { "docid": "345f407c02402ff1853a08f6ab3b0366", "score": "0.59252393", "text": "def _iterator(self):\n return NotImplementedError", "title": "" }, { "docid": "ab0d3178ca5490e1d863c3a6aa2f8c7b", "score": "0.58793753", "text": "def _iter(self, sentinal):\n _current = self._latest\n\n def _next_entry():\n nonlocal _current\n result = _current\n if _current is not None:\n _current = _current.next\n return result\n\n return iter(_next_entry, sentinal)", "title": "" }, { "docid": "1a5f4c3df00c1dc4047d990e050703d3", "score": "0.58765614", 
"text": "async def accumulate(\n itr: AnyIterable[T], func: Accumulator[T] = operator.add\n) -> AsyncIterator[T]:\n itr = iter(itr)\n try:\n total: T = await next(itr)\n except AnyStop:\n return\n\n yield total\n async for item in itr:\n total = await maybe_await(func(total, item))\n yield total", "title": "" }, { "docid": "764dc94492e583cac974dcb187766d92", "score": "0.5851874", "text": "def __iter__(self):\n self.rewind()\n return self", "title": "" }, { "docid": "6e196794889cb5cf9d80f5436100afef", "score": "0.5848296", "text": "def iter(source, sentinel=None): # known special case of iter\n pass", "title": "" }, { "docid": "93a0d2f5375c2fcd29eae27af98847b7", "score": "0.5847012", "text": "def __iter__(self):\n raise NotImplementedError(\"No __iter__ method\")", "title": "" }, { "docid": "83492cc0ea5f0507753edcc2356a28bf", "score": "0.58456135", "text": "def __next__(self):\n return next(self.iterList)", "title": "" }, { "docid": "ce14b2c3a88cc97bfc55758d268416dd", "score": "0.5836095", "text": "def __iter__(self):\r\n return dIter(self)", "title": "" }, { "docid": "68cecfa16444fb30dc9a039cd079d6b8", "score": "0.5832011", "text": "def __iter__(self):\n self._current_index = 0\n return self", "title": "" }, { "docid": "9f71161b5ee8f8aa092f920e42af3cd9", "score": "0.58319837", "text": "def __add__(self, n):\n return _almathswig.SwigPyIterator___add__(self, n)", "title": "" }, { "docid": "a86be41305959da3a8f48e16315dd112", "score": "0.5816075", "text": "def next(self, playlist, iter):\n if iter is not None:\n return iter\n else:\n return super(BetterTrackRepeatOrder, self).next(playlist, iter)", "title": "" }, { "docid": "c95249b0a7eb94e2c52294c60d858fdf", "score": "0.5813842", "text": "def __iter__(self):\n return self.post_iter()", "title": "" }, { "docid": "374884b28265f30abf9e3c2239826254", "score": "0.58033097", "text": "def __next__(self):\n if self.index == self.tis.size:\n raise StopIteration\n else:\n ti = TwistedInt(self.index, self.tis.size)\n self.index += 1\n return ti", "title": "" }, { "docid": "ea35af43087792f73475fbf7bece0acb", "score": "0.5800778", "text": "def __init__(self, iterator):\n self.iter = iterator\n self.next_val = iterator.cul() if iterator.hasNext() else None", "title": "" }, { "docid": "a29a127961f9ea0dfcc65b0dee4fa997", "score": "0.57744163", "text": "def __iadd__(self, n):\n return _almathswig.SwigPyIterator___iadd__(self, n)", "title": "" }, { "docid": "d61d573b32719554065b768385b60e4f", "score": "0.5765361", "text": "def __iter__(self):\n\t\treturn iter(self.seq)", "title": "" }, { "docid": "8e520adb8289525601b5e60c5acdc7b8", "score": "0.5762696", "text": "def extend(self, iterable): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "5da0848307ad09c699e8a4dec83b1fce", "score": "0.57614225", "text": "def __iter__(self):\n return self.preorder()", "title": "" }, { "docid": "d44625e811d2aa79a2e582b3de483163", "score": "0.57518893", "text": "def _add_from_iterable(self, iterable):\n for item in iterable:\n self.add(item)", "title": "" }, { "docid": "230b324b423286ed78c654002c60d123", "score": "0.5747115", "text": "def __next__(self):\n return next(self._iter_obj)", "title": "" }, { "docid": "184799d166b85a018cfeae74e6ef6d31", "score": "0.57339525", "text": "def __next__(self):\n current = self._current_index\n self._current_index += 1\n try:\n return self._records[current]\n except:\n del self._current_index\n raise StopIteration", "title": "" }, { "docid": "fc5866f42854119bebe5ac84b0989890", "score": "0.5733377", "text": 
"def __next__(self):\n return _almathswig.SwigPyIterator___next__(self)", "title": "" }, { "docid": "7b55933c5f6ea4bcbd7a3d9ea3f2c93a", "score": "0.5729581", "text": "def next_implicit(self, playlist, iter):\n return self.next(playlist, iter)", "title": "" }, { "docid": "1f8f413f204384c0be0e5f8648048dd1", "score": "0.57272506", "text": "def collection_iter(collection):\r\n try:\r\n return getattr(collection, '_sa_iterator',\r\n getattr(collection, '__iter__'))()\r\n except AttributeError:\r\n raise TypeError(\"'%s' object is not iterable\" %\r\n type(collection).__name__)", "title": "" }, { "docid": "46f2f911de7f690e96ba88b6ff07dbd8", "score": "0.57239234", "text": "def next(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "46f2f911de7f690e96ba88b6ff07dbd8", "score": "0.57239234", "text": "def next(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "46f2f911de7f690e96ba88b6ff07dbd8", "score": "0.57239234", "text": "def next(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "1b9347dd1c6a4c19a4ea726987aa42cd", "score": "0.5716969", "text": "def next(self, offset=0):\n raise NotImplementedError() # pragma: no cover", "title": "" }, { "docid": "358ae71d1b90513963a522671a6e5754", "score": "0.57165", "text": "def exhaust(_iter):\n i = None\n for i in _iter:\n pass\n return i", "title": "" }, { "docid": "7248b835650dd95ffc5403205a7de260", "score": "0.5696087", "text": "def next(self):\n raise NotImplementedError", "title": "" }, { "docid": "7248b835650dd95ffc5403205a7de260", "score": "0.5696087", "text": "def next(self):\n raise NotImplementedError", "title": "" }, { "docid": "7248b835650dd95ffc5403205a7de260", "score": "0.5696087", "text": "def next(self):\n raise NotImplementedError", "title": "" }, { "docid": "21e664cc8b06377cc6a33c3c578b7acb", "score": "0.56915843", "text": "def __iter__(self):\n return iter(self._offsets)", "title": "" }, { "docid": "9bae4b8c87ce3e56ffbc42e25e06025b", "score": "0.5690418", "text": "def check_next_on_iterator_method(logical_line):\n res = re_next_on_iterator_method.search(logical_line)\n if res:\n yield (0, \"N337: Use next(iterator) rather than iterator.next().\")", "title": "" }, { "docid": "30c50e0ff0d639f2c11d1dc8f75f87d5", "score": "0.56844294", "text": "def next(self):\n return _almathswig.SwigPyIterator_next(self)", "title": "" }, { "docid": "48a7e9a7bd9e7b35bc8d38aac30e78d4", "score": "0.56732583", "text": "def __iter__(self):\n raise NotImplemented", "title": "" }, { "docid": "853f0551e2f422735cd99d0f64c60ea5", "score": "0.5669993", "text": "def next(self):\n return self._iter.next().resolve()", "title": "" }, { "docid": "639b9e32ba872a984919ac031b13fb59", "score": "0.56655675", "text": "def __next__(self):\n self._k += 1\n if self._k < len(self._seq):\n return self._seq[self._k]\n else:\n raise StopIteration()", "title": "" }, { "docid": "639b9e32ba872a984919ac031b13fb59", "score": "0.56655675", "text": "def __next__(self):\n self._k += 1\n if self._k < len(self._seq):\n return self._seq[self._k]\n else:\n raise StopIteration()", "title": "" }, { "docid": "b02c3528de8cb6198569e0e0b2097783", "score": "0.5665026", "text": "def next(self):\n raise NotImplementedError()", "title": "" }, { "docid": "f76bfcb8f285934974828045da1b051a", "score": "0.5659365", "text": "def wrap_task_iter(iterator):\n try:\n for i in iterator:\n yield i, None\n except Exception as e:\n yield (None, None), e", "title": "" }, { "docid": 
"bdd9de9c68126eb97cf1bd535f8c4da6", "score": "0.5656446", "text": "def iterfirst(iterator, count=1):\n iterator = iter(iterator)\n for i in xrange(count):\n yield iterator.next()", "title": "" }, { "docid": "7612e8c0dbf549c6361aeb9b25c1a53a", "score": "0.5654138", "text": "def __iter__(self):\n return iter(self.list())", "title": "" }, { "docid": "ff7158791d338dd32a8db9ff4b55526a", "score": "0.56535023", "text": "def __iter__(self):\n return iter(self._positive + self._negative)\n #return iter(self._positive)", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.5650908", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.5650908", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.5650908", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.5650908", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.5650908", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.5650908", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.5650908", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.5650908", "text": "def __iter__():", "title": "" }, { "docid": "cf64161d3d1acc285a4be05854a03475", "score": "0.5641044", "text": "def next_(self, *args):\n return next(self, *args)", "title": "" }, { "docid": "d8b8855430e72130b72dbc5c355deb8a", "score": "0.5636314", "text": "def __iter__(self):\n return iter(self._wrapped)", "title": "" }, { "docid": "dc912eff285e3d3391ace1144d2e9768", "score": "0.56245196", "text": "def __next__(self):\n if self._current is None: # our convention to end a progression\n raise StopIteration()\n else:\n answer = self._current # record current value to return\n self._advance() # advance to prepare for next time\n return answer # return the answer", "title": "" }, { "docid": "907de8d883c15911d7d5539587110920", "score": "0.5621382", "text": "def __next__(self):\n if self._current is None: # our convention to end a progression\n raise StopIteration()\n else:\n answer = self._current # record current value to return\n self._advance( ) # advance to prepare for next time\n return answer # return the answer", "title": "" }, { "docid": "2dcf4e2c40be6e59b2ce7f45e8a8fc84", "score": "0.561584", "text": "def iterwhile(func, iterator):\n iterator = iter(iterator)\n while 1:\n next = iterator.next()\n if not func(next):\n raise StopIteration\n yield next", "title": "" }, { "docid": "580325fc5f9908b6af8648d6a36e278e", "score": "0.5614728", "text": "def __iter__(self):\n return self.generator()", "title": "" }, { "docid": "9bc96da4b3eb887223c12d3833b94776", "score": "0.56096697", "text": "def __iter__(self):\n raise NotImplementedError", "title": "" }, { "docid": "bbc0af257200434855794a504468ca75", "score": "0.56083983", "text": "def __iter__(self):\n for i in xrange(len(self)):\n yield self[i]", "title": "" }, { "docid": "e197760953b01cff4d35ad4d01c9f51d", "score": "0.5605366", "text": "def append(self,iterable):\n self.__iters.append(iter(iterable))", "title": "" }, { "docid": "ed3700eaeae7a15463d1ceb0c19fc293", "score": "0.5599422", "text": "def __iter__(self):\n self.iterList = iter(self.list)\n return self", "title": "" }, { "docid": "eefe4dcbe7d95cc1a472afe1b68516bd", "score": "0.5594432", "text": "def iterator(fn):\r\n 
setattr(fn, '_sa_instrument_role', 'iterator')\r\n return fn", "title": "" }, { "docid": "d769ba9995674ebc95ce5a36046cf3a5", "score": "0.5593157", "text": "def can_support_iterator(self):\n raise NotImplementedError()", "title": "" }, { "docid": "bb39c7d2186e7581f4456687f7fb4576", "score": "0.5592334", "text": "def __iter__(self) -> \"RobinIterator\":\n return self", "title": "" }, { "docid": "f36a336753cf5cd9a09e8717104de551", "score": "0.5591843", "text": "def __iter__(self):\n return self #Because the class is the Iterator.", "title": "" }, { "docid": "dd4399d2ea6512575255847b7c748759", "score": "0.5587322", "text": "def __iter__(self):\n yield self", "title": "" }, { "docid": "404c8d57bca900d034933df1e035a836", "score": "0.55812687", "text": "def __iter__(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "404c8d57bca900d034933df1e035a836", "score": "0.55812687", "text": "def __iter__(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "8c74d69d574def84d1f46c74cd92b069", "score": "0.55811983", "text": "def __iter__(self) -> Iterable[X]:\n\n return iter(self._pointer)", "title": "" } ]
b8904ef7b4372200ba43f32d2075f657
Evaluates the top curve of a circle using the x-coordinate
[ { "docid": "7bba9db2092483da7011fc322b984abe", "score": "0.6089204", "text": "def circle(xpos) : \r\n \r\n yPos = np.array(np.sqrt(1 - xpos**2))\r\n \r\n return yPos", "title": "" } ]
[ { "docid": "b097246dc5c0474d62f8675c80afe6c9", "score": "0.59327", "text": "def _x(self, curve):\n return self._c(curve)[:, 0]", "title": "" }, { "docid": "d9671ef8ff6e3f4984a09931e59f9a1b", "score": "0.5818806", "text": "def least_squares_ellipse(\r\n x,y):\r\n \r\n xy = np.array([x,y]).T\r\n ellipse =EllipseModel()\r\n ellipse.estimate(xy)\r\n xc,yc,a,b,theta_t=ellipse.params\r\n return xc,yc,a,b,theta_t", "title": "" }, { "docid": "27704febcc463ecc45f1c6f954b86004", "score": "0.5604246", "text": "def view_x(x):\n return int(inner_area * norm_x(x)) + radius", "title": "" }, { "docid": "c52743972f4997ee9d4b13760fa85d0e", "score": "0.5589874", "text": "def __curve__(x, a, b, c):\n return a * np.exp(b * x) + c", "title": "" }, { "docid": "095c2440bfa6ebdd4346f9ba9b5ecd1d", "score": "0.5493687", "text": "def evaluate_circle(eval_info):\n x, y = eval_info.x - 0.5, eval_info.y - 0.5\n distance = math.sqrt(x*x + y*y)\n inner_radius, outer_radius = min_max(eval_info.evaluate('inner_radius', eval_info.x, eval_info.y),\n eval_info.evaluate('outer_radius', eval_info.x, eval_info.y))\n background = eval_info.evaluate('background', eval_info.x, eval_info.y)\n if inner_radius < distance < outer_radius:\n hardness = eval_info.evaluate('hardness', eval_info.x, eval_info.y)\n distance = (distance - inner_radius) / (outer_radius - inner_radius)\n t = smooth_step(hardness, 1.0, distance) if hardness < 1.0 else 0.0\n return eval_info.evaluate('color', eval_info.x, eval_info.y).lerp(background, t)\n return eval_info.evaluate('background', eval_info.x, eval_info.y)", "title": "" }, { "docid": "dede154b648b2be87770617a4ac10389", "score": "0.54920495", "text": "def runXY(self, x=0.0):\n return CFractalO_Z.runXY(self, x)", "title": "" }, { "docid": "4497898876673efd7d75350b1ff49b93", "score": "0.5472464", "text": "def evaluate(x,y):\n return np.sin(x+y) + (x-y)**2 - 1.5*x + 2.5*y + 1.", "title": "" }, { "docid": "7f61568307b5db481b5ed564faab3547", "score": "0.5416151", "text": "def find_best_circle(x, y):\n method_2 = \"leastsq\"\n\n x_m, y_m = np.mean(x), np.mean(y)\n def calc_R(xc, yc):\n \"\"\"\n calculate the distance of each 2D points from the center (xc, yc)\n \"\"\"\n return ((x-xc)**2 + (y-yc)**2) ** (1/2)\n\n def f_2(c):\n \"\"\"\n calculate the algebraic distance between the data points and\n the mean circle centered at c=(xc, yc)\n \"\"\"\n Ri = calc_R(*c)\n return Ri - Ri.mean()\n\n center_estimate = x_m, y_m\n center_2, ier = optimize.leastsq(f_2, center_estimate)\n\n xc_2, yc_2 = center_2\n Ri_2 = calc_R(*center_2)\n R_2 = Ri_2.mean()\n residu_2 = sum((Ri_2 - R_2)**2)\n\n return xc_2, yc_2, R_2", "title": "" }, { "docid": "3506a30431d2514ef058ff6ea356f944", "score": "0.5407638", "text": "def nextCoordinateCircle(self):", "title": "" }, { "docid": "2d675a0aed149b505cebcff29f6c94c7", "score": "0.53905815", "text": "def xcor(self):\n return self._position[0]", "title": "" }, { "docid": "9879060b17f46ad4cd2c0a7777982d26", "score": "0.5381142", "text": "def high(k, c, x, y):\n return round(abs(k*x - y + c) / (k**2 + 1)**0.5, 2)", "title": "" }, { "docid": "be7e0a296e67b6457539e693f941c80b", "score": "0.53663397", "text": "def rowlandcircle(y,rowland_y0=rowland_y0,rowland_z0=rowland_z0,rowland_r0=rowland_r0,neg=False):\n z = sqrt(rowland_r0**2 - (y-rowland_y0)**2) \n if neg: return -z+rowland_z0\n else: return z+rowland_z0", "title": "" }, { "docid": "6c856136461c781cca31cb264a343321", "score": "0.5360052", "text": "def eval(self, xx, params=None):\r\n if params==None:\r\n params = self.params\r\n 
global _chance\r\n _chance=self.expectedMin\r\n #_eval is a static method - must be done this way because the curve_fit\r\n #function doesn't want to have any `self` object as first arg\r\n yy = self._eval(xx, *params)\r\n return yy", "title": "" }, { "docid": "5afee425487a9be76b87a54f4a307552", "score": "0.5329836", "text": "def equation(x):\r\n y = np.sqrt(x) + np.cos(x)\r\n return y", "title": "" }, { "docid": "abb235ed83e903bcef807a535d712e82", "score": "0.53118265", "text": "def eval(self, x):\r\n y = N.zeros([N.size(x),N.size(self.ordinate,1)])\r\n for i in range(N.size(y,1)):\r\n y[:,i] = N.interp(x,self.abscissa,self.ordinate[:,i])\r\n y[:,i] = N.where(x < self.abscissa[0], self.ordinate[0,i]+(x-self.abscissa[0])*(self.ordinate[0,i]-self.ordinate[1,i])/(self.abscissa[0]-self.abscissa[1]), y[:,i])\r\n y[:,i] = N.where(x > self.abscissa[-1], self.ordinate[-1,i]+(x-self.abscissa[-1])*(self.ordinate[-1,i]-self.ordinate[-2,i])/(self.abscissa[-1]-self.abscissa[-2]), y[:,i])\r\n return y", "title": "" }, { "docid": "a281149830659a95007e85a287e4a8c0", "score": "0.5310169", "text": "def radial_graphon(x, y):\n return 1 - _np.minimum(_np.sqrt(x**2 + y**2), 1)", "title": "" }, { "docid": "d9c6c4004b66690a83d42434235f56a0", "score": "0.5299486", "text": "def draw_bresenhams_circle(x_initial,y_initial,radius):\n\n # initial \n x,y,r= 0, radius , radius\n\n # define decision parameter\n d = 3 - 2 * r\n\n xi=x_initial\n yi=y_initial\n\n # initial points \n plot_circle(xi,yi,x,y)\n\n\n # while loop to iterate until x == y \n # where angle = 45deg\n \n while x <= y :\n x = x + 1\n if d<0:\n d = d + (4*x) +6\n else:\n y = y - 1;\n d = d + 4*(x-y) + 10\n plot_circle(xi,yi,x,y)", "title": "" }, { "docid": "dff473ecaf9e71406e7bbe9df0d9335b", "score": "0.5279597", "text": "def plotCircle(xm, ym, r, raster, val):\n x = -r\n y = 0\n err = 2 - 2 * r\n while True:\n raster[xm-x, ym+y] = val\n raster[xm-y, ym-x] = val\n raster[xm+x, ym-y] = val\n raster[xm+y, ym+x] = val\n r = err\n\n if (r <= y):\n y += 1\n err += y * 2 + 1 # e_xy+e_y < 0\n if (r > x or err > y):\n x += 1\n err += x * 2 + 1 # e_xy+e_x > 0 or no 2nd y-step\n\n if (x >= 0):\n break", "title": "" }, { "docid": "8b135ffd1643d869f2fee09055b021a2", "score": "0.5275285", "text": "def _drawcircle(self,x,y,rad):\r\n if(self.gpi >0):\r\n color=\"blue\"\r\n elif(self.gpi == -1 or self.gpi == -2):\r\n color = \"yellow\"\r\n elif(self.gpi == -3 or self.gpi == -5):\r\n color = \"red\"\r\n else:\r\n color = \"black\"\r\n return self.c.create_oval(x-rad,y-rad,x+rad,y+rad,width=rad/5,fill=color,outline='black')", "title": "" }, { "docid": "65b91770dc43b7fb746faa39bfb53798", "score": "0.5271553", "text": "def radius(x, y):\r\n return (x**2 + y**2)**(1/2)", "title": "" }, { "docid": "732aa1e0d3c6ed06064fdb839e61c8ce", "score": "0.5266238", "text": "def _drawcircle(self,x,y,rad):\n\t\tcolor=\"red\"\n\t\treturn self.c.create_oval(x-rad,y-rad,x+rad,y+rad,width=rad/5,fill=color,outline='black')", "title": "" }, { "docid": "e4590131b35ae6c2360c1b8333e718e5", "score": "0.525311", "text": "def eval(self, x, absolute=False):\n if absolute and self._i_0 is None:\n raise RuntimeError(\"Absolute intensity evaluation requested but \"\n \"no center intensity has been set.\")\n\n cos_psi = self._dist_to_cos_psi(x)\n i = poly.polyval(cos_psi, self._coefs)\n if absolute:\n i = i * self._i_0\n return i", "title": "" }, { "docid": "794c1b88fb2a24fc3f35a0756d2c6eda", "score": "0.5216071", "text": "def at_x(self, x):\n\n def k_x(y):\n return self.eval(x, y)\n\n return k_x", 
"title": "" }, { "docid": "e236b0c3aee2080cc9869ab4d8bacee1", "score": "0.5207828", "text": "def get_square(self, coord, radius):", "title": "" }, { "docid": "6deb7ee499be62374eda47692a31fad9", "score": "0.52001333", "text": "def Circle(x_radius, origin=None, superness=CIRCULAR_SUPERNESS):\n return Ellipse(x_radius, x_radius, origin=origin, superness=superness)", "title": "" }, { "docid": "cb371a73d5e0f69299496281a3e3e457", "score": "0.5196261", "text": "def get_area_circle(r):\n return -1.0", "title": "" }, { "docid": "796ee68681760d6e3883213f086879de", "score": "0.5174812", "text": "def find_zero(self, start_x):\r\n max_error = 1e-6\r\n x = start_x\r\n for i in range(100):\r\n # Calculate and plot this point.\r\n y = self.f(x)\r\n self.drawing_canvas.wdraw_circle(x, y, 4, \"\", \"green\")\r\n print(f\"({x}, {y})\")\r\n\r\n # If we have a small enough error, stop.\r\n if abs(y) < max_error:\r\n break\r\n\r\n # Update x.\r\n x -= y / self.df_dx(x)\r\n\r\n print()\r\n return x", "title": "" }, { "docid": "ddeff639e2a13346ebab7c2aeef27668", "score": "0.51691926", "text": "def plot_circle(xi,yi,x,y):\n point_1 = Point(xi+x, yi+y) \n draw_point(point_1);\n\n point_2 = Point(xi-x, yi+y) \n draw_point(point_2);\n\n point_3 = Point(xi+x, yi-y) \n draw_point(point_3);\n\n point_4 = Point(xi-x, yi-y) \n draw_point(point_4);\n\n point_5 = Point(xi+y, yi+x) \n draw_point(point_5);\n\n point_6 = Point(xi-y, yi+x) \n draw_point(point_6);\n\n point_7 = Point(xi+y,yi-x)\n draw_point(point_7);\n\n point_8 = Point(xi-y, yi-x) \n draw_point(point_8);", "title": "" }, { "docid": "e424925e146b130d7f727a088b60ff57", "score": "0.51678884", "text": "def evalPoint(self, x): # TARGET FUNCTION\n if x[1] < self.m*x[0] + self.b:\n return -1\n else:\n return 1", "title": "" }, { "docid": "b340da3056c87b17aaeac7c597f5e0c6", "score": "0.515837", "text": "def ctcoor(x,x0,dx):\n xc = round((x-x0)/dx)*dx+x0\n\n return xc", "title": "" }, { "docid": "21b4270348835eac20043940308fda01", "score": "0.5157684", "text": "def evaluate(self, x, y):", "title": "" }, { "docid": "ef4551cc1aebe2442ae7164340795aa5", "score": "0.51495236", "text": "def _create_circle(self, x, y, r, **kwargs):\r\n return self.create_oval(x-r, y-r, x+r, y+r, **kwargs)", "title": "" }, { "docid": "43d2ec9fa81ea0afa5cdfc1f1e35cb26", "score": "0.51492345", "text": "def find_radius(alpha, xi):\n\n return alpha * xi", "title": "" }, { "docid": "188137ca3c92b5e2d0fe7ae95eb166a8", "score": "0.512802", "text": "def select_circle(self, x, y, xc, yc, r, mode=\"replace\", name=\"default\", inclusive=True):\n\n # expr = \"({x}-{xc})**2 + ({y}-{yc})**2 <={r}**2\".format(**locals())\n if inclusive:\n expr = (self[x] - xc)**2 + (self[y] - yc)**2 <= r**2\n else:\n expr = (self[x] - xc)**2 + (self[y] - yc)**2 < r**2\n\n self.select(boolean_expression=expr, mode=mode, name=name)", "title": "" }, { "docid": "616df601249c58eafef75635c1e4ad98", "score": "0.5107525", "text": "def grandeur(x, y):\n return np.sqrt(x**2 + y**2) / (x - 5 * y) / 100", "title": "" }, { "docid": "512c263cf317c6774e4d37ff42b62ac6", "score": "0.5096249", "text": "def proj(self, x, c):", "title": "" }, { "docid": "3013dd7545aa7651646b89126f6e6eeb", "score": "0.50945807", "text": "def eval(self, x):\r\n y = N.zeros([N.size(x),N.size(self.ordinate,1)])\r\n x = N.array([x]).flatten()\r\n \r\n if self._mode == 1:\r\n for i in range(N.size(y,1)):\r\n for j in range(N.size(x)):\r\n try:\r\n y[j,i] = self.ordinate[self.abscissa<=x[j],i][-1]\r\n except IndexError:\r\n pass\r\n y[:,i] = N.where(x < 
self.abscissa[0], self.ordinate[0,i], y[:,i])\r\n y[:,i] = N.where(x > self.abscissa[-1], self.ordinate[-1,i], y[:,i])\r\n else:\r\n for i in range(N.size(y,1)):\r\n for j in range(N.size(x)):\r\n try:\r\n y[j,i] = self.ordinate[self.abscissa>=x[j],i][0]\r\n except IndexError:\r\n pass\r\n y[:,i] = N.where(x < self.abscissa[0], self.ordinate[0,i], y[:,i])\r\n y[:,i] = N.where(x > self.abscissa[-1], self.ordinate[-1,i], y[:,i])\r\n return y", "title": "" }, { "docid": "b84fa72fca80fede0683488c888dc9f8", "score": "0.50778306", "text": "def min_enclosing_rectangle(radius, x, y):\n if radius >= 0:\n return(x-radius, y-radius)", "title": "" }, { "docid": "0a62f1c2bd2af3b208a4b414ff65def8", "score": "0.5049287", "text": "def dcircle(p, xc, yc, r):\n return np.sqrt(((p - np.array([xc, yc])) ** 2).sum(-1)) - r", "title": "" }, { "docid": "d96c3a0304c48c5fd339b802ee0b7fa0", "score": "0.504733", "text": "def circle(self, x_c, y_c, r):\n def gamma(x,y):\n return (y-y_c)**2 + (x - x_c)**2 - r**2\n\n def gamma_jacobian(x,y):\n return np.array([2*(x - x_c), 2*(y-y_c)])\n\n self.gamma = gamma\n self.gamma_jacobian = gamma_jacobian", "title": "" }, { "docid": "82070e96f9cdb6c411fc29875f81b76b", "score": "0.5044992", "text": "def discretize_circle(radius, x_one):\n\n\t# particle data\n\tsim_particle_r = radius\n\n\t# store location of nodes \n\tsim_particles = []\n\n\t# store area of nodes\n\tsim_particles_area = []\n\n\t# get angle\n\tsim_num_theta = 8\n\tsim_theta_interval = 2. * np.pi / (float(sim_num_theta))\n\tangle = 0.\n\n\t# get matrix\n\tmatrix = []\n\ta = [np.cos(angle), -np.sin(angle)]\n\tmatrix.append(a)\n\ta = [np.sin(angle), np.cos(angle)]\n\tmatrix.append(a)\n\n\t# add first point\n\tp = [1., 0.]\t\t\n\tp = mult(matrix, p)\n\tsim_particles.append(p)\n\tarea = sim_theta_interval * (1. - (1. + x_one) * (1. + x_one) * 0.25)\n\tsim_particles_area.append(area)\n\tprint_point(p, area, \"angle = %4.6e\\n\" %(angle))\n\n\t# add first point\n\tp = [x_one, 0.]\t\t\n\tp = mult(matrix, p)\n\tsim_particles.append(p)\n\tsim_particles_area.append(area)\n\tprint_point(p, area, \"angle = %4.6e\\n\" %(angle))\n\n\tcontinue_add = True\n\tx_pre_pre = 1.\n\tx_pre = x_one\n\tcounter = 1\n\t# loop over internal points\n\twhile continue_add == True:\n\t\t# \n\t\tval = (x_pre + x_pre_pre) * (x_pre + x_pre_pre) - 4. 
* area / (sim_theta_interval)\n\n\t\tif val < 0.:\n\t\t\tcontinue_add = False\n\n\t\tif continue_add == True:\n\n\t\t\tcounter = counter + 1\n\n\t\t\tif counter % 2 != 0:\n\n\t\t\t\tprint('counter %d\\n' % (counter))\n\n\t\t\t\t# get next internal point\n\t\t\t\tx_new = np.sqrt(val) - x_pre\n\n\n\t\t\t\t#\n\t\t\t\tp = [x_new, 0.]\n\t\t\t\tp = mult(matrix, p)\n\n\t\t\t\t# add point and area\n\t\t\t\tsim_particles.append(p)\n\t\t\t\tsim_particles_area.append(area)\n\n\t\t\t\tarea_new = sim_theta_interval * ((x_pre + x_pre_pre) * (x_pre + x_pre_pre) * 0.25 - (x_pre + x_new) * (x_pre + x_new) * 0.25)\n\n\t\t\t\tprint_point(p, area_new)\n\n\t\t\t\tx_pre_pre = x_pre\n\t\t\t\tx_pre = x_new\n\n\t\t\t\t# if len(sim_particles) > 20:\n\t\t\t\t\t# continue_add = False\n\t\t\telse:\n\t\t\t\tprint('skipped')\n\n\n\t# end of while loop\n\n\t# skip some points\n\tsim_particles_new = []\n\tsim_particles_area_new = []\n\tfor i in xrange(len(sim_particles)):\n\t\tcontinue_i = True\n\n\t\tif i > 1:\n\t\t\tif (i+1) % 2 != 0:\n\t\t\t\tcontinue_i = False\n\n\t\tif continue_i == True:\n\n\t\t\tx = [sim_particles[i][0], sim_particles[i][1]]\n\t\t\tsim_particles_new.append(x)\n\t\t\tsim_particles_area_new.append(sim_particles_area[i])\n\n\t\t\t# skip some points\n\tsim_particles_nn = []\n\tsim_particles_area_nn = []\n\tfor i in xrange(len(sim_particles_new)):\n\t\t\n\t\tx = [sim_particles_new[i][0], sim_particles_new[i][1]]\n\t\tsim_particles_nn.append(x)\n\n\t\tx_next = [0., 0.]\n\t\tif i < len(sim_particles_new) - 2:\n\t\t\tx_next = [sim_particles_new[i+1][0], sim_particles_new[i+1][1]]\n\n\t\tx_prev = [0., 0.]\n\t\tif i > 0:\n\t\t\tx_prev = [sim_particles_new[i-1][0], sim_particles_new[i-1][1]]\n\n\t\t# area\n\t\tarea_new = 0.\n\t\tif i == 0:\n\t\t\tarea_new = sim_theta_interval * (1. * 1. - (1. + x[0]) * (1. 
+ x[0]) * 0.25)\n\t\telif i == len(sim_particles) - 1:\n\t\t\tarea_new = sim_theta_interval * ((x_prev[0] + x[0]) * (x_prev[0] + x[0]) * 0.25)\n\t\telse:\n\t\t\tarea_new = sim_theta_interval * ((x_prev[0] + x[0]) * (x_prev[0] + x[0]) * 0.25 - (x_next[0] + x[0]) * (x_next[0] + x[0]) * 0.25)\n\n\t\tsim_particles_area_nn.append(area_new)\n\n\t# rotate all points\n\tsim_particles_new_new = []\n\tsim_particles_area_new_new = []\n\tfor theta in xrange(sim_num_theta):\n\t\tangle = theta * sim_theta_interval\n\n\t\t# get matrix\n\t\tmatrix = []\n\t\ta = [np.cos(angle), -np.sin(angle)]\n\t\tmatrix.append(a)\n\t\ta = [np.sin(angle), np.cos(angle)]\n\t\tmatrix.append(a)\n\n\t\tfor i in xrange(len(sim_particles_nn)):\n\t\t\tx_old = [radius * sim_particles_nn[i][0], radius * sim_particles_nn[i][1]]\n\t\t\tarea = radius * radius * sim_particles_area_nn[i]\n\n\t\t\tx_new = mult(matrix, x_old)\n\n\t\t\tsim_particles_new_new.append(x_new)\n\t\t\tsim_particles_area_new_new.append(area)\n\n\t\t# loop points in y=0 line\n\n\t# loop over angles\n\n\t# print points to csv file\n\t# generate csv file\n\tinpf = open('circle_mesh.csv','w')\n\n\t# header\n\t# inpf.write(\"i, x, y, z, area\\n\")\n\n\tfor i in xrange(len(sim_particles_new_new)):\n\t\tinpf.write(\"%d, %Lf, %Lf, %Lf, %Lf\\n\" % (i, sim_particles_new_new[i][0], sim_particles_new_new[i][1], 0., sim_particles_area_new_new[i]))\n\n\tinpf.close()", "title": "" }, { "docid": "099594bcc96d4bd738223022d8c88b43", "score": "0.50351524", "text": "def get_co_extremes_curve(obj):\n xmax = -float(\"inf\")\n xmin = float(\"inf\")\n ymax = -float(\"inf\")\n ymin = float(\"inf\")\n\n stroke_width = get_stroke_width(obj)/2\n # iterate over points of the curve's first spline\n for spline in obj.data.splines:\n for p in spline.bezier_points:\n co = obj.matrix_world * p.co\n co_handle_left = obj.matrix_world * p.handle_left\n co_handle_right = obj.matrix_world * p.handle_right\n\n if co[0] > xmax:\n xmax = co[0]\n if co[0] < xmin:\n xmin = co[0]\n if co[1] > ymax:\n ymax = co[1]\n if co[1] < ymin:\n ymin = co[1]\n\n if co_handle_right[0] > xmax:\n xmax = co_handle_right[0]\n if co_handle_right[0] < xmin:\n xmin = co_handle_right[0]\n if co_handle_right[1] > ymax:\n ymax = co_handle_right[1]\n if co_handle_right[1] < ymin:\n ymin = co_handle_right[1]\n\n if co_handle_left[0] > xmax:\n xmax = co_handle_left[0]\n if co_handle_left[0] < xmin:\n xmin = co_handle_left[0]\n if co_handle_left[1] > ymax:\n ymax = co_handle_left[1]\n if co_handle_left[1] < ymin:\n ymin = co_handle_left[1]\n\n return xmin - stroke_width, xmax + stroke_width, ymin - stroke_width, ymax + stroke_width", "title": "" }, { "docid": "024b73e349f8382f2fa57d0de8ae098e", "score": "0.50280523", "text": "def x_constraint(q, xy):\n x = ( self.hip_ro.getX() + self.L[0]*np.cos(q[0]) - self.L[1]*np.cos(q[0]+q[1]) ) - xy[0]\n return x", "title": "" }, { "docid": "960633e6105984ee973419981f422beb", "score": "0.5027098", "text": "def createOval(self, x, y, r, color = \"GREY\"):\n\t\tself.dc.SetBrush(wx.Brush(color, wx.SOLID))\n\t\tself.dc.SetPen(wx.Pen(color))\t\t \n\t\ty = self.maxy - y + self.yoffset\n\t\tox = x / self.scale\n\t\tox += self.xoffset\n\t\tself.dc.DrawCircle(ox, y / self.scale, r)", "title": "" }, { "docid": "09eca8e317ba818b5c3b18d07a9385ad", "score": "0.5016905", "text": "def calc_radius(self):\n y_eval = self.y_eval\n fit = self.best_fit_m\n if y_eval and fit:\n curve_rad = ((1 + (2*fit[0]*y_eval + fit[1])**2)**1.5) / np.absolute(2*fit[0])\n self.radius_of_curvature = curve_rad\n return", "title": "" 
}, { "docid": "e74047669ce5d081b0ad1cdb7a0f5e65", "score": "0.5014061", "text": "def _pchallxy(self):\n pdata=self._rchallxy()\n ep.llxy(pdata)\n return", "title": "" }, { "docid": "a5ef7fb963ea8aae820811470efad998", "score": "0.49985623", "text": "def xy(self, curve):\n n = self._c(curve)\n print 'xy', type(n)\n return n[:, 0], n[:, 1]", "title": "" }, { "docid": "3dd145bb7c509c3deda62d10af8e8b4d", "score": "0.49924806", "text": "def ellipse_params(self):\n return self._params.x[:5]", "title": "" }, { "docid": "b378f26d7c2de685f6dc02d293bd7584", "score": "0.49912798", "text": "def __call__(self, x):\n return self._horner_evaluation(x)", "title": "" }, { "docid": "1b4bfafcb54821fb9af31e1e44d0db9b", "score": "0.49897653", "text": "def pointInsideEllipse(self, pt, majorAxis, f1 = None, f2 = None):\n\t\trx,ry,rz = pt\n\t\tx,y,z = self.parameters[\"X\"], self.parameters[\"Y\"], self.parameters[\"Z\"]\n\t\tdx = (y-majorAxis)/2\n\t\t#print \"Testing\",rx,ry,rz,\"major axis=\",self.majorAxis\n\t\tif not f1:\n\t\t\tf1y = 2*dx\n\t\t\tf1x = x/2\n\t\telse:\n\t\t\tf1x,f1y = f1\n\t\tif not f2:\n\t\t\tf2y = y-(2*dx)\n\t\t\tf2x = x/2\n\t\telse:\n\t\t\tf2x,f2y = f2\n\t\t\t\n\t\tp1 = (f1x-rx,f1y-ry)\n\t\tp2 = (f2x-rx,f2y-ry)\n\t\td1 = math.sqrt(p1[0]*p1[0]+p1[1]*p1[1])+math.sqrt(p2[0]*p2[0]+p2[1]*p2[1])\n\t\treturn d1 < majorAxis", "title": "" }, { "docid": "de8a390e95294b04788e76102eeeeeea", "score": "0.49846998", "text": "def DrawCurve():\n #plot the e* r* curve from roering 2007\n x = np.arange(0.01, 1000, 0.1)\n plt.plot(x, R_Star_Model(x), 'k-', linewidth=2, label='Equation 5')", "title": "" }, { "docid": "9730c523b338c454cfd45a84b08fe4b7", "score": "0.49798942", "text": "def evaluate_single_true(self, x):\r\n x0 = 15 * x[0] # [0, 15.]\r\n x1 = 20 * x[1] - 5.0 # [-5, 15]\r\n # x0 = x[0]\r\n # x1 = x[1]\r\n a = 1\r\n b = old_div(5.1, (4 * pow(numpy.pi, 2.0)))\r\n c = old_div(5, numpy.pi)\r\n r = 6\r\n s = 10\r\n t = old_div(1, (8 * numpy.pi))\r\n y = numpy.array([(a * pow(x1 - b * pow(x0, 2.0) + c * x0 - r, 2.0) + s * (1 - t) * numpy.cos(x0) + s)])\r\n if self._minimize:\r\n return y \r\n else:\r\n return -y", "title": "" }, { "docid": "d43f2a9b007d4f2025b06d0ced6cc780", "score": "0.49737012", "text": "def _circumcircle(self):\n ax, ay = self.p1\n bx, by = self.p2\n cx, cy = self.p3\n d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2\n x = ((ax**2 + ay**2)*(by - cy) +\n (bx**2 + by**2)*(cy - ay) +\n (cx**2 + cy**2)*(ay - by)) / d\n y = ((ay**2 + ax**2)*(cx - bx) +\n (by**2 + bx**2)*(ax - cx) +\n (cy**2 + cx**2)*(bx - ax)) / d\n r = math.sqrt((x - ax)**2 + (y - ay)**2)\n return ((x, y), r)", "title": "" }, { "docid": "039fd6e70023437d196206fbb904b076", "score": "0.49734527", "text": "def ellipse_fit():\n xk,yk=np.load(\"ellipse.npy\").T\n A=np.column_stack((xk**2,xk,xk*yk,yk,yk**2))\n b=np.ones_like(xk)\n a,b,c,d,e = la.lstsq(A,b)[0]\n plot_ellipse(a,b,c,d,e)\n plt.plot(xk,yk,'k.')\n plt.show()", "title": "" }, { "docid": "8b5f6ed6f5091056ca4cab814961ab87", "score": "0.49656135", "text": "def arc_prior(self,xy):\n return self.expected_distance", "title": "" }, { "docid": "4622c22c43fdf7bbcf76f1265cbc31d5", "score": "0.49614462", "text": "def link_circle_up_lineandcircle(x, y, r, ax, first=0):\n ax.plot(x, y, '-', c='black',alpha=0.7)\n ax.plot(x[1], y[1], 'o', ms=r, lw=2, alpha=0.7, mfc='orange')\n\n if first > 0:\n ax.plot(x[0], y[0], 'o', ms=first, lw=2, alpha=0.7, mfc='orange')", "title": "" }, { "docid": "d723893fc0341cde97f5145c8725fb65", "score": "0.49511576", "text": "def 
getPriceBarOpenScenePoint(self):\n\n openPrice = 0.0\n high = 0.0\n low = 0.0\n\n if self.priceBar != None:\n openPrice = self.priceBar.open\n high = self.priceBar.high\n low = self.priceBar.low\n\n priceMidpoint = (high + low) * 0.5\n\n x = 0.0\n yOpen = -1.0 * (openPrice - priceMidpoint)\n yHigh = -1.0 * (high - priceMidpoint)\n yLow = -1.0 * (low - priceMidpoint)\n\n # Return value.\n rv = self.mapToScene(QPointF(x, yOpen))\n\n return rv", "title": "" }, { "docid": "6923003fb3fd68cb6a1a90282bcb7bf2", "score": "0.4947209", "text": "def lineSearch(self):\n theta = minimize_scalar(lambda x: self.calculateZ(x), bounds = (0,1), method = 'Bounded')\n return theta.x", "title": "" }, { "docid": "ec3095d250ef5dc57a13ad5351873dde", "score": "0.4946811", "text": "def draw_circle(image, x_center, y_center, radius, value=0):\n new_image = np.copy(image)\n (height, width) = image.shape\n for x in range(width):\n for y in range(height):\n if (x - x_center)**2 + (y-y_center)**2 <= radius**2:\n new_image[y][x] = value\n return new_image", "title": "" }, { "docid": "b1d79736c82fee38715a70e2d84dcf96", "score": "0.49436665", "text": "def get_y(self, x):\n result = self.m_circle0.get_y(abs(x))\n if result:\n return max(result)\n else:\n return None", "title": "" }, { "docid": "3cbe2088c08874d6d83bb1305b6861ce", "score": "0.4943518", "text": "def evaluation_radius(self):\n return self.parameters[\"semi_major\"].quantity", "title": "" }, { "docid": "6dfc9ef0c3727cff4bc111886701d2ba", "score": "0.49417406", "text": "def _obj_func(point):\n x, y = point\n ct = np.cos(np.pi / 4)\n st = np.sin(np.pi / 4)\n xn = ct * x + st * y\n yn = ct * y - st * x\n x = xn\n y = yn\n return (\n 3 * (1 - x) ** 2.0 * np.exp(-(x ** 2) - (y + 1) ** 2)\n - 10 * (x / 5.0 - x ** 3 - y ** 5) * np.exp(-(x ** 2) - y ** 2)\n - 1 / 3 * np.exp(-((x + 1) ** 2) - y ** 2)\n )", "title": "" }, { "docid": "77269fc7116327c2e8d1b3399be27435", "score": "0.49367395", "text": "def calc(self, x):\n \n return self._calc(x)", "title": "" }, { "docid": "746b9634de5b16d0a80a08e95d69751d", "score": "0.49333367", "text": "def bezier_circle(name=None, **kwargs):\r\n names, kwargs = utils.clean_names(name, kwargs, {'curve_name':'Bezier', 'obj_name':'bezier', 'priority_curve':'current', 'priority_obj':'new'}, 'curve')\r\n kwargs, _ = pn.clean_kwargs(kwargs, {'r':1, 'h':None})\r\n r = kwargs['r']\r\n h = kwargs['h']\r\n if h is None:\r\n h = r*(np.sqrt(2)/2 - 4*(0.5**3))/(3*(0.5**3)) # handle length for cubic bezier approx. 
of a circle\r\n\r\n path_obj = core.CurveObject(names['obj_name'], core.Curve(names['curve_name']))\r\n path_obj.to_coll(names['coll_name'])\r\n\r\n spl = path_obj().data.splines.new(type='BEZIER')\r\n spl.bezier_points.add(3)\r\n spl.bezier_points[0].co = (-r, 0, 0)\r\n spl.bezier_points[1].co = (0, r, 0)\r\n spl.bezier_points[2].co = (r, 0, 0)\r\n spl.bezier_points[3].co = (0, -r, 0)\r\n\r\n spl.bezier_points[0].handle_right = (-r, h, 0)\r\n spl.bezier_points[0].handle_left = (-r, -h, 0)\r\n\r\n spl.bezier_points[1].handle_right = (h, r, 0)\r\n spl.bezier_points[1].handle_left = (-h, r, 0)\r\n\r\n spl.bezier_points[2].handle_right = (r, -h, 0)\r\n spl.bezier_points[2].handle_left = (r, h, 0)\r\n\r\n spl.bezier_points[3].handle_right = (-h, -r, 0)\r\n spl.bezier_points[3].handle_left = (h, -r, 0)\r\n\r\n spl.use_cyclic_u = True\r\n spl.order_u = 4\r\n spl.order_v = 4\r\n spl.resolution_u = 12\r\n spl.resolution_v = 12\r\n spl.tilt_interpolation = 'LINEAR' #('LINEAR', 'CARDINAL', 'BSPLINE', 'EASE')\r\n\r\n return path_obj", "title": "" }, { "docid": "faf780f17c87efa8da6bdc8587ad497d", "score": "0.4927781", "text": "def cot(x):\n return cos(x) / sin(x)", "title": "" }, { "docid": "98e7d6f491ddad1007d995284700ab6e", "score": "0.49218547", "text": "def example_circle(p):\r\n if p[0]**2+p[1]**2<200*2:\r\n return 1\r\n else:\r\n return 0", "title": "" }, { "docid": "7302d97e60495458df40982072ed5eac", "score": "0.49186602", "text": "def getescaque_from_mousexy(xy):\n return (\n floor((xy[0]-x0)/a),\n floor((xy[1]-y0)/a)\n )", "title": "" }, { "docid": "381f809e9e3b5a8373201b6652459861", "score": "0.49111947", "text": "def ellipse(x_radius=1.0, y_radius=1.0, theta=10., xc=0.0, yc=0.0):\r\n angles = np.deg2rad(np.arange(180.0, -180.0-theta, step=-theta))\r\n x_s = x_radius*np.cos(angles) + xc # X values\r\n y_s = y_radius*np.sin(angles) + yc # Y values\r\n pnts = np.c_[x_s, y_s]\r\n return pnts", "title": "" }, { "docid": "7dda64734ae5bc8dd252fc87f533fada", "score": "0.49097386", "text": "def test_approx_circle(self):\n \n arc1 = CubicBezier(\n complex(0,0),\n complex(0,109.66797),\n complex(-88.90345,198.57142),\n complex(-198.57142,198.57142)\n )\n \n self.assertAlmostEqual(arc1.point(0), (0j))\n self.assertAlmostEqual(arc1.point(0.1), (-2.59896457+32.20931647j))\n self.assertAlmostEqual(arc1.point(0.2), (-10.12330256+62.76392816j))\n self.assertAlmostEqual(arc1.point(0.3), (-22.16418039+91.25500149j))\n self.assertAlmostEqual(arc1.point(0.4), (-38.31276448+117.27370288j))\n self.assertAlmostEqual(arc1.point(0.5), (-58.16022125+140.41119875j))\n self.assertAlmostEqual(arc1.point(0.6), (-81.29771712+160.25865552j))\n self.assertAlmostEqual(arc1.point(0.7), (-107.31641851+176.40723961j))\n self.assertAlmostEqual(arc1.point(0.8), (-135.80749184+188.44811744j))\n self.assertAlmostEqual(arc1.point(0.9), (-166.36210353+195.97245543j))\n self.assertAlmostEqual(arc1.point(1), (-198.57142+198.57142j))\n \n arc2 = CubicBezier(\n complex(-198.57142,198.57142),\n complex(-109.66797-198.57142,0+198.57142),\n complex(-198.57143-198.57142,-88.90345+198.57142),\n complex(-198.57143-198.57142,0),\n )\n \n self.assertAlmostEqual(arc2.point(0), (-198.57142+198.57142j))\n self.assertAlmostEqual(arc2.point(0.1), (-230.78073675+195.97245543j))\n self.assertAlmostEqual(arc2.point(0.2), (-261.3353492+188.44811744j))\n self.assertAlmostEqual(arc2.point(0.3), (-289.82642365+176.40723961j))\n self.assertAlmostEqual(arc2.point(0.4), (-315.8451264+160.25865552j))\n self.assertAlmostEqual(arc2.point(0.5), 
(-338.98262375+140.41119875j))\n self.assertAlmostEqual(arc2.point(0.6), (-358.830082+117.27370288j))\n self.assertAlmostEqual(arc2.point(0.7), (-374.97866745+91.25500149j))\n self.assertAlmostEqual(arc2.point(0.8), (-387.0195464+62.76392816j))\n self.assertAlmostEqual(arc2.point(0.9), (-394.54388515+32.20931647j))\n self.assertAlmostEqual(arc2.point(1), (-397.14285+0j))\n\n arc3 = CubicBezier(\n complex(-198.57143-198.57142,0),\n complex(0-198.57143-198.57142,-109.66797 ),\n complex(88.90346-198.57143-198.57142,-198.57143),\n complex(-198.57142,-198.57143)\n )\n\n self.assertAlmostEqual(arc3.point(0), (-397.14285+0j))\n self.assertAlmostEqual(arc3.point(0.1), (-394.54388515-32.20931675j))\n self.assertAlmostEqual(arc3.point(0.2), (-387.0195464-62.7639292j))\n self.assertAlmostEqual(arc3.point(0.3), (-374.97866745-91.25500365j))\n self.assertAlmostEqual(arc3.point(0.4), (-358.830082-117.2737064j))\n self.assertAlmostEqual(arc3.point(0.5), (-338.98262375-140.41120375j))\n self.assertAlmostEqual(arc3.point(0.6), (-315.8451264-160.258662j))\n self.assertAlmostEqual(arc3.point(0.7), (-289.82642365-176.40724745j))\n self.assertAlmostEqual(arc3.point(0.8), (-261.3353492-188.4481264j))\n self.assertAlmostEqual(arc3.point(0.9), (-230.78073675-195.97246515j))\n self.assertAlmostEqual(arc3.point(1), (-198.57142-198.57143j))\n \n arc4 = CubicBezier(\n complex(-198.57142,-198.57143),\n complex(109.66797-198.57142,0-198.57143),\n complex(0,88.90346-198.57143),\n complex(0,0),\n )\n \n self.assertAlmostEqual(arc4.point(0), (-198.57142-198.57143j))\n self.assertAlmostEqual(arc4.point(0.1), (-166.36210353-195.97246515j))\n self.assertAlmostEqual(arc4.point(0.2), (-135.80749184-188.4481264j))\n self.assertAlmostEqual(arc4.point(0.3), (-107.31641851-176.40724745j))\n self.assertAlmostEqual(arc4.point(0.4), (-81.29771712-160.258662j))\n self.assertAlmostEqual(arc4.point(0.5), (-58.16022125-140.41120375j))\n self.assertAlmostEqual(arc4.point(0.6), (-38.31276448-117.2737064j))\n self.assertAlmostEqual(arc4.point(0.7), (-22.16418039-91.25500365j))\n self.assertAlmostEqual(arc4.point(0.8), (-10.12330256-62.7639292j))\n self.assertAlmostEqual(arc4.point(0.9), (-2.59896457-32.20931675j))\n self.assertAlmostEqual(arc4.point(1), (0j))", "title": "" }, { "docid": "d6c4879bea74ce4f25c8626bea8197a6", "score": "0.4903492", "text": "def point(self, p, x):\n return self.plec(x,p)", "title": "" }, { "docid": "7413ef98d33018684d2a8893c6bd497b", "score": "0.48979694", "text": "def calc_R(x,y, xc, yc):\n return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)", "title": "" }, { "docid": "10df1db25aa46ec9491e6a5b6a1a19ff", "score": "0.48944274", "text": "def GetRadiusPointFromExt(self,x,y):\n _cx, _cy = self.__center.getCoords()\n _r = self.__radius\n centerPoint=point.Point(_cx,_cy)\n outPoint=point.Point(x,y)\n vector=Vector(outPoint,centerPoint)\n vNorm=vector.Norm()\n newNorm=abs(vNorm-_r)\n magVector=vector.Mag()\n magVector.Mult(newNorm)\n newPoint=magVector.Point()\n intPoint=point.Point(outPoint+newPoint)\n return intPoint.getCoords()", "title": "" }, { "docid": "bdb2d52d573b012519950106a44deaa4", "score": "0.48942062", "text": "def get_x(self):\n return self.x", "title": "" }, { "docid": "46ecd061550643b855366a117562d37f", "score": "0.48905843", "text": "def x(self):\n return self.rectangle[0]", "title": "" }, { "docid": "25f7c5c825837fa0d335001a82c7f033", "score": "0.487487", "text": "def 
solve_circ_x(r, a, b, y):\n # (x-a)^2 + (y-b)^2 = r^2, where a and b are centre co-ordinates\n # So: x^2 - 2ax + a^2 + (y-b)^2 - r^2 = 0\n # Calculate quadratic coefficients and get roots:\n quad_a = 1.0\n quad_b = -2*a\n quad_c = a**2 + ((y-b)**2) - r**2\n return [ quad_roots(quad_a,quad_b,quad_c) ]", "title": "" }, { "docid": "41b1d93170a6b19ada772d46e608d810", "score": "0.48681855", "text": "def get_curvative(self, y):\n raise NotImplementedError()", "title": "" }, { "docid": "2bd99bad2125f26840bf624ee7c06294", "score": "0.48669684", "text": "def get_curvature_radius(self):\n\n def circle_equation(c, points):\n D, E, F = c[0], c[1], c[2]\n return [x ** 2 + y ** 2 + D * x + E * y + F for x, y in points]\n\n radius = 0\n if any(self.peak_rhs_vertex): # success to find another peak\n vertexs = np.array([self.peak_lhs_vertex, self.peak_rhs_vertex, self.peak_mid_vertex]).reshape([3, 2])\n points = vertexs * [conf.x_scale, conf.y_scale]\n # points = [[peak_x_1 * conf.x_scale, peak_y_1 * conf.y_scale],\n # [peak_x_2 * conf.x_scale, peak_y_2 * conf.y_scale],\n # [peak_x_m * conf.x_scale, peak_y_m * conf.y_scale]]\n D, E, F = fsolve(circle_equation, [0, 0, 0], args=(points))\n radius = np.sqrt(D ** 2 + E ** 2 - 4 * F) / 2\n # centroid = [-D / 2, -E / 2]\n # print(\"radius=\", radius)\n return radius", "title": "" }, { "docid": "7205f64bde8f88f1aa6ad1f14e870544", "score": "0.4866869", "text": "def fit_circle(edge): \n def error(x_c, y_c):\n center = np.array([x_c, y_c])\n return np.std([np.linalg.norm(x - center) for x in edge])**2\n \n # optimize\n center0 = np.mean(edge, axis=0)\n x_c0, y_c0 = center0[0], center0[1] \n center = leastsq(error, x_c0, y_c0)[0]\n R = np.mean([np.linalg.norm(x - center) for x in edge])\n return R", "title": "" }, { "docid": "948fd384f14a6b492d87ac1a682fc405", "score": "0.48623908", "text": "def get_closest_point(x0, y0, a, b, c):\n\n x = (b * (b * x0 - a * y0) - a * c) / (a ** 2 + b ** 2)\n y = (a * (-b * x0 + a * y0) - b * c) / (a ** 2 + b ** 2)\n\n return x, y", "title": "" }, { "docid": "c2aeeba93e2e1d24eaf92bcc68bc2c0b", "score": "0.48618832", "text": "def calc_R(x,y, xc, yc):\r\n return np.sqrt((x-xc)**2 + (y-yc)**2)", "title": "" }, { "docid": "accb295b940c85eb84cb8d942230de9b", "score": "0.48599398", "text": "def circle_area(radius):\n pass # YOUR CODE HERE", "title": "" }, { "docid": "08a66f2ab3aa6cce65c6daefae0ea478", "score": "0.48593855", "text": "def get_circle(radius, points):\n x_f = lambda x: sin((x/float(points))*(pi*2))*radius\n y_f = lambda y: cos((y/float(points))*(pi*2))*radius\n return [(x_f(i), y_f(i)) for i in range(points)]", "title": "" }, { "docid": "e4a0e6dc340b9a4cd94af73beaf8d1e7", "score": "0.48581046", "text": "def _calc_R(x,y, xc, yc):\n return np.sqrt((x-xc)**2 + (y-yc)**2)", "title": "" }, { "docid": "5cd318e71daad6e64f2de6437fa5def0", "score": "0.48576367", "text": "def find_eyeball_position(end_points, cx, cy):\r\n x_ratio = (end_points[0] - cx) / (cx - end_points[2])\r\n y_ratio = (cy - end_points[1]) / (end_points[3] - cy)\r\n if x_ratio > 3:\r\n return 1\r\n elif x_ratio < 0.33:\r\n return 2\r\n elif y_ratio < 0.33:\r\n return 3\r\n else:\r\n return 0", "title": "" }, { "docid": "158b7404cfbeeee5786767998a10fd11", "score": "0.48540547", "text": "def __call__(self, x):\n if np.isclose(self.dx(), 0):\n return float('nan')\n return self.dy()/self.dx()*(x - self.p1[0]) + self.p1[1]", "title": "" }, { "docid": "da9f7adcc95138d9308ffd403cf232af", "score": "0.4849693", "text": "def top(self):\n top = np.array([0, -self.length, 0])\n 
top = self.tip + self.R @ top\n return top", "title": "" }, { "docid": "ec86aca1829b1e5441a21b285e4a5b63", "score": "0.48464137", "text": "def circle(center, radius):\n return affinity.scale(Point(center).buffer(1),\n xfact=radius, yfact=radius)", "title": "" }, { "docid": "b2e0456167ad92b73c0c47b3d79b0d8e", "score": "0.48464093", "text": "def top_edge_center(self) -> agx.Vec3:\n te = self.top_edge\n return 0.5 * ( te.p1 + te.p2 )", "title": "" }, { "docid": "8d0c72f70b7c82c65be5b2651f726d56", "score": "0.48437613", "text": "def func():\n x = motor0.read()[motor_field0]['value']\n y = motor1.read()[motor_field1]['value']\n m = np.array([x, y]) # point to evaluate at\n \n v = Imax * np.exp(-np.sum((m - center) ** 2) / (2 * sigma ** 2))\n return v", "title": "" }, { "docid": "06e75ce2b3297612ec7b77a9a4a4fa0a", "score": "0.4841612", "text": "def ellipse(self, c:tuple, rx=0, ry=0, **kwargs):\n attrs = {\n 'cx': c[0], 'cy': c[1],\n 'rx': rx, 'ry': ry,\n **kwargs,\n }\n SubElement(self.parent, 'circle', _normalize(attrs))", "title": "" }, { "docid": "1c7870ea7bb3ef7d44ea9194e7f6b4dc", "score": "0.483933", "text": "def evaluate(self, x, y):\n pass", "title": "" }, { "docid": "9a3e9c818b078bdf33dbb39882aad02b", "score": "0.4837395", "text": "def x(self, val=None):\n p = self.GetPosition()\n if val is None:\n return p[0]\n self.pos(val, p[1], p[2])\n return self", "title": "" }, { "docid": "7121867f865fb777374b47ba5c5380d9", "score": "0.4830569", "text": "def evalat(self,pts):\n pts=pts+self.shift # add offset\n return splineinterpol(self.c,pts,self.degree)", "title": "" }, { "docid": "cf266fcbf82fb248febae236b95c2d15", "score": "0.48298258", "text": "def calc_R(c):\n return sqrt((x - c[0]) ** 2 + (y - c[1]) ** 2)", "title": "" }, { "docid": "2aad55e511dfd91debe3bc866382adf6", "score": "0.48278028", "text": "def calc_R(x,y, xc, yc):\n return np.sqrt((x-xc)**2 + (y-yc)**2)", "title": "" }, { "docid": "ae36eb0fa6d4656bdde2b85c0524ac5c", "score": "0.48268032", "text": "def eval(self,x): \r\n y = N.zeros([N.size(x),N.size(self.ordinate,1)])\r\n for i in range(N.size(y,1)):\r\n y[:,i] = N.interp(x,self.abscissa,self.ordinate[:,i])\r\n return y", "title": "" }, { "docid": "e13e0e71297d434477654a99ea0f8690", "score": "0.48266605", "text": "def calc(num_x1, num_y1, num_x2, num_y2):\n result = sqrt()", "title": "" }, { "docid": "b918521dc0013a850223f754ed245e6e", "score": "0.48230478", "text": "def draw_circle_outline(image, x_center, y_center, radius, value=0):\n new_image = np.copy(image)\n (height, width) = image.shape\n for x in range(width):\n for y in range(height):\n if (x - x_center)**2 + (y-y_center)**2 == radius**2:\n new_image[y][x] = value\n return new_image", "title": "" }, { "docid": "4cea1336d3dc32f4ddc79b5e0f981806", "score": "0.48162046", "text": "def Ellipse(x_radius, y_radius, origin=None, superness=CIRCULAR_SUPERNESS):\n if not origin:\n origin = Point(0,0)\n w = origin + west * x_radius\n e = origin + east * x_radius\n n = origin + north * y_radius\n s = origin + south * y_radius\n\n w_n = CubicBezier(w,\n w + north * y_radius * superness,\n n + west * x_radius * superness,\n n)\n n_e = CubicBezier(n,\n n + east * x_radius * superness,\n e + north * y_radius * superness,\n e)\n e_s = CubicBezier(e,\n e + south * y_radius * superness,\n s + east * x_radius * superness,\n s)\n s_w = CubicBezier(s,\n s + west * x_radius * superness,\n w + south * y_radius * superness,\n w)\n return BezierPath.fromSegments([w_n, n_e, e_s, s_w])", "title": "" } ]
e8beb57de21a98204f851d42285263eb
Delete a push rule. Args specify the row to be deleted and can be any of the columns in the push_rule table, but below are the standard ones
[ { "docid": "ed5a6eee00e3991a616774f9628d1ffd", "score": "0.7285327", "text": "def delete_push_rule(self, user_name, rule_id):\n yield self._simple_delete_one(\n PushRuleTable.table_name,\n {'user_name': user_name, 'rule_id': rule_id},\n desc=\"delete_push_rule\",\n )\n\n self.get_push_rules_for_user.invalidate((user_name,))\n self.get_push_rules_enabled_for_user.invalidate((user_name,))", "title": "" } ]
[ { "docid": "d6f4919bf5fa7ac48d2a7c4d6719df1c", "score": "0.74705184", "text": "def deletePgPushRule(self, pgPushRuleUri):\r\n return self.proto.dbpool.runInteraction(self._deletePgPushRule, pgPushRuleUri)", "title": "" }, { "docid": "c237d86d544a2804f4bb3bd3e16bb690", "score": "0.7154085", "text": "def deleteHanaPushRule(self, hanaPushRuleUri):\r\n return self.proto.dbpool.runInteraction(self._deleteHanaPushRule, hanaPushRuleUri)", "title": "" }, { "docid": "f272b4cfff5de736d49a6fa0fa68aaae", "score": "0.66449344", "text": "def deleteOraPushRule(self, oraPushRuleUri):\r\n return self.proto.dbpool.runInteraction(self._deleteOraPushRule, oraPushRuleUri)", "title": "" }, { "docid": "ede815e0e89e3413b840784f0dfcf4b0", "score": "0.6498791", "text": "def delete_rule(RuleId=None, ChangeToken=None):\n pass", "title": "" }, { "docid": "e211b8f663056d903a0f83886a62bfb9", "score": "0.6033907", "text": "def deleteRule(logger, source, table, version=None):\n\n if version:\n if version == 4:\n return Command.executeIp(logger, IpConstant.IPV4, IpOption.RULE, IpAction.DELETE, \n IpConstant.FROM, source, IpConstant.TABLE, table) \n elif version == 6:\n return Command.executeIp(logger, IpConstant.IPV6, IpOption.RULE, IpAction.DELETE, \n IpConstant.FROM, source, IpConstant.TABLE, table) \n\n rc = Command.executeIp(logger, IpOption.RULE, IpAction.DELETE, \n IpConstant.FROM, source, IpConstant.TABLE, table) \n return rc", "title": "" }, { "docid": "538d8239820725a17e3bb99bae2a0606", "score": "0.5898676", "text": "def deleteRule(ruleId, token=\"\", version=\"stable\", format=\"json\"):\n return _delete(\n \"rules/{}\".format(ruleId), token=token, version=version, format=format\n )", "title": "" }, { "docid": "c4f8c6bf8ee4cad08fa0994d23af0cad", "score": "0.5852958", "text": "def push_delete(self, branch, *args):\n phlgit_push.delete(self._clone, self._remote, branch, *args)", "title": "" }, { "docid": "c8f1cc508bcf6e9c769402d18f6c1a4f", "score": "0.580274", "text": "def delete(self, pop3guid, jobguid=\"\",executionparams=None):", "title": "" }, { "docid": "2480443ed9fe918f7666b62c8d5bd5a2", "score": "0.57965034", "text": "def delete(self, message, args):\n\n conn = self.connection\n\n if not has_power(message):\n comment = 'this command needs admin privilages :/'\n conn.privmsg(self.channel, comment)\n else:\n # see if we're deleting multiple rows\n if not args:\n row_num = 1\n else:\n try:\n row_num = int(args[0])\n except ValueError:\n row_num = 1\n\n if delete_rows(1, row_num+1):\n comment = 'deleted ' + str(row_num) + ' rows'\n conn.privmsg(self.channel, comment)", "title": "" }, { "docid": "0179a747ea81d41574fd7abe2b67bc3b", "score": "0.575793", "text": "def delete_ovs_flow_rules(self, bridge_id, table_id, flow_id, priority):\n pass", "title": "" }, { "docid": "6bc4ac08b94f79f12f7408af82f483fe", "score": "0.5698065", "text": "def delete_rate_based_rule(RuleId=None, ChangeToken=None):\n pass", "title": "" }, { "docid": "be578b3472c74c024b76012d4a05d2f2", "score": "0.564285", "text": "def delete(sql, *args, **kwargs):\n assert \"delete\" in sql.lower(), 'This function requires a delete statement, provided: {}'.format(sql)\n CoyoteDb.execute_and_commit(sql, *args, **kwargs)", "title": "" }, { "docid": "13fa8c08a3821955395886b2958ed727", "score": "0.56378263", "text": "def del_flowrule (self, id=None, match=None, action=None):\n if id is not None:\n for f in self.flowrules:\n if f.id == id:\n self.flowrules.remove(f)\n return True\n else:\n deletable = []\n ret = False\n for f in self.flowrules:\n if f.match 
== match or f.action == action:\n deletable.append(f)\n for f in deletable:\n self.flowrules.remove(f)\n ret = True\n return ret", "title": "" }, { "docid": "cb644afa5a6c5fad9798760ebb5bb9b4", "score": "0.5616721", "text": "def delete(self, rule_id):\n json_data = request.data\n try:\n purge_replicas = None\n params = loads(json_data)\n if 'purge_replicas' in params:\n purge_replicas = params['purge_replicas']\n except ValueError:\n return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')\n\n try:\n delete_replication_rule(rule_id=rule_id, purge_replicas=purge_replicas, issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))\n except AccessDenied as error:\n return generate_http_error_flask(401, 'AccessDenied', error.args[0])\n except UnsupportedOperation as error:\n return generate_http_error_flask(401, 'UnsupportedOperation', error.args[0])\n except RuleNotFound as error:\n return generate_http_error_flask(404, 'RuleNotFound', error.args[0])\n except Exception as error:\n print(format_exc())\n return str(error), 500\n return '', 200", "title": "" }, { "docid": "2223626eba29fab6734d656c9bda8c43", "score": "0.5614312", "text": "def delete_rule(self, id, **kwargs):\n endpoint = '{0}/{1}/rules/{2}'.format(\n self.endpoint,\n self['id'],\n id\n )\n return self.request('DELETE', endpoint=endpoint, query_params=kwargs)", "title": "" }, { "docid": "b1dd60832244f989a9780247aeac10f0", "score": "0.56125", "text": "def test_sales_rule_rule_repository_v1_delete_by_id_delete(self):\n pass", "title": "" }, { "docid": "2866563608e2f7f47dda727f3cb0f5e9", "score": "0.5610072", "text": "def delete_rule(self, topic, subscription, rule, **kwargs):\n # type: (Union[str, TopicDescription], Union[str, SubscriptionDescription], Union[str, RuleDescription], Any) -> None # pylint:disable=line-too-long\n try:\n topic_name = topic.name # type: ignore\n except AttributeError:\n topic_name = topic\n try:\n subscription_name = subscription.name # type: ignore\n except AttributeError:\n subscription_name = subscription\n try:\n rule_name = rule.name # type: ignore\n except AttributeError:\n rule_name = rule\n self._impl.rule.delete(topic_name, subscription_name, rule_name, api_version=constants.API_VERSION, **kwargs)", "title": "" }, { "docid": "b882e7932b1c80454e9dfb10ff059f34", "score": "0.56092304", "text": "def delete_firewall_rule(rule_name):\n gcloud_command_args = ['gcloud', 'compute', 'firewall-rules', 'delete',\n rule_name, '--quiet']\n return_code, _, err = utils.run_command(gcloud_command_args)\n if return_code:\n print(err)", "title": "" }, { "docid": "b17767cacf56eba966ebf1180df634f7", "score": "0.55595416", "text": "async def remove_channel_command_rule(context, arguments):\n return await remove_command_rule(context, arguments, \"channel\")", "title": "" }, { "docid": "c2e10e84cf7ef75bdb553c05a2280e42", "score": "0.5555668", "text": "def deleteRow(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "3697534be893e3cbbe7b1d84dadd6a09", "score": "0.5541282", "text": "def delete_job(self, name):\n response = self.client.list_targets_by_rule(Rule=name) \n target_ids = []\n for target in response.get('Targets', []):\n target_ids.append(target.get('Id'))\n delete_targets = self.client.remove_targets(\n Rule=name,\n Ids=target_ids,\n Force=True\n )\n delete_rule = self.client.delete_rule(Name=name,Force=True)\n return delete_rule", "title": "" }, { "docid": "fb45f835a2d83c20cc1c5c827928d8fa", "score": "0.55409527", "text": "def removeRow(self, row: int) -> 
None:\n ...", "title": "" }, { "docid": "a1368180cea3342d524cb13b2de60a48", "score": "0.5520681", "text": "def test_delete_notification_rule(self):\n pass", "title": "" }, { "docid": "0c0bbce03d20d48700a9462b8a132cb6", "score": "0.5507451", "text": "def delprop_row(self, table, row_id, dst=\"nb\"):\n return getattr(self.xmlproxy, \"%s.%s.delRow\" % (dst, table))(row_id)", "title": "" }, { "docid": "f34a0970a79f52ff29cfe35555a1ee56", "score": "0.54742295", "text": "def remove(self, pattern):\n session = Session()\n rule = session.query(Rule).filter_by(pattern=pattern).first()\n \"\"\":type rule: rule\"\"\"\n\n if rule:\n session.delete(rule)\n session.commit()\n return True\n\n return False", "title": "" }, { "docid": "38a75e095bc7e17f3f053486fbd75cb7", "score": "0.54663897", "text": "def delete_rows_perm(hash_str):\n\n # find song with the hash\n result = SPREADSHEET.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,\n range='datadump!G2:G').execute()\n\n row = -1\n for (i, entry) in reversed(list(enumerate(result['values']))):\n if len(entry) == 1 and entry[0] == hash_str:\n row = i\n break\n if row == -1:\n print('can\\'t find song with provided hash')\n return\n\n delete_row_body = {\"requests\": [{\"deleteDimension\": {\n \"range\": {\n \"sheetId\": BACKUP_SHEET_ID,\n \"dimension\": \"ROWS\",\n \"startIndex\": row+1,\n \"endIndex\": row+2\n }\n }}, ], }\n result = SPREADSHEET.spreadsheets().batchUpdate(\n spreadsheetId=SPREADSHEET_ID, body=delete_row_body).execute()\n\n # error logging\n open('log/reply.txt', 'a').write(str(result)+'\\n')\n print('deleted entry ' + hash_str +\n ' (row ' + str(row+2) + ') from datadump')", "title": "" }, { "docid": "34d7c28c6fed111de5f78bb71ba06c83", "score": "0.5418787", "text": "def delete_rule(self) -> str:\n return pulumi.get(self, \"delete_rule\")", "title": "" }, { "docid": "59e6041a5d55e1c028606782a8b4863a", "score": "0.54181087", "text": "def req_req_ondelete(row):\n\n db = current.db\n table = db.scheduler_task\n query = (table.function_name == \"req_add_from_template\") & \\\n (table.args == \"[%s]\" % row.id)\n db(query).delete()", "title": "" }, { "docid": "068689e7ccb877828cc927814286defb", "score": "0.5414267", "text": "def delete_rule_group(RuleGroupId=None, ChangeToken=None):\n pass", "title": "" }, { "docid": "e9b09152229634665eea25948846eeaf", "score": "0.540382", "text": "def delete_stream_rule(T, value):\n # find the rule id\n result = T.get_stream_rules()\n if \"data\" not in result:\n click.echo(click.style(\"💔 There are no rules to delete!\", fg=\"red\"), err=True)\n else:\n rule_id = None\n for rule in result[\"data\"]:\n if rule[\"value\"] == value:\n rule_id = rule[\"id\"]\n break\n if not rule_id:\n click.echo(\n click.style(f'🙃 No rule could be found for \"{value}\"', fg=\"red\"),\n err=True,\n )\n else:\n results = T.delete_stream_rule_ids([rule_id])\n if \"errors\" in results:\n click.echo(_error_str(results[\"errors\"]), err=True)\n else:\n click.echo(f\"🗑 Deleted stream rule for {value}\", color=\"green\")", "title": "" }, { "docid": "b917ff5354655456192f86245229df55", "score": "0.53812283", "text": "def delete_ovs_flow_actions(self, bridge_id, table_id, flow_id, action, priority=2000):\n pass", "title": "" }, { "docid": "08b3a72f6053c71916af1f1a1a84f46a", "score": "0.53683907", "text": "def delete_rule(rule_id, system_id, **kwargs):\n\t\ttry:\n\t\t\tsystem = SystemService().filter(pk = system_id, state__name = 'Active').first()\n\t\t\tescalation_rule = EscalationRuleService().filter(pk = rule_id, system = 
system).first()\n\t\t\tif system is None or escalation_rule is None:\n\t\t\t\treturn {\"code\": \"800.400.002\"}\n\t\t\tif escalation_rule.delete():\n\t\t\t\treturn {'code': '800.200.001', 'Message': 'Rule deleted successfully'}\n\t\texcept Exception as ex:\n\t\t\tlgr.exception(\"Delete Escalation Rule exception %s\" % ex)\n\t\treturn {\"code\": \"800.400.001\"}", "title": "" }, { "docid": "3cfced446bab7b186cd9d4c21ad3898a", "score": "0.5363295", "text": "def delete ( self, object, trait, row ):\r\n del getattr( object, trait )[ row ]", "title": "" }, { "docid": "9aea1069f02a12fa50861414194c2963", "score": "0.5357321", "text": "def test_delete_row_arg(self):\n self.insert()\n self.tbl.delete(where=(\"age=?\", 25))\n data = self.tbl.select()\n assert self.check_data(self.idata[1:], data)", "title": "" }, { "docid": "e7ef2b69bf3d50ed45329f1c04939cf8", "score": "0.53314275", "text": "def delete_command(self, pars, args):\n test = self.test(pars, [0], args, [1], [])\n if test:\n return test\n try:\n res = self.table.delete_trip(int(args[0]))\n except KeyError as e:\n return \"There is no trip with id:{}\".format(e)\n except Exception as e:\n return \"{}\".format(e)\n return \"Deleted {}\".format(res.get_print())", "title": "" }, { "docid": "c3f4150c02f2592291e5cd385359b53f", "score": "0.53247637", "text": "def delete_row(pmid, exp, row):\n\n target = next(get_article_object(pmid))\n experiments = eval(target.experiments)\n elem = experiments[exp]\n locations = elem[\"locations\"]\n locations.pop(row)\n Articles.update(\n experiments=experiments).where(\n Articles.pmid == pmid).execute()", "title": "" }, { "docid": "63676fc082a07ef000810278f6ddcde3", "score": "0.5286498", "text": "def delete_flow(self, FlowArn: str) -> Dict:\n pass", "title": "" }, { "docid": "e0db398b64ca00c5388d622d0352c60e", "score": "0.52783185", "text": "def delete(conn, schedule_id):\n with db.commit(conn) as c:\n c.execute(queries.remove_schedule, [schedule_id])", "title": "" }, { "docid": "9da834aa1fcda032a0ed4f30c2589d9c", "score": "0.5276415", "text": "def delete(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "8c93c22138b3735da2a2f62a012b19cb", "score": "0.5263093", "text": "def DeleteLiveRecordRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteLiveRecordRule\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteLiveRecordRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "3210097d09f1a652fd0537c884535c30", "score": "0.5250537", "text": "def delete(self, where: str, match: str = \"\") -> list:\n query = \"MATCH(p{})\".format(match)\n if where is not None:\n query += \" WHERE(p.{})\".format(where)\n query += \" DELETE p\"\n\n return self.redis_connection.execute_command(\"GRAPH.QUERY\",\n self.graph_name,\n query)", "title": "" }, { "docid": "2909d6da7923004c9f1ddb05cc3f16a3", "score": "0.52471197", "text": "def remove_rule(self, src_port, src_vlan, dst_port, dst_vlan):\n return self.__channel.remove_rule(src_port, src_vlan, dst_port, dst_vlan)", "title": "" }, { "docid": "8f50687b491b467664de23b41884acaa", "score": "0.52403337", "text": "def delete(self):\n self.check_auth_context()\n self.rule.delete()\n trigger_session_update(self.rule.org, ['monitoring'])", "title": "" }, { "docid": "6da2306dcf6903b20cf812057b0f4dc1", 
"score": "0.52330625", "text": "def delete(self, table_id, row_id):\n return \"DELETE FROM %d WHERE ROWID = '%d'\" % (table_id, row_id)", "title": "" }, { "docid": "797d898d555e97ea6a53ff37660da6ee", "score": "0.5229726", "text": "def delete(self, pars, args):\n test = self.test(pars, [0], args, [0, 1], [])\n if test:\n return test\n if len(args) == 1:\n self.table.delete(args[0])\n return \"Table {} deleted\".format(args[0])\n else:\n self.table.delete()\n return \"Table deleted\"", "title": "" }, { "docid": "e950ff5bdddff4a9a59ac5b6c412827e", "score": "0.5224578", "text": "def delete(self, data):\n element = data['elements'][0]\n self.database.get_database().delete(\n 'medical_history_table',\n ('id=%s', [element['id']])\n )\n self.database.get_database().delete(\n 'prescription_table',\n ('medical_history_id=%s', [element['id']])\n )\n self.database.get_database().delete(\n 'labaratory_order_table',\n ('medical_history_id=%s', [element['id']])\n )\n self.database.get_database().delete(\n 'service_table',\n ('medical_history_id=%s', [element['id']])\n )\n self.database.commit()\n\n # return message\n msg = dict()\n msg['type'] = self.handler\n msg['method'] = 'delete'\n msg['elements'] = list()\n\n model = dict()\n model['id'] = element['id']\n msg['elements'].append({\n 'id': element['id']\n })\n return msg", "title": "" }, { "docid": "abe03ec11391b017fd7596bee2932a78", "score": "0.5221895", "text": "def test_delete_rule(self, client, org_name, status_code, expected_post_delete_count):\n org = Organization.objects.get(name=org_name)\n rule = MembershipRuleFactory.create(group__organization=org)\n response = client.delete('/membership-rules/{}/'.format(rule.id))\n assert response.status_code == status_code, response.content\n assert MembershipRule.objects.count() == expected_post_delete_count", "title": "" }, { "docid": "02b3ecca4d37a8930c2c21a078e5714c", "score": "0.52201533", "text": "def delete(args):\n c.mass_delete(args.elemfile,filetype(args.elemfile))", "title": "" }, { "docid": "4aa4f189e8c645dba63751c60f9b8b55", "score": "0.5211893", "text": "def flowdel(\n self, match=None, priority=None, out_port=valve_of.ofp.OFPP_ANY, strict=False\n ):\n command = valve_of.ofp.OFPFC_DELETE\n if strict:\n command = valve_of.ofp.OFPFC_DELETE_STRICT\n return self.flowmod(\n match=match,\n priority=priority,\n command=command,\n out_port=out_port,\n out_group=valve_of.ofp.OFPG_ANY,\n )", "title": "" }, { "docid": "818946943e64d6f05aedd8108e728bd6", "score": "0.52042705", "text": "def deleteMatches():\n sql = \"DELETE FROM matches\"\n sqlNoReturn(sql, ())", "title": "" }, { "docid": "68bfaa4c0b40f044f0f2f9aac0614cf8", "score": "0.51993054", "text": "def delete_association(self, id_row_to_delete):\n my_connection = mysql.connector.connect(user=self.user, password=self.password, database='openfoodfacts')\n cursor = my_connection.cursor()\n query = (\"DELETE FROM Swap WHERE id = %s\") % (\"\\'\" + str(id_row_to_delete) + \"\\'\")\n cursor.execute(query)\n my_connection.commit()\n cursor.close()\n my_connection.close()", "title": "" }, { "docid": "5521b8de78a13b1ad17e8f39bf445830", "score": "0.51947975", "text": "def delete_rules(request):\n rules_id = request.GET.getlist(\"rules[]\")\n rules = CustomRule.objects.filter(pk__in=rules_id, user=request.user)\n for rule in rules:\n os.remove(rule.path)\n rules.delete()\n return JsonResponse({\"ok\": True})", "title": "" }, { "docid": "4e9996dfe7fb6a3eff5feebfe42303fe", "score": "0.5183437", "text": "def delete(self, job):\n pass", "title": "" }, { 
"docid": "62697c5777d4874d413331384d9f360e", "score": "0.51709497", "text": "def delete_command():\n backend.delete_entry(selected_tuple[0])", "title": "" }, { "docid": "8497f7a2cd508e5924a8f98754c2f8cf", "score": "0.5142415", "text": "def test_delete_row(self):\n vizual.delete_row(\n dataset_name='ABC',\n row=10,\n validate=True\n )", "title": "" }, { "docid": "843b7f2b729490c351e49ec6577ed93f", "score": "0.51401174", "text": "def delete(self, pipeline=None):\r\n connection = pipeline if pipeline is not None else self.connection\r\n connection.delete(self.key)", "title": "" }, { "docid": "d273e4e8dded6a207c25e74bf38d015d", "score": "0.5135248", "text": "def test_delete_rule(self, pretty_print, mist_core, owner_api_token):\n uri = mist_core.uri + '/api/v2/rules/{rule}'.format(rule='example-rule') \n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'DELETE'.lower())\n response = request_method()\n assert_response_ok(response)\n print('Success!!!')", "title": "" }, { "docid": "06d07fe01b4811e1793c490013dd39b3", "score": "0.5121173", "text": "async def remove_category_command_rule(context, arguments):\n return await remove_command_rule(context, arguments, \"category\")", "title": "" }, { "docid": "d2cab661671819430ff14bbfff0ddbe3", "score": "0.51207864", "text": "def delete_ovs_flow_qualifiers(self, bridge_id, table_id, flow_id, field, priority=2000):\n pass", "title": "" }, { "docid": "9a0e9e491565b3de62c9b34164e0687b", "score": "0.5102072", "text": "def deleteMatches():\n delete_rows_from(\"matches\")", "title": "" }, { "docid": "97a5d7494e635188ac145cf9d51c71d1", "score": "0.5097392", "text": "def test_that_when_deleting_a_rule_succeeds_the_delete_method_returns_true(self):\n self.conn.delete_rule.return_value = {}\n result = boto_cloudwatch_event.delete(Name=rule_name, **conn_parameters)\n\n self.assertTrue(result.get(\"deleted\"))\n self.assertEqual(result.get(\"error\"), None)", "title": "" }, { "docid": "df1fbaef54f13024f8a964cb9fee5fc6", "score": "0.5071407", "text": "def delete_row(table, rowKey):\n\n hasBinaryFiles = [\"Elements\"]\n hasGraphData = [\"Elements\"]\n\n transport = roboearth.openDBTransport()\n client = transport['client']\n try:\n client.deleteAllRow(table, rowKey)\n roboearth.closeDBTransport(transport)\n if table in hasBinaryFiles:\n hdfs.rm_dir(os.path.join(roboearth.UPLOAD_DIR, table.lower(), rowKey.replace('.', '/')))\n if table in hasGraphData:\n sesame.rm(rowKey, table)\n \n except IOError, err:\n roboearth.closeDBTransport(transport)\n print table, rowKey\n raise roboearth.DBWriteErrorException(\"Can't delete data: \" + err.__str__())", "title": "" }, { "docid": "cdb3703fb98060a85e0180d66e72c7ba", "score": "0.50448376", "text": "def delete_item(args_content):\n\n db = get_db()\n\n db.session.query(g.Items).filter(\n g.Items.name == args_content[\"name\"],\n g.Items.sell_in == args_content[\"sell_in\"],\n g.Items.quality == args_content[\"quality\"],\n ).delete()\n db.session.commit()", "title": "" }, { "docid": "dd49c9a76b469b0ade5862f9c577842f", "score": "0.50361496", "text": "def test_delete_sentence_rule(self):\n response = self.client.open(\n '/api/v1/sentenceRule/{sentenceID}'.fpgapiat(sentenceID=56),\n method='DELETE')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "937c6847af19b308445a444db34ee397", "score": "0.5030893", "text": "def deletePlayers():\n delete_rows_from(\"players\")", "title": "" }, { "docid": "19906dc2d39c88aab007acc31b54d595", 
"score": "0.5029024", "text": "def delete(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "19906dc2d39c88aab007acc31b54d595", "score": "0.5029024", "text": "def delete(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "272bea0f4b92a61105c3ae941453060d", "score": "0.4990585", "text": "def delete(self, *args):\n for arg in args:\n db.session.delete(arg)\n db.session.commit()", "title": "" }, { "docid": "23ac0ae8fb086f7f99f92728458cf0df", "score": "0.49884114", "text": "def remove_job(cursor, arguments):\n valid_args = ('job_title', 'job_id')\n remove_helper(cursor, arguments, valid_args, TableName.job.value)", "title": "" }, { "docid": "11f5ad329f2d2b83a4e4b6a3f27957ed", "score": "0.498534", "text": "def delete_firewall_rule(compute, project, name):\n return compute.firewalls().delete(project=project, firewall=name).execute()", "title": "" }, { "docid": "92e7e1fc8b3ca11144d667cb3b51e674", "score": "0.4979946", "text": "def gmdDeleteRecord(*args):\n return _gmdcc.gmdDeleteRecord(*args)", "title": "" }, { "docid": "d501c816ea08e682bfe7ee1bcf710852", "score": "0.4975381", "text": "def delete_by_template(self, template):\n wc = self._template_to_where_clause(template)\n q=\"delete from \" + self._table_name + \" \"+wc[0]\n result=self._run_q(q,args=wc[1],fetch=False)\n return result", "title": "" }, { "docid": "5ed89561ccfef63a214eecabd5a0ab17", "score": "0.49744585", "text": "def flushRules(logger, table, version=None):\n\n if version:\n if version == 4:\n return Command.executeIp(logger, IpConstant.IPV4, IpOption.RULE, IpAction.DELETE, \n IpConstant.TABLE, table) \n elif version == 6:\n return Command.executeIp(logger, IpConstant.IPV6, IpOption.RULE, IpAction.DELETE, \n IpConstant.TABLE, table) \n\n rc = Command.executeIp(logger, IpOption.RULE, IpAction.DELETE, \n IpConstant.TABLE, table) \n return rc", "title": "" }, { "docid": "26b513808112da5533bb6e3675fecfe8", "score": "0.49617982", "text": "def remove_row(board, row):\n del board[row]\n return [[0 for _ in range(COLUMN_COUNT)]] + board", "title": "" }, { "docid": "2f82519dfc08327f176712d8e5a96747", "score": "0.49586958", "text": "def delete_ruleset(self, id, **kwargs):\n endpoint = '{0}/{1}'.format(\n self.endpoint,\n self['id']\n )\n return self.request('DELETE', endpoint=endpoint, query_params=kwargs)", "title": "" }, { "docid": "67ec7b0ab994c2f60a3e2d79c9d8e0c4", "score": "0.49519235", "text": "def delete(table, id_):\n\n pass", "title": "" }, { "docid": "49bfd181696be867db962e0fa9bd50ab", "score": "0.49474186", "text": "def test_delete_rows_args(self):\n self.insert()\n self.tbl.delete(where=(\"age=? 
OR height>?\", (25, 70)))\n data = self.tbl.select()\n assert self.check_data(self.idata[2:], data)", "title": "" }, { "docid": "e0278bfdd772e16cebb4498ede9ca4d0", "score": "0.4938715", "text": "def do_delete(self, args):\n self.delete_code(args)", "title": "" }, { "docid": "cc68a83c009b20ec7c5cab2a8375059a", "score": "0.49356103", "text": "def delete_firewall_rule(self, firewall_rule):\n return self._delete(self.firewall_rule_path % (firewall_rule))", "title": "" }, { "docid": "8862ee7fc3881e091ac8d918354b72ae", "score": "0.49315062", "text": "def _remove_policy(self, sec, ptype, rule):\n rule_removed = super()._remove_policy(sec, ptype, rule)\n if rule_removed:\n self.log_casbin_change('remove', ', '.join([ptype] + rule))\n return rule_removed", "title": "" }, { "docid": "1613fe1f6093f45e5cbaccefef7095f9", "score": "0.4930228", "text": "def remove_rule(self, rule_name):\n if not isinstance(rule_name, str):\n raise TypeError(\"object '%s' was not a string\" % rule_name)\n\n if rule_name not in self.rule_names:\n raise GrammarError(\"'%s' is not a rule in Grammar '%s'\" % (rule_name, self))\n\n # Check if rule with name 'rule_name' is a dependency of another rule in this\n # grammar.\n i = self.rule_names.index(rule_name)\n rule = self._rules[i]\n if rule.reference_count > 0:\n raise GrammarError(\"Cannot remove rule '%s' as it is referenced by a RuleRef \"\n \"object in another rule.\" % rule_name)\n\n self._rules.pop(i)", "title": "" }, { "docid": "f8289cbd12ef67a9b4e3e1c2d802f08f", "score": "0.49288583", "text": "def handle_snapshot_delete(self, graph_db, body):\n uuid = body['payload']['snapshot_id']\n OpenstackResource(uuid).remove_resource(graph_db)", "title": "" }, { "docid": "a4fc09c4c035dca1b583ffbda7a362c3", "score": "0.4925392", "text": "def delete_alerts(self, keys):\n with self._table.batch_writer() as batch:\n for rule_name, alert_id in keys:\n batch.delete_item(Key={'RuleName': rule_name, 'AlertID': alert_id})", "title": "" }, { "docid": "509c4954a5200cf32a9c52b28f9b1997", "score": "0.49228776", "text": "def modifyHanaPushRule(self, hanaPushRuleUri, specDelta):\r\n return self.proto.dbpool.runInteraction(self._modifyHanaPushRule, hanaPushRuleUri, specDelta)", "title": "" }, { "docid": "493835612dcf87608f40f842705a745b", "score": "0.49147606", "text": "def deleteMatches():\n connect_execute(\"DELETE FROM match;\")", "title": "" }, { "docid": "e82216b9049015f932eb6bf327f0694f", "score": "0.49089867", "text": "def deleteMatches():\n ConnectAndCommit(\"Delete From Match_Record;\")", "title": "" }, { "docid": "5943c4b0fa488b150c90bef16e9ed783", "score": "0.49081096", "text": "def lr_policy_del(self, router, priority=None, match=None,\n if_exists=False):", "title": "" }, { "docid": "78a2b0b5569b4b2e94cc8274e97ce176", "score": "0.49036655", "text": "def DeleteLiveTimeShiftRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteLiveTimeShiftRule\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteLiveTimeShiftRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "8bd4581ebc92fdda61eebe2c72051338", "score": "0.49013898", "text": "def modifyPgPushRule(self, pgPushRuleUri, specDelta):\r\n return self.proto.dbpool.runInteraction(self._modifyPgPushRule, pgPushRuleUri, specDelta)", "title": "" }, { "docid": 
"22313576fcaff4ca040ab75ade0fb2d4", "score": "0.48965284", "text": "def delete(self, sql, parameters=()):\n\n self._commit(sql, parameters)", "title": "" }, { "docid": "6835fa378890fa48f10453af00869700", "score": "0.48863187", "text": "def DeleteLiveSnapshotRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteLiveSnapshotRule\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteLiveSnapshotRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "c365fb20ff3439d6247d41610e5c681f", "score": "0.4873127", "text": "def delete_e_sql(target_name, src, dst):\n if not src.startswith('#'):\n src = ('(' + src + ')')\n\n if not dst.startswith('#'):\n dst = ('(' + dst + ')')\n\n query_sql = ['delete', 'edge', 'from', src, 'to', dst, 'where', \"@class = '\" + target_name + \"'\"]\n return ' '.join(query_sql)", "title": "" }, { "docid": "d4362b3a5dfc7f560ad3571ee7713204", "score": "0.48709288", "text": "def delete_storage_item(\n server_context: ServerContext, type: str, row_id: int, container_path: str = None\n):\n url = server_context.build_url(STORAGE_CONTROLLER, \"delete.api\", container_path)\n payload = {\"type\": type, \"props\": {\"rowId\": row_id}}\n\n return server_context.make_request(url, json=payload)", "title": "" }, { "docid": "7893434dc74d40eb7ae2fe9bb7ad20a6", "score": "0.4870633", "text": "def delete(node_path: List[str], source_db_alias: str, source_table: str):\n with mara_db.dbs.cursor_context('mara') as cursor:\n cursor.execute(f'''\nDELETE FROM data_integration_incremental_copy_status\nWHERE node_path = {'%s'} AND source_table = {'%s'}\n''', (node_path, f'{source_db_alias}.{source_table}'))", "title": "" }, { "docid": "489916a4ca5e3ffda0d7267f8596d662", "score": "0.48594025", "text": "def delete_from(args):\n with _at_line(args.line_id) as line:\n if line.line_no < 0:\n raise LineNotFound(\n f\"Cannot alter line because {args.line_id} doesn't exist\"\n )\n line -= args.images", "title": "" }, { "docid": "1af005455095cd669160089dcd237d31", "score": "0.48522767", "text": "def DeleteLiveTranscodeRule(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteLiveTranscodeRule\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteLiveTranscodeRuleResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "7d8efcf4216b07e25a69c2ece0618b4d", "score": "0.4848725", "text": "def deleteRowSQL(self, rowObject):\n args = []\n tableInfo = self.schema[rowObject.rowTableName] \n # build where clause\n for keyColumn, type in tableInfo.rowKeyColumns:\n args.append(self.quote_value(rowObject.findAttribute(keyColumn),\n type))\n\n return self.getTableInfo(rowObject).deleteSQL % tuple(args)", "title": "" }, { "docid": "bab74d54146ad9822a70081df3fc7d52", "score": "0.48478937", "text": "def __delete(shift):\n if Shifts.delete_by_id(shift.id):\n dbSession.commit()\n return True", "title": "" }, { "docid": "4f9386e49909baf99933767820ff868a", "score": "0.4847296", "text": "def remove(self):\n self.conn.delete(self.links[\"delete\"][\"href\"])", "title": "" }, { "docid": 
"0c880a4d343583fe980be457ddbb0118", "score": "0.48409936", "text": "def delete_triple(self) :\n\t\t\n\t\tsub = request.params['sub']\n\t\tpred = request.params['pred']\n\t\tobj = request.params['obj']\n\t\t\n\t\tsub = self._parse_prim_escape(sub)\n\t\tpred = self._parse_prim_escape(pred)\n\t\tobj = self._parse_prim_escape(obj)\n\t\t\n\t\tif sub != None and pred != None and obj != None :\n\t\t\tret = g.sparql.doQuery(\"DELETE { %s %s %s }\" % (sub, pred, obj))\n\t\t\tprint \"DELETE { %s %s %s }\" % (sub, pred, obj)\n\t\t\treturn \"DELETE { %s %s %s }:\\n%s\" % (sub, pred, obj, ret)", "title": "" } ]
7e0ac660f95bc7e4dacced0fbe04d5bf
Detects the ball using color thresholding
[ { "docid": "b32ec2d27abc563b3e381ef5a3c38254", "score": "0.70945865", "text": "def detect_ball(self):\n\n #read webcam image\n ret, img_in = self.capture.read()\n\n #convert to HSV colorspace\n img_hsv = cv2.cvtColor(img_in, cv2.COLOR_BGR2HSV)\n\n #color thresholding\n img_threshold = cv2.inRange(img_hsv,\n (self.ball_H_range[0], self.ball_S_range[0],\n self.ball_V_range[0]),\n (self.ball_H_range[1], self.ball_S_range[1],\n self.ball_V_range[1]))\n\n #Gaussian blur\n blur = cv2.GaussianBlur(img_threshold, (5, 5), 0)\n\n #find contours in blurred image\n image, contours, hierarchy = cv2.findContours(blur, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n\n #isolate largest contour (should be the ball)\n if len(contours) > 0:\n biggest_contour = max(contours, key=cv2.contourArea)\n\n #find smallest circles enclosing biggest contour\n (x, y), radius = cv2.minEnclosingCircle(biggest_contour) #smallest enclosing circle\n centerX = int(x) #center X coordinate\n centerY = int(y) #center Y coordinate\n radius = int(radius) #radius\n\n #draw circles on image at ball and hole locations\n img_circle = cv2.circle(img_in, (centerX, centerY), radius, (0, 0, 255), 2)\n img_circle = cv2.circle(img_in, (self.hole_X, self.hole_Y), self.hole_rad,\n (51, 255, 255), 2)\n\n #ball x- and y- coordinates\n self.ball_pose.position.x = centerX\n self.ball_pose.position.y = centerY\n\n print(\"pixel ideation\")\n print(\"x:{}\\ty:{}\".format(centerX,centerY))\n\n q = np.array([centerX,centerY]) \n off_set_to_game_orig = np.array([507,341])\n q = q + off_set_to_game_orig\n q[0] = -q[0] #flip handedness\n R = np.array([[0,1],[-1,0]]) #-90 degree rotation matrix\n q = np.matmul(R,q)\n q = q * 0.0023\n\n q[0] = q[0] + 0.455\n q[1] = q[1] + -0.494\n print(\"positional ideation\")\n print(\"x:{}\\ty:{}\".format(q[0],q[1]))\n\n\n # T = np.array([[0,-1,0,0.715],[-1,0,0,-0.0174],[0,0,-1,1.65],[0,0,0,1]])\n # b = np.array([centerX * 0.002,centerY * 0.002,1,1]) \n\n # a = np.matmul(T,b) \n # print(\"x:{}\\ty:{}\".format(a[0],a[1]))\n\n #publish ball position\n self.ball_pose_pub.publish(self.ball_pose)\n\n elif len(contours) == 0:\n img_circle = cv2.circle(img_in, (self.hole_X, self.hole_Y), self.hole_rad,\n (51, 255, 255), 2)\n\n #convert cv2 image to ROS image\n img_out = self.bridge.cv2_to_imgmsg(img_circle, \"bgr8\")\n\n #publish image for display\n self.image_pub.publish(img_out)", "title": "" } ]
[ { "docid": "d9a6b39ff0cc5578f9266d595f94fbd6", "score": "0.71530026", "text": "def detect_color(image):\n # Resize the frame, blur it, and convert it to the HSV color space\n frame = imutils.resize(image, width=FRAME_SIZE)\n\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n # For each color in dictionary check object in frame\n for key, value in upper.items():\n # Construct a mask for the color from dictionary`1, then perform\n # A series of dilations and erosions to remove any small\n # blobs left in the mask\n kernel = np.ones((9, 9), np.uint8)\n mask = cv2.inRange(hsv, lower[key], upper[key])\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n\n # find contours in the mask and initialize the current\n # (x, y) center of the ball\n contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n # only proceed if at least one contour was found\n if len(contours) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(contours, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size. Correct this value for your obect's size\n if MIN_RADIUS < radius < MAX_RADIUS:\n # draw the circle and centroid on the frame,\n # then update the list of tracked points\n cv2.circle(frame, (int(x), int(y)), int(radius), colors[key], 2)\n cv2.putText(frame, key + str(radius), (int(x - radius), int(y - radius)), cv2.FONT_HERSHEY_SIMPLEX, 0.6,\n colors[key], 2)\n\n object_size = np.pi * radius * radius\n if object_size <= SMALL:\n object_size = \"small\"\n elif object_size >= LARGE:\n object_size = \"large\"\n else:\n object_size = \"medium\"\n\n object_location = \"middle\"\n if center[0] <= LEFT:\n object_location = \"left\"\n elif center[0] >= RIGHT:\n object_location = \"right\"\n detected_object = detected_object(key, object_size, object_location)\n # cv2.imshow(\"Frame\", frame)\n return detected_object\n\n no_image = detected_object(\"none\", \"none\", \"none\")\n # cv2.imshow(\"Frame\", frame)\n return no_image", "title": "" }, { "docid": "9c5fd04ed3fd369ab6aed9a8dd2e0695", "score": "0.67433006", "text": "def light_detection(self, box, image):\n (startX, startY, endX, endY) = box\n cropped_frame = image[startY:endY, startX:endX]\n hsv = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2HSV)\n mask1 = cv2.inRange(hsv, self.lower_red1, self.upper_red1)\n mask2 = cv2.inRange(hsv, self.lower_red2, self.upper_red2)\n maskg = cv2.inRange(hsv, self.lower_green, self.upper_green)\n masky = cv2.inRange(hsv, self.lower_yellow, self.upper_yellow)\n maskr = cv2.add(mask1, mask2)\n\n self.size = cropped_frame.shape\n\n # Hough transformation applied to circles\n # Circular masked applied to each object (red, greeen, yellow)\n r_circles = cv2.HoughCircles(maskr, cv2.HOUGH_GRADIENT, 1, 80, param1=50, param2=10, minRadius=0, maxRadius=30)\n g_circles = cv2.HoughCircles(maskg, cv2.HOUGH_GRADIENT, 1, 60, param1=50, param2=10, minRadius=0, maxRadius=30)\n y_circles = cv2.HoughCircles(masky, cv2.HOUGH_GRADIENT, 1, 30, param1=50, param2=5, minRadius=0, maxRadius=30)\n\n red = self.detect(r_circles, maskr, \"Red\", cropped_frame)\n green = self.detect(g_circles, maskg, \"Green\", cropped_frame)\n yellow = self.detect(y_circles, masky, \"Yellow\", 
cropped_frame)\n\n result = int(red) + int(green) + int(yellow)\n if result > 1:\n return \"Error\"\n if red:\n return \"Red\"\n elif yellow:\n return \"Yellow\"\n elif green:\n return \"Green\"\n else:\n return \"None\"", "title": "" }, { "docid": "f1ac0998761a95d8774e845147fe4572", "score": "0.6661328", "text": "def trace_walls(im, r_thresh=240, g_thresh=100, b_thresh=100):\n red = im[:, :, 0] * 255\n green = im[:, :, 1] * 255\n blue = im[:, :, 2] * 255\n return (red > r_thresh) & (green < g_thresh) & (blue < b_thresh)", "title": "" }, { "docid": "6e1fa4c210960b5643005093ed99f404", "score": "0.6647707", "text": "def color_threshold(img):\n\timg_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\tmask1 = cv2.inRange(img_hsv, (0,50,20), (5,255,255))\n\tmask2 = cv2.inRange(img_hsv, (175,50,20), (180,255,255))\n\tlower_white = np.array([0,0,168])\n\tupper_white = np.array([230,240,255])\n\tmask3 = cv2.inRange(img_hsv,lower_white,upper_white)\n\tmask = cv2.bitwise_or(mask1,mask2)\n\tmask = cv2.bitwise_or(mask3,mask)\n\toutput = cv2.bitwise_and(img,img,mask=mask)\n\n\tkernel = np.ones([3,3])\n\timg_erosion = cv2.erode(output,kernel,iterations=5)\n\timg_dilation = cv2.dilate(img_erosion,kernel,iterations=3)\n\toutput = img_dilation\n\n\t# cv2.imshow('threshold',output)\n\t# cv2.waitKey(0)\n\n\treturn output", "title": "" }, { "docid": "d1e25c9f0bcb414ada47d0057a9a9f2d", "score": "0.6631537", "text": "def color_detector(self, image):\n\n states = [TrafficLight.GREEN, TrafficLight.YELLOW, TrafficLight.RED]\n\n best_state = TrafficLight.UNKNOWN\n for state in states:\n\n _image = image.copy()\n\n if state == TrafficLight.RED:\n _image = _image[int(image.shape[0] * 0.2) - 3: int(image.shape[0] * 0.2) + 3,\n int(image.shape[1] * 0.5) - 3: int(image.shape[1] * 0.5) + 3]\n if state == TrafficLight.YELLOW:\n _image = _image[int(image.shape[0] * 0.55) - 3: int(image.shape[0] * 0.55) + 3,\n int(image.shape[1] * 0.5) - 3: int(image.shape[1] * 0.5) + 3]\n if state == TrafficLight.GREEN:\n _image = _image[int(image.shape[0] * 0.85) - 3: int(image.shape[0] * 0.85) + 3,\n int(image.shape[1] * 0.5) - 3: int(image.shape[1] * 0.5) + 3]\n\n if _image[np.where(np.squeeze(_image) > 250)].shape[0] > 15:\n best_state = state\n\n return best_state", "title": "" }, { "docid": "72442ac82755b473880613708f06dec7", "score": "0.66144353", "text": "def color_threshold(image, sthresh=(0, 255), vthresh=(0, 255), lthresh=(0, 255)):\n\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n s_channel = hls[:, :, 2]\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= sthresh[0]) & (s_channel <= sthresh[1])] = 1\n\n l_channel = hls[:, :, 1]\n l_binary = np.zeros_like(l_channel)\n l_binary[(l_channel >= lthresh[0]) & (l_channel <= lthresh[1])] = 1\n\n hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n v_channel = hsv[:, :, 2]\n v_binary = np.zeros_like(v_channel)\n v_binary[(v_channel >= vthresh[0]) & (v_channel <= vthresh[1])] = 1\n\n output = np.zeros_like(s_channel)\n output[(s_binary == 1) & (v_binary == 1) & (l_binary == 1)] = 1\n return output", "title": "" }, { "docid": "f08a4d5ddb5cb0caa5b3f952253f0093", "score": "0.6452521", "text": "def detect_kulka(frame, diff):\n # Erosion/dilatation morphology filtering:\n # Erosion removes small artefacts, but \"thins\" our difference.\n # Dilatation brings \"thickness\" back, but not on removed artefacts.\n kernel = np.ones((5,5),np.uint8)\n diff = cv2.erode(diff, kernel, iterations=1)\n diff = cv2.dilate(diff, kernel, iterations=3)\n\n # Treshold \"diff\" to get a \"mask\" with our ball.\n 
status, mask = cv2.threshold(diff, 20, 255,\n cv2.THRESH_BINARY)\n\n # Convert BGR frame to HSV to get \"Hue\".\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # Calculate histogram of the Hue (channel 0 of HSV)\n h_hist = cv2.calcHist([hsv], channels=[0],\n mask=mask, histSize=[6],\n ranges=[0, 179])\n\n # Heuristic to differentiate red and non-red color (edges of histogram vs center)\n edge = h_hist[0] + h_hist[-1]\n center = sum(h_hist) - edge\n\n if abs(edge - center) < 200:\n # Difference not big enough.\n print(\"INVALID KULKA\", edge, center)\n return None\n if edge > 2000:\n # Red ball found.\n print(\"RED KULKA\", edge, center)\n servo.set(1800)\n else:\n # Non-red ball found.\n print(\"NON-RED KULKA\", edge, center)\n servo.set(1500)", "title": "" }, { "docid": "0f0b3ae7e708df4693cecc6470e758fe", "score": "0.64431417", "text": "def detector_blue(src):\r\n return detector(src, 'blue_circle')", "title": "" }, { "docid": "de995437574b319481001f113157f16c", "score": "0.6347815", "text": "def detect(self):\n if self.cv_image == None:\n return\n my_image = deepcopy(self.cv_image)\n \n thresholded = cv2.inRange(my_image,\n (self.blue_lower,self.green_lower,self.red_lower),\n (self.blue_upper,self.green_upper,self.red_upper))\n moments = cv2.moments(thresholded)\n if moments['m00'] != 0:\n #self.center_x, self.center_y = moments['m10']/moments['m00'], moments['m01']/moments['m00']\n cv2.circle(my_image,(int(self.center_x), int(self.center_y)), 5, (255,0,0))\n cv2.imshow('tracking_window', thresholded)\n cv2.imshow(\"camera_image\", my_image)\n cv2.waitKey(20)", "title": "" }, { "docid": "078caca0c0905d9a31b503ab86f8b138", "score": "0.6308924", "text": "def light_detect_incet(image):\n light_color=[]\n h,w,_=image.shape\n\n red_thresh=cv2.inRange(image[0:np.int16(h/3),:,:],np.array([20,20,240]),np.array([60,60,255]))\n yellow_thresh=cv2.inRange(image[np.int16(h/3):np.int16(2*h/3),:,:],np.array([20,200,200]),np.array([60,255,255]))\n green_thresh=cv2.inRange(image[np.int16(2*h/3):h,:,:],np.array([40,215,20]),np.array([100,255,60]))\n\n light_color.append(np.sum(red_thresh[:]))\n light_color.append(np.sum(yellow_thresh[:]))\n light_color.append(np.sum(green_thresh[:]))\n\n # cv2.imwrite('red_thresh.png',red_thresh)\n # cv2.imwrite('yellow_thresh.png', yellow_thresh)\n # cv2.imwrite('green_thresh.png', green_thresh)\n\n if np.max(light_color)==0:\n return 'GREEN'\n\n light_states={0:'RED',1:'YELLOW',2:'GREEN'}\n return light_states[np.argmax(light_color)]", "title": "" }, { "docid": "242e6e39f67bc19fda80c69a571e0a62", "score": "0.63076335", "text": "def classify_circle(circle, brightness_threshold):\n\n brightness = circle[3]\n return 1 if brightness < brightness_threshold else 0", "title": "" }, { "docid": "ac8fab2281d8cad19164c59f386e6bf4", "score": "0.6273426", "text": "def threshold(self):\n\t\t# CV2 settings for Otsu's binary thresholding\n\t\tthresh_settings = cv2.THRESH_BINARY+cv2.THRESH_OTSU\n\t\t# threshold returns the Otsu threshold value and threshed image array\n\t\tt_value, t_img = cv2.threshold(self.image, 0, 255, thresh_settings)\n\t\t# Check four corners of image; the most common color there is likely\n\t\t# to be the background color\n\t\tcorners = (t_img[0][0], t_img[-1][-1], t_img[0][-1], t_img[-1][0])\n\t\t# Get the most common corner pixel (any if tied)\n\t\tif Counter(corners).most_common(1)[0][0] <= t_value:\n\t\t\t# If background is darker than the threshold value, invert black/white\n\t\t\t# In cases where the Otsu technique was not needed, t_value will be 
0.\n\t\t\tt_img = cv2.bitwise_not(t_img)\n\t\tself.image = t_img", "title": "" }, { "docid": "6aae0f447492211cd6e4b507c67b4dbb", "score": "0.62695223", "text": "def detector_green(src):\r\n return detector(src, 'green_circle')", "title": "" }, { "docid": "639da0d39a3f022bd13158721d1e2a39", "score": "0.6262569", "text": "def get_classification(self, image):\n\n box = self.get_box(image)\n #rospy.logerr(\"got image\")\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n if not box:\n #rospy.logerr(\"Couldn't locate lights\")\n return TrafficLight.UNKNOWN\n i = 0\n class_image = cv2.resize(img[box[0]:box[2], box[1]:box[3]], (32, 32))\n\timg_hsv=cv2.cvtColor(class_image, cv2.COLOR_RGB2HSV)\n\n\tlower_red = np.array([0,50,50])\n\tupper_red = np.array([10,255,255])\n\tmask0 = cv2.inRange(img_hsv, lower_red, upper_red)\n\n\tlower_red = np.array([160,50,50])\n\tupper_red = np.array([180,255,255])\n\tmask1 = cv2.inRange(img_hsv, lower_red, upper_red)\n\t\n\tmask = mask0+mask1\n\n\toutput_img = class_image.copy()\n\toutput_img[np.where(mask==0)] = 0\n\t\n\tmask2 = cv2.inRange(img_hsv, (36, 25, 25), (70, 255,255))\n\toutput_img2 = class_image.copy()\n\toutput_img2[np.where(mask2==0)] = 0\n\t\n\tred_count = cv2.countNonZero(output_img[:, :, 0]) \n\tgreen_count = cv2.countNonZero(output_img2[:, :, 1]) \n #print('red_count', red_count, 'green_count', green_count)\n if red_count > green_count:\n rospy.loginfo('RED')\n return TrafficLight.RED\n else:\n rospy.loginfo('GREEN')\n return TrafficLight.GREEN", "title": "" }, { "docid": "a00d574ac8b423f6d3c2de68fac85292", "score": "0.6198463", "text": "def light_detect_image(sess,image,threshold):\n img_boxes,img_scores,img_classes=general_detect(sess, image)\n img_boxes=threshold_boxes(threshold,img_boxes,img_scores,img_classes)\n img_incets=get_image_incets(image,img_boxes)\n # i=0\n # for incet in img_incets:\n # cv2.imwrite('firstincet%d.png'%i,incet)\n # i=i+1\n #rospy.logerr('tl_classifer,len incets:%d'%(len(img_incets)))\n lights=[]\n for incet in img_incets:\n lights.append(light_detect_incet(incet))\n\n return lights", "title": "" }, { "docid": "289fe0690fdd50de98a1ec507c0ca577", "score": "0.6156104", "text": "def box_follows_hstart(self,image,black_start,threshold):\n dark_count = 0\n test_vline = image.crop((black_start + 2,0,black_start+3,image.size[1]-1))\n test_data = vline.getdata()\n for datum in test_data:\n try:\n datum = datum[0]\n except:\n pass\n if datum < 128:\n dark_count += 1\n if dark_count > threshold: return True\n return False", "title": "" }, { "docid": "2898eb93b38d69e1bac0726aa3aa6b3e", "score": "0.6118568", "text": "def create_binary_image(image):\n\n def bin_it(image, threshold):\n output_bin = np.zeros_like(image)\n output_bin[(image >= threshold[0]) & (image <= threshold[1])] = 1\n return output_bin\n\n # convert image to hls colour space\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float)\n\n # binary threshold values\n bin_thresh = [20, 255]\n\n # rgb thresholding for yellow\n lower = np.array([225, 180, 0], dtype=\"uint8\")\n upper = np.array([255, 255, 170], dtype=\"uint8\")\n mask = cv2.inRange(image, lower, upper)\n rgb_y = cv2.bitwise_and(image, image, mask=mask).astype(np.uint8)\n rgb_y = cv2.cvtColor(rgb_y, cv2.COLOR_RGB2GRAY)\n rgb_y = bin_it(rgb_y, bin_thresh)\n\n # rgb thresholding for white (best)\n lower = np.array([100, 100, 200], dtype=\"uint8\")\n upper = np.array([255, 255, 255], dtype=\"uint8\")\n mask = cv2.inRange(image, lower, upper)\n rgb_w = cv2.bitwise_and(image, image, 
mask=mask).astype(np.uint8)\n rgb_w = cv2.cvtColor(rgb_w, cv2.COLOR_RGB2GRAY)\n rgb_w = bin_it(rgb_w, bin_thresh)\n\n # hls thresholding for yellow\n lower = np.array([20, 120, 80], dtype=\"uint8\")\n upper = np.array([45, 200, 255], dtype=\"uint8\")\n mask = cv2.inRange(hls, lower, upper)\n hls_y = cv2.bitwise_and(image, image, mask=mask).astype(np.uint8)\n hls_y = cv2.cvtColor(hls_y, cv2.COLOR_HLS2RGB)\n hls_y = cv2.cvtColor(hls_y, cv2.COLOR_RGB2GRAY)\n hls_y = bin_it(hls_y, bin_thresh)\n\n im_bin = np.zeros_like(hls_y)\n im_bin[(hls_y == 1) | (rgb_y == 1) | (rgb_w == 1)] = 1\n\n return im_bin", "title": "" }, { "docid": "ee3ed9e3ffb79283672a3ada6cd6b52a", "score": "0.61108655", "text": "def check_status(self):\n img = self.get_image()\n # Get the rows, columns, and channel values from the image\n\t\trows,columns,channels = img.shape\n\t\t# Convert image to greyscale\n\t\tbw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Process image using Binary Thresholding\n\t\tif lane_type:\n\t\t\tret,thresh = cv2.threshold(bw, 235, 255, cv2.THRESH_BINARY)\n\t\telse:\n\t\t\tret,thresh = cv2.threshold(bw, 100, 255, cv2.THRESH_BINARY_INV)\n\t\t# Initial values\n\t\tprog = 0\n\t\tstart1_low = 0\n\t\tstart2_low = 0\n\t\tend1_low = 0\n\t\tend2_low = 0\n\t\tstart1_high = 0\n\t\tstart2_high = 0\n\t\tend1_high = 0\n\t\tend2_high = 0\n\t\tlow = 200\n\t\thigh = 140\n\n\t\t# Detect potential lane markings at the 'low' pixel row value\n\t\tfor i in range(0, columns):\n\t\t\t# Uncomment the below line to draw a line on the row that is being detected:\n\t\t\t#thresh.itemset((low, i), 155)\n\t\t\tif(thresh.item(low, i) is 255 and prog is 0):\n\t\t\t\tstart1_low = i\n\t\t\t\tprog = 1\n\t\t\telif(thresh.item(low, i) is 0 and prog is 1):\n\t\t\t\tend1_low = i\n\t\t\t\tprog = 2\n\t\t\telif(thresh.item(low, i) is 255 and prog is 2):\n\t\t\t\tstart2_low = i\n\t\t\t\tprog = 3\n\t\t\telif(thresh.item(low, i) is 0 and prog is 3):\n\t\t\t\tend2_low = i\n\t\t\t\tprog = 4\n\t\t\tthresh.itemset((low, i), 155)\n\t\tprog = 0\n\n\t\t# Detect potential lane markings at the 'high' pixel row value\n\t\tfor j in range(0, columns):\n\t\t\t# Uncomment the below line to draw a line on the row that is being detected:\n\t\t\t#thresh.itemset((high, j), 155)\n\t\t\tif(thresh.item(high, j) is 255 and prog is 0):\n\t\t\t\tstart1_high = j\n\t\t\t\tprog = 1\n\t\t\telif(thresh.item(high, j) is 0 and prog is 1):\n\t\t\t\tend1_high = j\n\t\t\t\tprog = 2\n\t\t\telif(thresh.item(high, j) is 255 and prog is 2):\n\t\t\t\tstart2_high = j\n\t\t\t\tprog = 3\n\t\t\telif(thresh.item(high, j) is 0 and prog is 3):\n\t\t\t\tend2_high = j\n\t\t\t\tprog = 4\n\t\t\tthresh.itemset((high, j), 155)\n\n\t\t# Condense the two-point lane detection to a single point at the midpoint of the lane\n\t\tif start1_low != 0 and end1_low != 0:\n\t\t\tlow_1 = (start1_low + end1_low) / 2\n\t\telse:\n\t\t\tlow_1 = 0\n\t\tif start2_low != 0 and end2_low != 0:\n\t\t\tlow_2 = (start2_low + end2_low) / 2\n\t\telse:\n\t\t\tlow_2 = 0\n\t\tif start1_high != 0 and end1_high != 0:\n\t\t\thigh_1 = (start1_high + end1_high) / 2\n\t\telse:\n\t\t\thigh_1 = 0\n\t\tif start2_high != 0 and end2_high != 0:\n\t\t\thigh_2 = (start2_high + end2_high) / 2\n\t\telse:\n\t\t\thigh_2 = 0\n\n\t\t# If only one lane is detected, determine which direction it is 'slanting' to tell the car which way to go\n\t\tif high_2 == 0 or low_2 == 0:\n\t\t\tif high_1 != 0 and low_1 != 0:\n\t\t\t\tif high_1 > low_1:\n\t\t\t\t\t# TODO: Turn Right\n\t\t\t\t\treturn .75\n\t\t\t\telif low_1 > high_1:\n\t\t\t\t\t# TODO: Turn 
Left\n\t\t\t\t\treturn -.75\n\t\t# Case where lanes are detected on both lines. [Using midpoint for preemptive adjustments]\n\t\telif high_1 != 0 and high_2 != 0 and low_1 != 0 and low_2 != 0:\n\t\t\t# Default forward state\n\t\t\tmidpoint = (high_1 + high_2) / 2\n\t\t\tif midpoint >= (columns / 2) + 50:\n\t\t\t\t# TODO: Turn Right\n\t\t\t\treturn .25\n\t\t\telif midpoint < (columns / 2) - 50:\n\t\t\t\t# TODO: Turn Left\n\t\t\t\treturn -.25\n\t\t\telse:\n\t\t\t\t# TODO: Continue Straight\n\t\t\t\treturn 0\n return", "title": "" }, { "docid": "51a26aa2b3f499ffdc8e386cefe0bcf9", "score": "0.6097198", "text": "def prop_dark(img, threshold):\n if len(img.shape) != 2:\n raise ValueError(\"img must be two dimensional. Shape: {}\".format(img.shape))\n return (img < threshold).sum() / (img.shape[0] * img.shape[1])", "title": "" }, { "docid": "645ba87f230958e6da9cbf2b627b4c7f", "score": "0.60849065", "text": "def _thresholding(self, img_gray):\n if self.colorbot:\n img_dark = 255*np.ones((192, 256))\n img_cb = np.multiply(img_dark-img_gray, self.mask_cb)\n blob_pixels = np.where(img_cb > 50) #fb play with value\n else:\n blob_pixels = np.where(img_gray > self.thresh)\n blob_pixels = np.asarray(blob_pixels)\n\n return blob_pixels", "title": "" }, { "docid": "818132b353aa1badd9d9b91856f90fc2", "score": "0.6076555", "text": "def detect(self, image):\r\n # Todo: implement object detection logic\r\n input = self.preprocess_image(image)\r\n output = self.network.predict(input)\r\n ball_detection, post1_detection, post2_detection = self.process_yolo_output(output)\r\n return ball_detection, post1_detection, post2_detection", "title": "" }, { "docid": "40c83259fcb5912ac9308d3128b43aa5", "score": "0.6049595", "text": "def get_classification(self, image):\r\n\r\n hsv_image = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)\r\n\r\n # red has hue 0 - 10 & 160 - 180 add another filter \r\n\r\n # TODO use Guassian mask\r\n\r\n R_min1 = np.array([0, 100, 100],np.uint8)\r\n\r\n R_max1 = np.array([10, 255, 255],np.uint8) \r\n\r\n\r\n\r\n R_min2 = np.array([160, 100, 100],np.uint8)\r\n\r\n R_max2 = np.array([179, 255, 255],np.uint8)\r\n\r\n\r\n\r\n threshed1 = cv2.inRange(hsv_image, R_min1, R_max1) \r\n\r\n threshed2 = cv2.inRange(hsv_image, R_min2, R_max2) \r\n\r\n if cv2.countNonZero(threshed1) + cv2.countNonZero(threshed2) > 47:\r\n\r\n return TrafficLight.RED\r\n\r\n\r\n\r\n Y_min = np.array([40.0/360*255, 100, 100],np.uint8)\r\n\r\n Y_max = np.array([66.0/360*255, 255, 255],np.uint8)\r\n\r\n threshed3 = cv2.inRange(hsv_image, Y_min, Y_max)\r\n\r\n if cv2.countNonZero(threshed3) > 47:\r\n\r\n return TrafficLight.YELLOW\r\n\r\n\r\n\r\n G_min = np.array([90.0/360*255, 100, 100],np.uint8)\r\n\r\n G_max = np.array([140.0/360*255, 255, 255],np.uint8)\r\n\r\n threshed4 = cv2.inRange(hsv_image, G_min, G_max)\r\n\r\n if cv2.countNonZero(threshed4) > 47:\r\n\r\n return TrafficLight.GREEN\r\n\r\n\r\n\r\n\r\n\r\n return TrafficLight.UNKNOWN", "title": "" }, { "docid": "ed58a472e53ff035f07e160f423448c1", "score": "0.6022678", "text": "def threshold_value(img):\n\n is_color = len(img.shape) == 3\n is_grey = len(img.shape) == 2\n\n if is_color:\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n elif is_grey:\n gray = img.copy()\n\n slices = gray.mean(axis = 1)[20:gray.shape[0]-30]\n is_white = any(x > 0.9*255 for x in slices)\n if is_white:\n return 0.98\n else:\n return 0.9", "title": "" }, { "docid": "77ab4fea63066d20e89505a5bd304298", "score": "0.60175616", "text": "def thresh(\n img: np.ndarray,\n threshold=127,\n max_value=255,\n 
kernel_size=3,\n thresh_type=cv2.THRESH_BINARY_INV,\n) -> np.ndarray:\n img = to_gray(img)\n img = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n return cv2.threshold(img, threshold, max_value, thresh_type)[1]", "title": "" }, { "docid": "459b4ae4bb9925ce88eaa47966eba06f", "score": "0.60122377", "text": "def ballHit(state):\n paddle_xpos = state[TOP_PADDLE_ROW, SCREEN_L:SCREEN_R, 0]\n paddlex = next((i for i, x in enumerate(paddle_xpos) if x), MIDDLE_X)\n\n ball_xpos = np.sum(state[BOTTOM_BLOCK_ROW:TOP_PADDLE_ROW, SCREEN_L:SCREEN_R, 0], axis=0)\n ballx = next((i for i, x in enumerate(ball_xpos) if x != 0), MIDDLE_X)\n\n ball_ypos = np.sum(state[BOTTOM_BLOCK_ROW:TOP_PADDLE_ROW, SCREEN_L:SCREEN_R, 0], axis=1)\n # unlike featureExtractor, add BOTTOM_BLOCK_ROW because bally otherwise offset relative to TOP_PADDLE_ROW\n # (don't need to do this for ballx and paddlex because they're in the same frame of reference (only black space considered))\n bally = next((i for i, x in enumerate(ball_ypos) if x != 0), MIDDLE_Y) + BOTTOM_BLOCK_ROW\n\n ##reward if exact hit: ballx matches paddlex and bally matches paddle y\n if ballx - paddlex < PADDLE_LEN and ballx - paddlex > 0 and abs(bally - TOP_PADDLE_ROW) < 10 :\n ## alternatively, reward if ball really close to hitting paddle\n #if abs(ballx - paddlex) < 20 and abs(bally - TOP_PADDLE_ROW) < 15 :\n return True\n return False", "title": "" }, { "docid": "63250fafa4bd17ad15abb4cce75d36a0", "score": "0.5995494", "text": "def box_follows_hstart(self,image,black_start,threshold):\n dark_count = 0\n test_hline = image.crop((0,black_start + 2,image.size[0]-1,black_start + 3))\n test_data = hline.getdata()\n for datum in test_data:\n try:\n datum = datum[0]\n except:\n pass\n if datum < 128:\n dark_count += 1\n if dark_count > threshold: return True\n return False", "title": "" }, { "docid": "afdd291f1b96b5eaec748c3266fba2b7", "score": "0.59946936", "text": "def solarize(img, threshold):\n \n for x, y, col in img:\n\n # Invert the values of all RGB components less than 128,\n # leaving components with higher values unchanged.\n\n red, green, blue = col\n\n if red < threshold:\n red = 255 - red\n\n if green < threshold:\n green = 255 - green\n\n if blue < threshold:\n blue = 255 - blue\n\n col = create_color(red, green, blue)\n set_color(img, x, y, col)", "title": "" }, { "docid": "10ac05cffd256be9722d70c6a05ce7e6", "score": "0.596265", "text": "def cany_oper(image):\n\tkernel_size = 3\n\tlow_threshold = 1\n\thigh_threshold = 10\n\n\tgray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\tblur_gray = cv2.GaussianBlur(gray_img,(kernel_size, kernel_size), 0)\n\n\tedges = cv2.Canny(blur_gray, low_threshold, high_threshold)\n\n\treturn edges", "title": "" }, { "docid": "194648d010fff9a6550977349c14472a", "score": "0.59609044", "text": "def solarize(img, threshold):\n\n for x, y, col in img:\n\n # Invert the values of all RGB components less than 128,\n # leaving components with higher values unchanged.\n\n red, green, blue = col\n\n if red < threshold:\n red = 255 - red\n\n if green < threshold:\n green = 255 - green\n\n if blue < threshold:\n blue = 255 - blue\n\n col = create_color(red, green, blue)\n set_color(img, x, y, col)", "title": "" }, { "docid": "4750f0d4809efe60218d7e2204c13fe1", "score": "0.59552777", "text": "def _color_thresh(img, rgb_thresh=(160, 160, 160)):\n # Create an array of zeros same xy size as img, but single channel\n color_select = np.zeros_like(img[:, :, 0])\n\n # Require that each pixel be above all three threshold values in RGB\n # 
above_thresh will now contain a boolean array with \"True\"\n # where threshold was met\n above_thresh = (img[:, :, 0] > rgb_thresh[0]) \\\n & (img[:, :, 1] > rgb_thresh[1]) \\\n & (img[:, :, 2] > rgb_thresh[2])\n\n # Index the array of zeros with the boolean array and set to 1\n color_select[above_thresh] = 1\n # Return the binary image\n return color_select", "title": "" }, { "docid": "bc90d3db20fc995785b9337edda7d3b7", "score": "0.5954343", "text": "def hit_detection(Ball, bricks):\n hit = (Ball.x+Ball.radius) > bricks.x and (Ball.x-Ball.radius) < bricks.x+bricks.width\\\n and (Ball.y < bricks.y+bricks.height) and (Ball.y+Ball.radius) > bricks.y\n return hit", "title": "" }, { "docid": "78b5e73adb7798f3e5b6f416aece0df4", "score": "0.5953951", "text": "def __hsl_threshold(input, hue, sat, lum):\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))", "title": "" }, { "docid": "b821c35dce99c6d07ccf951b022c45e1", "score": "0.5942114", "text": "def threshold_test(img):\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n img_lap = cv.Laplacian(img_gray, cv.CV_64F, ksize=5)\n abs_sobel64f = np.absolute(img_lap)\n sobel_8u = np.uint8(abs_sobel64f)\n blurred = cv.blur(sobel_8u, (3, 3))\n thresh, output_binthresh = cv.threshold(blurred, 28, 255, cv.THRESH_BINARY)\n\n output_adapthresh = cv.adaptiveThreshold(img_gray, 255.0, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, -20.0)\n cv.imshow(\"Adaptive Thresholding\", output_adapthresh)\n cv.waitKey(0)\n return output_adapthresh", "title": "" }, { "docid": "80279d2b8ad41b7a88fc17b22ff357c5", "score": "0.59312123", "text": "def color_threshold(image, min, max): \n c1_mask = channel_threshold(image[:,:,0], min=min[0], max=max[0])\n c2_mask = channel_threshold(image[:,:,1], min=min[1], max=max[1])\n c3_mask = channel_threshold(image[:,:,2], min=min[2], max=max[2])\n binary = np.zeros_like(c1_mask)\n binary[(c1_mask == 1) & (c2_mask==1) & (c3_mask==1)] = 1\n return binary", "title": "" }, { "docid": "268109b1f182d38683656b937eeff8f9", "score": "0.5925151", "text": "def get_classification(self, image_bgr):\n # TODO implement light color prediction\n image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)\n pimg = np.expand_dims(image_rgb, axis=0)\n fetches = [\n self.boxes, self.scores, self.classes, self.num_detections]\n tic = rospy.get_time()\n (boxes, scores, classes, num) = self.sess.run(\n fetches,\n feed_dict={self.image: pimg})\n toc = rospy.get_time()\n if TIME_EXECUTION:\n rospy.logwarn(\"Detection took %f secs\" % (toc - tic))\n debug_image = image_rgb.copy()\n tl_indics = (classes == self.traffic_light_id)\n tl_boxes = boxes[tl_indics]\n tl_scores = scores[tl_indics]\n if self.reduce_computation:\n # use only the higest scoring box\n if len(tl_scores) != 0:\n highest_score = np.argmax(scores)\n tl_boxes = tl_boxes[highest_score: highest_score + 1]\n tl_scores = [tl_scores[highest_score]]\n\n tl_state = TrafficLight.UNKNOWN\n tl_text = 'Unknown'\n tl_color = (0, 0, 0)\n for i, tl_box in enumerate(tl_boxes):\n if tl_scores[i] > self.min_conf_thresh:\n h, w = debug_image.shape[:2]\n tl = (int(w * tl_boxes[i, 1]), int(h * tl_boxes[i, 0]))\n br = (int(w * tl_boxes[i, 3]), int(h * tl_boxes[i, 2]))\n area = (br[1] - tl[1]) * (br[0] - tl[0])\n if area < 12:\n continue\n\n patch = cv2.resize(\n image_rgb[tl[1]: br[1], tl[0]: br[0]], (32, 64))\n patch = np.expand_dims(patch, axis=0)\n predicted_prob = self.sess.run(\n self.classifier_out,\n feed_dict={self.classifier_in: 
patch}).squeeze()\n predicted_light = predicted_prob.argmax(axis=-1)\n max_prob = predicted_prob[predicted_light]\n if predicted_light == 0:\n tl_state = TrafficLight.UNKNOWN\n tl_text = 'Unknown: %.2f' % max_prob\n tl_color = (255, 255, 255)\n elif predicted_light == 1:\n tl_state = TrafficLight.GREEN\n tl_text = 'Green: %.2f' % max_prob\n tl_color = (0, 255, 0)\n elif predicted_light == 2:\n tl_state = TrafficLight.YELLOW\n tl_text = 'Yellow: %.2f' % max_prob\n tl_color = (255, 255, 0)\n elif predicted_light == 3:\n tl_state = TrafficLight.RED\n tl_text = 'Red: %.2f' % max_prob\n tl_color = (255, 0, 0)\n\n tl_center = (int(0.5 * (br[0] + tl[0])),\n int(0.5 * (br[1] + tl[1])))\n put_text(debug_image, tl_text, tl_center, tl_color)\n cv2.rectangle(\n debug_image, tl, br, tl_color, thickness=2)\n return tl_state, debug_image", "title": "" }, { "docid": "c6b199e9f3e4b2817d2a5092ef38cc58", "score": "0.592151", "text": "def detect_object(self, color):\n\n\n # define the lower and upper boundaries of the basic colors\n GREEN_RANGE = ((29, 86, 6), (64, 255, 255))\n RED_RANGE = ((139, 0, 0), (255, 160, 122))\n BLUE_RANGE = ((0, 128, 128), (65, 105, 225))\n\n # initialize the values in case there is no object\n x_position = 0\n y_position = 0\n size = 0\n\n # chose the color to find\n if color == \"red\":\n color_range = RED_RANGE\n if color == \"green\":\n color_range = GREEN_RANGE\n if color == \"blue\":\n color_range = BLUE_RANGE\n\n # get image type from camera\n image = self.__cameraFrontal.getImage()\n\n # apply color filters to the image\n filtered_image = cv2.inRange(image.data, color_range[0], color_range[1])\n rgb = cv2.cvtColor(image.data, cv2.COLOR_BGR2RGB)\n\n\n # Apply threshold to the masked image\n ret,thresh = cv2.threshold(filtered_image,127,255,0)\n im,contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # Find the index of the largest contour\n for c in contours:\n if c.any != 0:\n areas = [cv2.contourArea(c) for c in contours]\n max_index = np.argmax(areas)\n cnt=contours[max_index]\n if max(areas) > 0.0:\n x,y,w,h = cv2.boundingRect(cnt)\n cv2.rectangle(rgb,(x,y),(x+w,y+h),(0,255,0),2)\n x_position = (w/2)+x\n y_position = (h/2)+y\n size = w*h\n\n # show the frame to our screen\n cv2.imshow(\"Frame\", rgb)\n print \"frame\"\n key = cv2.waitKey(1) & 0xFF\n\n return size, x_position, y_position", "title": "" }, { "docid": "ae33b610f2ed668531b7f72cc65484af", "score": "0.591236", "text": "def rock_thresh(img):\n # Convert BGR to HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV, 3)\n\n # Define range of yellow colors in HSV\n yellow_lower_bound = np.array([20, 150, 100], dtype='uint8')\n yellow_upper_bound = np.array([50, 255, 255], dtype='uint8')\n\n # Threshold the HSV image to get only yellow colors\n mask = cv2.inRange(hsv, yellow_lower_bound, yellow_upper_bound)\n return mask", "title": "" }, { "docid": "c880380d58ab87bef55c17cea7294408", "score": "0.5903217", "text": "def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n\t# Apply the following steps to img\n\t# 1) Convert to grayscale\n\tgray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\t\n\t# 2) Take the gradient in x and y separately\n\tsobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n\tsobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n\t\n\t# 3) Take the absolute value of the x and y gradients\n\t# and use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient \n\tdir_sobel = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n\n\t# 
4) Create a binary mask where direction thresholds are met\n\tbinary_output = np.zeros_like(gray)\n\tbinary_output[(dir_sobel >= thresh[0] ) & (dir_sobel <= thresh[1])] = 1\n\n\t# 5) Return this mask as your binary_output image\n\treturn binary_output", "title": "" }, { "docid": "69ba2761b552f17042d5edb8533d0ab2", "score": "0.5861191", "text": "def thresholded_image(self, image):\n # convert image to hsv\n image_hsv = cv.CreateImage(cv.GetSize(image), image.depth, 3)\n cv.CvtColor(image, image_hsv, cv.CV_BGR2HSV)\n # threshold the image\n image_threshed = cv.CreateImage(cv.GetSize(image), image.depth, 1)\n cv.InRangeS(image_hsv, self.min_thresh, self.max_thresh, image_threshed)\n return image_threshed", "title": "" }, { "docid": "0b58ce8442604eaa65b649450c8d953e", "score": "0.5841043", "text": "def get_mask(img,color,inRange=[5,50,50]):\n\tframe = img.copy()\n\thsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n\tl_color=np.array([i-j for i,j in zip(color,inRange)])\n\tu_color=np.array([i+j for i,j in zip(color,inRange)])\n\tmask=cv2.inRange(hsv, l_color, u_color)\n\tblur_mask=cv2.blur(mask, (5,5))\n\tret,thresh_mask = cv2.threshold(blur_mask,200,255,cv2.THRESH_BINARY)\n\n\n\treturn thresh_mask", "title": "" }, { "docid": "9f54ba0cbd50e9dc5f12802659a879e9", "score": "0.58368117", "text": "def sketch_image(img):\n # Convert to gray scale\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Apply median filter\n img_gray = cv2.medianBlur(img_gray, 5)\n\n # Detect edges using cv2.Laplacian()\n edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=5)\n\n # Threshold the edges image:\n ret, thresholded = cv2.threshold(edges, 70, 255, cv2.THRESH_BINARY_INV)\n\n return thresholded", "title": "" }, { "docid": "f98672f3addecc04a096ba8992d27c72", "score": "0.58329284", "text": "def get_classification(self, image):\n #set the default state as UNKNOWN\n state = TrafficLight.UNKNOWN\n\n img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n\n lower_red = np.array([0,50,50])\n upper_red = np.array([10,255,255])\n red1 = cv2.inRange(img_hsv, lower_red , upper_red)\n\n lower_red = np.array([170,50,50])\n upper_red = np.array([180,255,255])\n red2 = cv2.inRange(img_hsv, lower_red , upper_red)\n\n red_converted_img = cv2.addWeighted(red1, 1.0, red2, 1.0, 0.0)\n\n\n lower_green = np.array([50,50,50])\n upper_green = np.array([70,255,255])\n green_converted_img = cv2.inRange(img_hsv, lower_green, upper_green)\n\n\n lower_yellow = np.array([20,50,50])\n upper_yellow = np.array([40,255,255])\n yellow_converted_img = cv2.inRange(img_hsv, lower_yellow, upper_yellow)\n\n\n red_blur_img = cv2.GaussianBlur(red_converted_img,(15,15),cv2.BORDER_DEFAULT)\n green_blur_img = cv2.GaussianBlur(green_converted_img,(15,15),cv2.BORDER_DEFAULT)\n yellow_blur_img = cv2.GaussianBlur(yellow_converted_img,(15,15),cv2.BORDER_DEFAULT)\n\n\n # Finds circles in a grayscale image using the Hough transform\n # https://docs.opencv.org/2.4/modules/imgproc/doc/feature_detection.html#houghcircles\n red_circles = cv2.HoughCircles(red_blur_img,cv2.HOUGH_GRADIENT,1,40, param1=70,param2=30,minRadius=10,maxRadius=150)\n green_circles = cv2.HoughCircles(green_blur_img,cv2.HOUGH_GRADIENT,1,40, param1=70,param2=30,minRadius=10,maxRadius=150)\n yellow_circles = cv2.HoughCircles(yellow_blur_img,cv2.HOUGH_GRADIENT,1,40, param1=70,param2=30,minRadius=10,maxRadius=150)\n\n\n if red_circles is not None:\n state = TrafficLight.RED\n elif green_circles is not None:\n state = TrafficLight.GREEN\n elif yellow_circles is not None:\n state = TrafficLight.YELLOW\n else:\n 
state = TrafficLight.UNKNOWN\n\n return state", "title": "" }, { "docid": "8142af5f77c216c2b9c9a354931d64c0", "score": "0.5830477", "text": "def threshold_img(img):\n\n is_color = len(img.shape) == 3\n is_grey = len(img.shape) == 2\n\n t = threshold_value(img)\n\n if is_color:\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n elif is_grey:\n gray = img.copy()\n\n blurred = cv2.GaussianBlur(gray, (3, 3), 0)\n (_, thresh) = cv2.threshold(blurred, t*255, 1, cv2.THRESH_BINARY_INV)\n\n return thresh", "title": "" }, { "docid": "dc772c5da96d4a185bd13661ec55181b", "score": "0.58226156", "text": "def pick_verts_by_AO_color(obj, min_threshold = 0.00, max_threshold = .95):\n\n mesh = obj.data \n \n vcol_layer = mesh.vertex_colors.get(\"AO\")\n if vcol_layer == None:\n return\n \n #select and add color\n if \"AO_select\" not in obj.data.vertex_colors:\n vcol = obj.data.vertex_colors.new(name = \"AO_select\")\n else:\n vcol = obj.data.vertex_colors.get(\"AO_select\")\n \n bme = bmesh.new()\n bme.from_mesh(obj.data)\n bme.verts.ensure_lookup_table()\n bme.faces.ensure_lookup_table() \n bme.edges.ensure_lookup_table()\n \n ao_select_color_layer = bme.loops.layers.color[\"AO_select\"]\n ao_bake_color_layer = bme.loops.layers.color[\"AO\"]\n \n to_select = set() \n for f in bme.faces: \n for loop in f.loops:\n col = loop[ao_bake_color_layer]\n if any([(col[0] < max_threshold and col[0] > min_threshold),\n (col[1] < max_threshold and col[1] > min_threshold),\n (col[2] < max_threshold and col[2] > min_threshold)]):\n #loop.vert.select_set(True) #actually select the vert\n to_select.add(loop.vert)\n loop[ao_select_color_layer] = Color((1,.2, .2)) #set the color so that it can be viewed in object mode, yay\n \n for f in bme.faces:\n f.select_set(False)\n for ed in bme.edges:\n ed.select_set(False)\n \n for v in bme.verts:\n if v in to_select:\n v.select_set(True)\n else:\n v.select_set(False) \n \n bme.select_flush_mode()\n bme.to_mesh(obj.data)\n bme.free()\n \n if \"AO\" not in bpy.data.materials:\n mat = bpy.data.materials.new(\"AO\")\n mat.use_shadeless = True\n mat.use_vertex_color_paint = True\n else:\n mat = bpy.data.materials.get(\"AO\")\n mat.use_shadeless = True\n mat.use_vertex_color_paint = True\n \n \n #obj.data.vertex_colors.active = vcol\n print('setting the active vertex color')\n obj.data.vertex_colors.active = vcol\n for ind, v_color in enumerate(obj.data.vertex_colors):\n if v_color == vcol:\n break\n obj.data.vertex_colors.active_index = ind\n obj.data.vertex_colors.active = vcol\n \n print(vcol.name)\n print(obj.data.vertex_colors.active.name)\n \n #to_select = []\n #for poly in mesh.polygons:\n # for loop_index in poly.loop_indices:\n # loop_vert_index = mesh.loops[loop_index].vertex_index\n # col = vcol_layer.data[loop_index].color\n # if any([col[0] < max_threshold and col[0] > min_threshold, col[1] < max_threshold and col[1] > min_threshold, col[2] < max_threshold and col[2] > min_threshold]):\n # to_select.append(mesh.vertices[loop_vert_index])\n \n #for v in mesh.vertices:\n # v.select = False\n #for ed in mesh.edges:\n # ed.select = False\n #for f in mesh.polygons:\n # f.select = False", "title": "" }, { "docid": "aa5096f18fc042d6d05bb8b73795704f", "score": "0.58208394", "text": "def track_landmarks(toBeTracked,landmarks,frame):\r\n for i in landmarks:\r\n #comparing whether a particular landmark is to be tracked or not\r\n if i[0] in toBeTracked:\r\n cv2.circle(frame,(i[1],i[2]),10,(255,0,0),cv2.FILLED)\r\n #overlaying a bigger dot on the specific points to be tracked with blue 
color.\r\n return frame", "title": "" }, { "docid": "dbdb93f0e8ca7b8db7f190d28509c7b7", "score": "0.5819529", "text": "def emphasize_target(self, image, target_obj, threshold=0.60):\n # TODO: make color_seg_image label current working target.\n image_copybgr = image.copy()[..., ::-1]\n output_dict = self.run_inference_for_single_image(image_copybgr)\n boxes = output_dict['detection_boxes']\n classes = output_dict['detection_classes']\n scores = output_dict['detection_scores']\n masks = output_dict.get('detection_masks')\n\n idx = np.where(scores >= threshold)\n classes = classes[idx] # this is an array of objects that is detected\n masks = masks[idx]\n # boxes = boxes[idx]\n\n # target object를 masking하는 과정\n [obj_masknum, height, width] = masks.shape\n seg = np.zeros((height, width), dtype=np.uint8)\n for x, i in enumerate(classes):\n idx = np.where(masks[x] != 0)\n seg[idx] = i\n\n # changed code snippet from convert_color_seg\n height = seg.shape[0]\n width = seg.shape[1]\n channel = IMAGE_CHANNEL\n\n shape = (height, width, channel)\n color_label = np.zeros(shape=shape, dtype=np.uint8)\n\n list0 = np.where(seg == target_obj)\n color = RL_Obj_List[target_obj][1]\n color_label[list0[0], list0[1]] = color\n\n color_label = cv2.resize(color_label, (int(shape[1] / 2), int(shape[0] / 2)))\n cv2.imshow(\"color_seg_show\", color_label)\n cv2.moveWindow(\"color_seg_show\", 0, 390)\n cv2.waitKey(1)", "title": "" }, { "docid": "0645f48ea92201b5eb06b397b0e3f21b", "score": "0.5815656", "text": "def go_straight_until_black(self, speed):\n self.go(speed, speed)\n while True:\n color = ColorSensor(3)\n if color.get_reflected_light_intensity() <= 10:\n break\n self.stop()", "title": "" }, { "docid": "fcf11bfd949f86009164b48cd37fba57", "score": "0.57992035", "text": "def canny(img, low_threshold, high_threshold):\r\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "fcf11bfd949f86009164b48cd37fba57", "score": "0.57992035", "text": "def canny(img, low_threshold, high_threshold):\r\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "2cade9bc4bda0df1323be7439b0d03a7", "score": "0.5780074", "text": "def img_lane_detect(img):\n\twhite_mask = HSV_mask(img, white_threshold)\n\tyellow_mask = HSV_mask(img, yellow_threshold)\n\tcolor_mask = white_mask | yellow_mask\n\n\timg_gray = grayscale(img)\n\tblur_gray = gaussian_blur(img, kernel_size)\n\tedges = canny(blur_gray, low_threshold, high_threshold)\n\tmasked_edges = region_of_interest(edges, vertices)\n\n\t# white_mask = HSV_mask(img, white_threshold)\n\t# yellow_mask = HSV_mask(img, yellow_threshold)\n\t# color_mask = white_mask | yellow_mask\n\t# masked_edges[color_mask==0] = 0 # apply color mask\n\n\tlined_edges = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap)\n\tmasked_lined_edges = region_of_interest(lined_edges, vertices)\n\n\treturn weighted_img(masked_lined_edges, img, α=0.8, β=1., γ=0.)", "title": "" }, { "docid": "cf6e6e6f5352ab7cd2fd0139ece09302", "score": "0.5779461", "text": "def hls_select(self, img, thresh=(0,255)):\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n s = hls[:,:,2]\n bin_img = np.zeros_like(s)\n bin_img[(thresh[0] < s) & (s <= thresh[1])] = 1\n return bin_img", "title": "" }, { "docid": "a0887571427f1b3c22997d3e555e5ea5", "score": "0.5766189", "text": "def detect(self, circles, mask, text, image):\n r = 5\n bound = 4.0 / 10\n\n if circles is not None:\n y_circles = np.uint16(np.around(circles))\n\n for i in y_circles[0, :]:\n if i[0] > 
self.size[1] or i[1] > self.size[0] or i[1] > self.size[0] * bound:\n continue\n\n h, s = 0.0, 0.0\n for m in range(-r, r):\n for n in range(-r, r):\n\n if (i[1] + m) >= self.size[0] or (i[0] + n) >= self.size[1]:\n continue\n h += mask[i[1] + m, i[0] + n]\n s += 1\n if h / s > 50:\n if self.visualize:\n cv2.circle(image, (i[0], i[1]), i[2] + 10, (0, 255, 0), 2)\n cv2.circle(mask, (i[0], i[1]), i[2] + 30, (255, 255, 255), 2)\n cv2.putText(image, 'YELLOW', (i[0], i[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2,\n cv2.LINE_AA)\n return True\n return False", "title": "" }, { "docid": "6eb9f342191beb31c4904792ba114be0", "score": "0.57584167", "text": "def labelize_patch(patch):\n\n patch_h, patch_w = patch.shape\n mid_h, mid_w = patch_h // 2, patch_w // 2\n sm_patch = patch[mid_h - 8:mid_h + 8, mid_w - 8:mid_w + 8]\n\n df = np.mean(sm_patch)\n\n return 1 if df > foreground_threshold else 0", "title": "" }, { "docid": "657b8b46b460f8ad37d8319b788b0a09", "score": "0.5751427", "text": "def combine_sobel_hls_threshold(img):\n ksize, bright = 3, 0\n image = np.copy(img)\n # Clahe \n aug = CLAHE(p=bright)\n clahe = augment_and_clahe(aug, image)\n \n sx_thresh=(45, 255)\n s_thresh=(130, 255)\n abso_sobelx = (10, 255)\n abso_sobely = (2, 255)\n mag_tresh = (90, 255)\n dir_thresh=(0.7, 1.2)\n \n hls = cv2.cvtColor(clahe, cv2.COLOR_RGB2HLS)\n cv2.imshow(\"hls\", hls)\n l_channel = hls[:,:,1]\n s_channel = hls[:,:,2]\n # Sobel x\n sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x\n abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))\n \n # Threshold x gradient\n sxbinary = np.zeros_like(scaled_sobel)\n sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1\n \n # Threshold color channel\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n # Stack each channel\n color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255\n \n \n gradx = abs_sobel_thresh(l_channel, s_channel, orient='x', thresh_max=(abso_sobelx[0], abso_sobelx[1]))\n grady = abs_sobel_thresh(l_channel, s_channel, orient='y', thresh_max=(abso_sobely[0], abso_sobely[1]))\n mag_binary = mag_thresh(l_channel, s_channel, ksize, mag_thresh=(mag_tresh[0], mag_tresh[1]))\n dir_binary = dir_threshold(l_channel, s_channel, ksize, thresh=(dir_thresh[0], dir_thresh[1]))\n # Combine the two binary thresholds\n combined_binary = np.zeros_like(sxbinary)\n combined_binary[((mag_binary == 1) & (dir_binary == 1)) | ((gradx == 1) & (grady == 1))] = 1\n # ((s_binary == 1) & (sxbinary == 1)) | \n \n return combined_binary", "title": "" }, { "docid": "32e94e505029992dc7f9313bd1f1d5e9", "score": "0.5747643", "text": "def rock_thresh(img, boundary =([100,100,0], [200,200,70])):\n lower = np.array(boundary[0], dtype = \"uint8\")\n upper = np.array(boundary[1], dtype = \"uint8\")\n # create mask\n mask = cv2.inRange(img, lower, upper)\n # apply image masking\n output = cv2.bitwise_and(img, img, mask = mask)\n # convert result to gray\n output = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY)\n # create temp image for our rock\n zeros = np.zeros_like(img[:,:,0])\n try:\n # get the closest coordinate of the transformed rock\n closest_x = max(output.nonzero()[1])\n closest_y = max(output.nonzero()[0])\n # make the rock look bigger instead of just dot\n zeros[closest_y:closest_y+5,closest_x:closest_x+5] = 1\n return zeros\n except:\n #if no 
rock is in image, return zeros\n return zeros", "title": "" }, { "docid": "4238cd302655a75089d7c26b0e7d27ca", "score": "0.57430637", "text": "def canny_edges(image, sigma=1.0, low_thresh=50, high_thresh=100):", "title": "" }, { "docid": "702ff331fff4534b3dadc9bdbdb61b11", "score": "0.57397246", "text": "def canny(img, low_threshold, high_threshold):\r\n\treturn cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "1f0741f4761aa4596e976331b2d4d656", "score": "0.57390994", "text": "def goalpost_detected_rgba(self):\n detected_rgba = self._detected_rgba.copy()\n detected_rgba[3] = 1\n return detected_rgba", "title": "" }, { "docid": "841d9923367fad4da10b4a99865f3220", "score": "0.5738236", "text": "def binary_mask(image): \n hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n luv = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)\n \n yellow_mask = color_threshold(hsv, [0,80,200],[255,255,255])\n white_mask = color_threshold(hsv, [0,0,218],[255,255,255])\n luv_mask = color_threshold(luv, [215,95,140], [254,106,148])\n \n combined_binary = np.zeros_like(yellow_mask)\n combined_binary[(yellow_mask == 1) | (white_mask == 1) | (luv_mask==1)] = 1\n return combined_binary", "title": "" }, { "docid": "23f1bee3b0264b2ac7a3a92dbb199c8b", "score": "0.5730831", "text": "def blueThresholdForPixel(pixel, image):\n x_interest, y_interest = pixel\n B = image[x_interest,y_interest]\n deltaB = 100\n testB = np.logical_and(image[:,:]<B+deltaB,image[:,:]>B-deltaB)\n return testB", "title": "" }, { "docid": "e954a388112beec3261f787e88f1213e", "score": "0.57267696", "text": "def get_object(color):\n global img\n \n\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\n \n \n # sim\n if color == 'red' :\n lower = np.array([139, 0, 0], dtype=np.uint8)\n upper = np.array([255, 255, 255], dtype=np.uint8)\n elif color == 'green' :\n lower = np.array([50, 119, 4], dtype=np.uint8)\n upper = np.array([88, 255, 255], dtype=np.uint8)\n elif color == 'black' :\n lower = np.array([83, 35, 13], dtype=np.uint8)\n upper = np.array([142, 179, 43], dtype=np.uint8)\n # elif color == 'yellow' :\n # lower = np.array([20, 120, 0], dtype=np.uint8)\n # upper = np.array([62, 255, 255], dtype=np.uint8)\n # # real world\n # lower = np.array([20, 120, 0], dtype=np.uint8)\n # upper = np.array([62, 255, 255], dtype=np.uint8)\n\n # lower,upper = get_color('yellow','morning','path')\n mask = cv.inRange(hsv, lower, upper)\n kernel = np.ones((5, 5), dtype=np.uint8)\n mask = cv.GaussianBlur(mask, (5, 5), 0)\n mask = cv.erode(mask, kernel)\n mask = cv.erode(mask, kernel)\n mask = cv.dilate(mask, kernel)\n mask = cv.dilate(mask, kernel)\n return mask", "title": "" }, { "docid": "7b9312347c15e283da06e9e954c95794", "score": "0.57265425", "text": "def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n # Grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Calculate the x and y gradients\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # Take the absolute value of the gradient direction,\n # apply a threshold, and create a binary image result\n absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n binary_output = np.zeros_like(absgraddir)\n binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1\n\n # Return the binary image\n return binary_output", "title": "" }, { "docid": "be6d9c932b2b8bfcd11d18bea53fc141", "score": "0.57227176", "text": "def colour_binary(rgb, hls, gray):\n # combine colour binary thresholds\n r_binary = 
binary_threshold(rgb[:, :, 0], (220, 255))\n s_binary = binary_threshold(hls[:, :, 2], (170, 255))\n gray_binary = binary_threshold(gray, (220, 255))\n\n # set colour binary\n colour_binary = np.zeros_like(r_binary)\n colour_binary[((r_binary == 1) | (s_binary == 1) | (gray_binary == 1))] = 1\n\n return colour_binary", "title": "" }, { "docid": "7ec7842f11272fda313351108f9ab07d", "score": "0.5722496", "text": "def tests(self):\n t = matrix([[ cos(self.alpha), 0, sin(self.alpha), 0],\n [ 0, -1, 0, 0],\n [ sin(self.alpha), 0, -cos(self.alpha), self.n],\n [ 0, 0, 0, 1]])\n print(t)\n while True:\n _, raw = self.cam.read()\n h, w, _ = raw.shape\n blank_image = np.zeros_like(raw)\n raw = cv2.blur(raw, (5 ,5))\n hsv = cv2.cvtColor(raw, cv2.COLOR_BGR2HSV)\n lower = np.array([165, 107, 112])\n upper = np.array([211, 215, 223])\n mask = cv2.inRange(hsv, lower, upper)\n res = cv2.bitwise_and(raw, raw, mask=mask)\n\n v = np.median(mask)\n sigma = 0.33\n canny_low = int(max(0, (1 - sigma) * v))\n canny_high = int(min(255, (1 + sigma) * v))\n\n th = cv2.Canny(mask, 1, 250)\n # th = cv2.dilate(edges, None, iterations=2)\n # th = cv2.erode(edges, None, iterations=2)\n\n cnts, hier = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n x = []\n y = []\n points = []\n for cnt in cnts:\n if cv2.contourArea(cnt) > 100:\n M = cv2.moments(cnt)\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n x.append(cx)\n y.append(cy)\n points.append([cx, cy])\n cv2.circle(raw, (cx, cy), 2, (0, 255, 0), 2)\n\n points = np.array(points)\n\n # x = np.transpose(x)\n # y = np.transpose(y)\n # A = np.vstack([x, np.ones(len(x))]).T\n # m, c = np.linalg.lstsq(A, y)[0]\n\n rows, cols = raw.shape[:2]\n if len(points) > 0:\n [vx, vy, x, y] = cv2.fitLine(points, cv2.DIST_LABEL_PIXEL, 0, 0.01, 0.01)\n\n x_min = min(points[:,0])\n x_max = max(points[:,0])\n y_min = min(points[:,1])\n y_max = max(points[:,1])\n\n lefty = (int(x_min), int([y for x, y in points if x == x_min][0]))\n righty = (int(x_max), int([y for x, y in points if x == x_max][0]))\n\n cv2.line(raw, lefty, righty, (0, 255, 0), 2)\n\n # objGRF = lefty\n # objCRF = (self.CRF[0] - objGRF[0], self.CRF[1] - objGRF[1])\n\n objCRF = self.convertToCenterRF(lefty)\n x, y, z = self.getPoint3d(objCRF[1], objCRF[0])\n floorPoint = t * np.transpose(matrix([x, y, z, 1]))\n # print(x,y,z)\n print(np.transpose(floorPoint))\n #\n # point in a center frame\n cv2.circle(raw, (int(self.CRF[0]), int(self.CRF[1])), 5, (0, 200, 200), 2)\n # cross in a center frame\n cv2.line(raw, (0, self.CRF[1]), (self.xy0[1], self.CRF[1]), (0, 200, 200), 1)\n cv2.line(raw, (self.CRF[0], 0), (self.CRF[0], self.xy0[0]), (0, 200, 200), 1)\n\n cv2.imshow(\"raw\", raw)\n # cv2.imshow(\"th_hsv\", mask)\n # cv2.imshow('bitwise', res)\n # cv2.imshow('canny', th)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n self.cam.release()\n cv2.destroyAllWindows()", "title": "" }, { "docid": "59da4df5c2c72e04fe519ae494cd0499", "score": "0.5719882", "text": "def _locate_traffic_lights(self, image):\n\n image_np_expanded = np.expand_dims(image, axis=0)\n\n (boxes, scores, classes) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes],\n feed_dict={self.image_tensor: image_np_expanded}\n )\n\n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n\n # Select only traffic lights from all possible classes and with confidence higher than 0.8\n boxes = boxes[(classes == 10) & (scores > 0.65)]\n\n return boxes", 
"title": "" }, { "docid": "6085481d6e297504c213aa75418fec44", "score": "0.5719768", "text": "def detect_hsv(self):\n if self.cv_image == None:\n return\n my_image = deepcopy(self.cv_image)\n self.hsv = cv2.cvtColor(my_image, cv2.COLOR_BGR2HSV)\n thresholded = cv2.inRange(self.hsv,\n (self.hue_lower,self.saturation_lower,self.value_lower),\n (self.hue_upper,self.saturation_upper,self.value_upper))\n moments = cv2.moments(thresholded)\n if moments['m00'] != 0:\n self.center_x, self.center_y = moments['m10']/moments['m00'], moments['m01']/moments['m00']\n cv2.circle(my_image,(int(self.center_x), int(self.center_y)), 5, (255,0,0))\n cv2.imshow('hsv_tracking_window', thresholded)\n cv2.imshow(\"hsv_camera_image\", my_image)\n cv2.waitKey(20)", "title": "" }, { "docid": "257705590077d231fc27300bf6607e42", "score": "0.57138515", "text": "def __init__(self):\n\n self.__hsv_threshold_hue = [38.84891468843968, 92.96928718228388]\n self.__hsv_threshold_saturation = [175.80937019569413, 255.0]\n self.__hsv_threshold_value = [122.30215535747061, 255.0]\n\n self.hsv_threshold_output = None\n\n self.__cv_dilate_src = self.hsv_threshold_output\n self.__cv_dilate_kernel = None\n self.__cv_dilate_anchor = (-1, -1)\n self.__cv_dilate_iterations = 6.0\n self.__cv_dilate_bordertype = cv2.BORDER_CONSTANT\n self.__cv_dilate_bordervalue = (-1)\n\n self.cv_dilate_output = None\n\n self.__cv_erode_src = self.cv_dilate_output\n self.__cv_erode_kernel = None\n self.__cv_erode_anchor = (-1, -1)\n self.__cv_erode_iterations = 4.0\n self.__cv_erode_bordertype = cv2.BORDER_CONSTANT\n self.__cv_erode_bordervalue = (-1)\n\n self.cv_erode_output = None\n\n self.__find_contours_input = self.cv_erode_output\n self.__find_contours_external_only = False\n\n self.find_contours_output = None\n\n self.__filter_contours_contours = self.find_contours_output\n self.__filter_contours_min_area = 1500.0\n self.__filter_contours_min_perimeter = 0.0\n self.__filter_contours_min_width = 0.0\n self.__filter_contours_max_width = 1000.0\n self.__filter_contours_min_height = 0.0\n self.__filter_contours_max_height = 1000.0\n self.__filter_contours_solidity = [0, 100.0]\n self.__filter_contours_max_vertices = 1000000.0\n self.__filter_contours_min_vertices = 0.0\n self.__filter_contours_min_ratio = 0.0\n self.__filter_contours_max_ratio = 1000.0\n\n self.filter_contours_output = None", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", 
"title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "40a3673ff1b034a1b79e91bc9bb140d8", "score": "0.570974", "text": "def canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "7a0ece87640c82daad333dd45bb5acd7", "score": "0.57075876", "text": "def vis_detections(im, class_name, dets, thresh=0.3):\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n im = im[0,:,:,:]\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n print(\"%.2f confident that there is a %s at \" % (score, class_name))\n print(bbox)\n y = int(bbox[0])\n x = int(bbox[1])\n\n y2 = int(bbox[2])\n x2 = int(bbox[3])\n im[x:x2,y,0] = 255\n im[x:x2,y,1] = 0\n im[x:x2,y,1] = 0\n im[x:x2,y2,0] = 255\n im[x:x2,y2,1] = 0\n im[x:x2,y2,1] = 0\n\n im[x,y:y2,0] = 255\n 
im[x,y:y2,1] = 0\n im[x,y:y2,2] = 0\n im[x2,y:y2,0] = 255\n im[x2,y:y2,1] = 0\n im[x2,y:y2,2] = 0\n return im #imsave(out_file, im)", "title": "" }, { "docid": "864c77fb542976e222ba5dcaa951fec9", "score": "0.57019925", "text": "def apply_ground_filter(img):\n\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lower_yellow = numpy.array([0, 200, 0])\n upper_yellow = numpy.array([100, 255, 255])\n mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n return mask", "title": "" }, { "docid": "2654af1ded9953dd39d001627214a858", "score": "0.56972146", "text": "def vis_detections(im, class_name, dets, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n# im = im[:, :, (2, 1, 0)]\n \n\n # fig, ax = plt.subplots(figsize=(12, 12))\n# ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n cv2.rectangle(im,(int(bbox[0]), int(bbox[1])),\n (int(bbox[2]),int(bbox[3])),\n (0,255,0),3)", "title": "" }, { "docid": "5c9aef3fde375120d1d2659faefa8564", "score": "0.5694149", "text": "def findLightSources(frame,threshold):\n \n # load the image, convert it to grayscale, and blur it\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (11, 11), 0) # Blurring eliminates noise\n\n # threshold the image to reveal light regions in the blurred image\n thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]\n\n # perform a series of erosions and dilations to remove any small blobs of noise from the thresholded image\n thresh = cv2.erode(thresh, None, iterations=2)\n thresh = cv2.dilate(thresh, None, iterations=4)\n\n # perform a connected component analysis on the thresholded image, then initialize a mask to store only the \"large\" components\n labels = measure.label(thresh, neighbors=8, background=0)\n mask = np.zeros(thresh.shape, dtype=\"uint8\")\n\n sources = []\n # loop over the unique components\n for label in np.unique(labels):\n # if this is the background label, ignore it\n## if label == 0:\n## continue\n\n # otherwise, construct the label mask and count the number of pixels \n labelMask = np.zeros(thresh.shape, dtype=\"uint8\")\n labelMask[labels == label] = 255\n numPixels = cv2.countNonZero(labelMask)\n\n # if the number of pixels in the component is sufficiently large, then add it to our mask of \"large blobs\"\n if numPixels < 250000: # 300 pixels for large blob (arbitary, needs experimentation)\n mask = cv2.add(mask, labelMask)\n \n # find the contours in the mask, then sort them from left to right\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n cnts = contours.sort_contours(cnts)[0]\n \n # loop over the contours, finding the center of each source\n for (i, c) in enumerate(cnts):\n # Bound the sources\n (x, y, w, h) = cv2.boundingRect(c)\n ((cX, cY), radius) = cv2.minEnclosingCircle(c)\n sources.append([(cX, cY), radius,(x,y,w,h)])\n \n # return the list of sources\n return sources", "title": "" }, { "docid": "add125d79ede71372e6f572822ac91fe", "score": "0.5690292", "text": "def canny(img, low_threshold, high_threshold):\n\treturn cv2.Canny(img, low_threshold, high_threshold)", "title": "" }, { "docid": "6f266b6dce1885d16e667cf2bd33338b", "score": "0.56807655", "text": "def detect(self, source, target): \n \n foundLocations, foundWeights = self.hog.detectMultiScale(source, hitThreshold=self.hitThreshold,\n winStride=self.winStride, padding=self.padding,\n scale=self.scale, 
finalThreshold=self.finalThreshold,\n useMeanshiftGrouping=self.useMeanshiftGrouping)\n foundLocationsFiltered = []\n # At least one person detected?\n if len(foundLocations) > 0:\n # Filter out inside rectangles\n for ri, r in enumerate(foundLocations):\n for qi, q in enumerate(foundLocations):\n if ri != qi and self.inside(r, q):\n break\n else:\n # See if we should ignore any areas\n if self.ignoreAreas == None:\n foundLocationsFiltered.append(r)\n elif not self.insideIgnoreAreas(r):\n foundLocationsFiltered.append(r)\n # Mark objects (make sure to copy target image if you want to keep original image intact)\n if self.markObjects == True:\n self.mark(source, target, foundLocations, self.widthMultiplier, self.heightMultiplier, self.boxColor)\n self.mark(source, target, foundLocationsFiltered, self.widthMultiplier, self.heightMultiplier, self.filteredBoxColor)\n if self.ignoreAreas != None: \n self.mark(source, target, self.ignoreAreas, self.widthMultiplier, self.heightMultiplier, self.ignoreAreasBoxColor)\n # Return filtered results\n return foundLocationsFiltered", "title": "" }, { "docid": "8257bc05d1244ae8c8ad5b6344e5facb", "score": "0.5668183", "text": "def hsv_thresh(img, hsv_rangeLower = np.array([10, 100, 100]), hsv_rangeUpper = np.array([22, 230, 255])):\n\thsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\thsv_temp = cv2.inRange(hsv, hsv_rangeLower, hsv_rangeUpper)\n\tbinary_output = np.zeros_like(hsv_temp)\n\tbinary_output[(hsv_temp != 0)] = 1\n\treturn binary_output", "title": "" }, { "docid": "138b60953d822180f4c00f7b90da0e5b", "score": "0.5665024", "text": "def emphasize_targets(self, image, target_obj_list, threshold=0.60):\n # TODO: make color_seg_image label current working target.\n image_copybgr = image.copy()[..., ::-1]\n output_dict = self.run_inference_for_single_image(image_copybgr)\n boxes = output_dict['detection_boxes']\n classes = output_dict['detection_classes']\n scores = output_dict['detection_scores']\n masks = output_dict.get('detection_masks')\n\n idx = np.where(scores >= threshold)\n classes = classes[idx] # this is an array of objects that is detected\n masks = masks[idx]\n # boxes = boxes[idx]\n\n # target object를 masking하는 과정\n [obj_masknum, height, width] = masks.shape\n seg = np.zeros((height, width), dtype=np.uint8)\n for x, i in enumerate(classes):\n idx = np.where(masks[x] != 0)\n seg[idx] = i\n\n # changed code snippet from convert_color_seg\n height = seg.shape[0]\n width = seg.shape[1]\n channel = IMAGE_CHANNEL\n\n shape = (height, width, channel)\n color_label = np.zeros(shape=shape, dtype=np.uint8)\n\n for target_obj in target_obj_list:\n list0 = np.where(seg == target_obj)\n color = RL_Obj_List[target_obj][1]\n color_label[list0[0], list0[1]] = color\n\n color_label = cv2.resize(color_label, (int(shape[1] / 2), int(shape[0] / 2)))\n cv2.imshow(\"color_seg_show\", color_label)\n cv2.moveWindow(\"color_seg_show\", 0, 390)\n cv2.waitKey(1)", "title": "" }, { "docid": "d7031fbdaa8534234d84b9ea6bc2b300", "score": "0.56617", "text": "def mask_background(img):\n\n thr1 = np.abs(img - img[10, 10]).sum(2)\n thr2 = np.abs(img - img[-10, -10]).sum(2)\n thr3 = np.abs(img - img[10, -10]).sum(2)\n thr4 = np.abs(img - img[-10, 10]).sum(2)\n thr = np.dstack((thr1, thr2, thr3, thr4)).min(2)\n\n mask = np.where(thr > 90, 1, 0).astype(\"uint8\")\n mask = mask * 255\n\n return img, mask", "title": "" }, { "docid": "9b029cbd8fd81aa8b5e9a0ee36849218", "score": "0.5657228", "text": "def binarize(img):\n \n gray_img = img_as_ubyte(rgb2gray(img))\n height, width = 
gray_img.shape\n num_mid_gray = np.sum((gray_img < 240) & (gray_img > 15))\n if num_mid_gray < 0.1 * height * width:\n # Global Thresholding\n thresh = threshold_otsu(gray_img)\n bw_img = gray_img > thresh\n\n else:\n # Adaptive Thresholding\n if height * width >= 2000 * 1000:\n gray_img = cv2.GaussianBlur(gray_img, ksize=(11, 11), sigmaX=3, sigmaY=3)\n \n # Set the window size of the filter based on image dimensions\n win_size = int(round(min(height/60, width/60)))\n \n # Get the window mean of each pixel by filtering using an averaging filter\n window_means = rank.mean(gray_img, np.ones((win_size, win_size)))\n print gray_img.dtype\n \n # Remove the mean and threshold. Also inverts the image.\n demeaned = window_means.astype(np.float32) - gray_img.astype(np.float32) - 10\n demeaned[demeaned > 0] = 1.0\n demeaned[demeaned <= 0] = 0.0\n demeaned = img_as_float(demeaned)\n bw_img = img_as_ubyte(demeaned)\n\n # Remove small noise pixels.\n noise_size = int(0.0001 * height * width)\n bw_img = remove_small_objects(img_as_bool(bw_img), noise_size, connectivity=2)\n \n # Close gaps in edges\n bw_img = binary_closing(bw_img, square(4))\n \n # Fill small holes (less than 5% of area of image)\n hole_size = int(0.0005 * height * width)\n bw_img = remove_small_holes(img_as_bool(bw_img), area_threshold=hole_size)\n \n # Return image to original polarity.\n bw_img = ~bw_img\n\n bw_img = img_as_ubyte(bw_img)\n # plt.figure()\n # plt.imshow(bw_img, cmap=\"gray\")\n # plt.show()\n return bw_img", "title": "" }, { "docid": "c897e7ac1035aa900b65bc6880259f2c", "score": "0.56552666", "text": "def find_color_percentages(img_array):\n\n\n white_saturation_max = 36 # maximum S value for a white pixel14 from the gimp , 14*100/255\n white_value_min = 214 # minimum V value for a white pixel this is 84 * 100/255\n black_value_max = 23 # maximum V value for a black pixel, 15*100/255\n n_colors=10\n color_limits=range(0,180+int(180/n_colors),int(180/n_colors))\n #print(color_limits)\n\n white_count=0\n black_count=0\n grey_count=0\n\n mhue=0\n mval=0\n msat=0\n\n t0=time.time()\n h_arr=hsv[:,:,0]\n s_arr=hsv[:,:,1]\n v_arr=hsv[:,:,2]\n #ways to count array elements fitting thresholds\n # np.sum(myarray >= thresh)\n#np.size(np.where(np.reshape(myarray,-1) >= thresh))\n#fast array way to do same calculation\n\n #slow pixel by pixel way\n\n# t0=time.time()\n# for x in range(r[2]):\n# for y in range(r[3]):\n# mhue=hsv[x,y, 0] #\n# msat=hsv[x,y, 1] #\n# mval=hsv[x,y, 2] #the hsv values of current pixel\n# # print('x,y:'+str(x)+','+str(y)+' hue:'+str(mhue)+' val:'+str(mval)+' sat:'+str(msat))\n# if mval<black_value_max:\n# black_count+=1\n# elif msat<white_saturation_max:\n # if mval>white_value_min:\n # white_count+=1\n # else:\n # grey_count+=1\n # t1=time.time()\n # print('whitecount:'+str(white_count)+ 'greycount:'+str(grey_count)+' blackcount:'+str(black_count)+' dt:'+str(t1-t0)+' area'+str(area))#\n\n mask[0] = image[0]==0\n mask[1] = image[1]==0\n mask[2] = image[2]==0\n\n masksofi = mask[0] * mask[1] * mask[2]\n\n black_count=np.sum(v_arr<black_value_max)\n black_percentage=black_count/area\n\n\n white_mask=(s_arr<white_saturation_max) *(v_arr>white_value_min)\n\n\n\n\n white_count=np.sum(white_mask)\n white_percentage=white_count/area\n grey_count=np.sum((s_arr<white_saturation_max) *( v_arr<=white_value_min) *( v_arr>=black_value_max))\n grey_percentage=grey_count/area\n inv=np.invert(white_mask)\n color_mask=(np.invert(white_mask))*(v_arr>=black_value_max)\n colors_count=np.sum(color_mask)\n print(\"tot color 
count:\"+str(tot_colors))\n color_counts=[]\n for i in range(0,n_colors):\n color_percentages.append(np.sum( color_mask*(h_arr<color_limits[i+1])*(h_arr>=color_limits[i])))\n if DEBUG:\n print('color '+str(i)+' count ='+str(color_percentages[i]))\n print('color percentages:'+str(color_percentages))\n color_percentages[i]=color_percentages[i]/area #turn pixel count into percentage\n all_colors=np.zeros(3)\n all_colors[0]=white_percentage\n all_colors[1]=black_percentage\n all_colors[2]=grey_percentage\n all_colors=np.append(all_colors,color_counts)\n\n # all_colors=np.concatenate(all_colors,color_counts)\n if DEBUG:\n print('white black grey colors:'+str(all_colors)) #order is : white, black, grey, color_count[0]...color_count[n_colors]\n print('sum:'+str(np.sum(all_colors)))\n # all_colors=color_counts\n # np.append(all_colors,white_count)\n # np.append(all_colors,black_count)\n # all_colors.append(grey_count)\n\n #dominant_color_indices, dominant_colors = zip(*sorted(enumerate(all_colors), key=itemgetter(1), reverse=True))\n #above is for array, now working with numpy aray\n\n# the order of dominant colors is what ccny guys used, if we just have vector in order of color i think its just as good\n#so for now the following 3 lines are not used\n dominant_color_indices=np.argsort(all_colors, axis=-1, kind='quicksort', order=None)\n dominant_color_indices = dominant_color_indices[::-1]\n dominant_color_percentages=np.sort(all_colors, axis=-1, kind='quicksort', order=None)\n dominant_color_percentages = dominant_color_percentages[::-1]\n\n if DEBUG:\n print('color percentages:'+str(dominant_color_percentages)+' indices:'+str(dominant_color_indices))\n t1=time.time()\n return(all_colors)", "title": "" }, { "docid": "44e74dd3dc504f0c15d593b7e2c486ce", "score": "0.56493545", "text": "def hls_select(img, thresh=(0, 255)):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:,:,2]\n binary_output = np.zeros_like(s_channel)\n binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1\n\n return binary_output", "title": "" } ]
b3e43d797607acdf8b5629062592b977
Construct a new Colorscale object
[ { "docid": "f8dd8b3727da32f08a1779c47d6a6a6b", "score": "0.7413053", "text": "def __init__(\n self,\n arg=None,\n diverging=None,\n sequential=None,\n sequentialminus=None,\n **kwargs\n ):\n super(Colorscale, self).__init__('colorscale')\n\n # Validate arg\n # ------------\n if arg is None:\n arg = {}\n elif isinstance(arg, self.__class__):\n arg = arg.to_plotly_json()\n elif isinstance(arg, dict):\n arg = copy.copy(arg)\n else:\n raise ValueError(\n \"\"\"\\\nThe first argument to the plotly.graph_objs.layout.Colorscale \nconstructor must be a dict or \nan instance of plotly.graph_objs.layout.Colorscale\"\"\"\n )\n\n # Handle skip_invalid\n # -------------------\n self._skip_invalid = kwargs.pop('skip_invalid', False)\n\n # Import validators\n # -----------------\n from plotly.validators.layout import (colorscale as v_colorscale)\n\n # Initialize validators\n # ---------------------\n self._validators['diverging'] = v_colorscale.DivergingValidator()\n self._validators['sequential'] = v_colorscale.SequentialValidator()\n self._validators['sequentialminus'\n ] = v_colorscale.SequentialminusValidator()\n\n # Populate data dict with properties\n # ----------------------------------\n _v = arg.pop('diverging', None)\n self['diverging'] = diverging if diverging is not None else _v\n _v = arg.pop('sequential', None)\n self['sequential'] = sequential if sequential is not None else _v\n _v = arg.pop('sequentialminus', None)\n self['sequentialminus'\n ] = sequentialminus if sequentialminus is not None else _v\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**dict(arg, **kwargs))\n\n # Reset skip_invalid\n # ------------------\n self._skip_invalid = False", "title": "" } ]
[ { "docid": "d2823f7261d8f63a796bdd0389487e34", "score": "0.63869935", "text": "def __init__(self, z_scale=1., y_scale=1., x_scale=1.):\n \n self.z_scale = z_scale\n self.y_scale = y_scale\n self.x_scale = x_scale", "title": "" }, { "docid": "56efac36e43d5f479cb58332ab8fa554", "score": "0.6211446", "text": "def __init__(self, colors, minValue, maxValue):\n self.colors = colors\n self.minValue = minValue\n self.maxValue = maxValue", "title": "" }, { "docid": "069cabe0311d57054fd5f53d4437f520", "score": "0.6159893", "text": "def __init__(self, scale=['#FFFFFF', '#000000'], opacity=1.0, domain=None):\r\n self.scale = scale\r\n self.opacity = opacity\r\n self.domain = domain", "title": "" }, { "docid": "1abaf669ca9d70a5bb6b0672c0bf467d", "score": "0.61494064", "text": "def __init__(self, color, discrete=True, k=None):\n if color not in cmaps.keys():\n msg = \"`color` option not a valid option.\"\n raise ValueError(msg)\n if (k is not None) and (not discrete):\n msg = \"`k` only specified (optionally) for discrete colormaps.\"\n raise ValueError(msg)\n self.scale = color\n self.color = cmaps[color]\n self.discrete = discrete\n self.k = k\n kwargs = {}\n kwargs[\"as_cmap\"] = not self.discrete\n if k is not None:\n kwargs[\"n_colors\"] = self.k\n self.palette = sns.color_palette(self.color, **kwargs)", "title": "" }, { "docid": "10e8510f7ea0ed4471d7911d901f0941", "score": "0.61062396", "text": "def __init__(self, red=MIN_DIM, green=MIN_DIM, blue=MIN_DIM):\r\n\r\n # Check the red channel\r\n if red > RGBColor.MAX_DIM:\r\n self._red = RGBColor.MAX_DIM\r\n elif red < RGBColor.MIN_DIM:\r\n self._red = RGBColor.MIN_DIM\r\n else:\r\n self._red = red\r\n\r\n # Check the green channel\r\n if green > RGBColor.MAX_DIM:\r\n self._green = RGBColor.MAX_DIM\r\n elif green < RGBColor.MIN_DIM:\r\n self._green = RGBColor.MIN_DIM\r\n else:\r\n self._green = green\r\n\r\n # Check the blue channel\r\n if blue > RGBColor.MAX_DIM:\r\n self._blue = RGBColor.MAX_DIM\r\n elif blue < RGBColor.MIN_DIM:\r\n self._blue = RGBColor.MIN_DIM\r\n else:\r\n self._blue = blue", "title": "" }, { "docid": "74c08b76af6d033394650797cde19765", "score": "0.60916626", "text": "def test_create_colourscale() -> None:\n colourscale = sleplet.plot_methods._convert_colourscale(\n cmocean.cm.ice,\n pl_entries=PL_ENTRIES,\n )\n np.testing.assert_equal(len(colourscale), PL_ENTRIES)", "title": "" }, { "docid": "6e3eac9fa39252f136222af7a4820b2d", "score": "0.6062424", "text": "def __init__(self, *v, **k):\n if v and isinstance(v[0], (list, tuple)): # Color(list)\n v = v[0]\n if len(v) == 0: # Color()\n r, g, b, a = 0, 0, 0, 0\n elif len(v) == 1 and v[0] is None: # Color(None)\n r, g, b, a = 0, 0, 0, 0\n elif len(v) == 1 and isinstance(v[0], Color): # Color(Color)\n r, g, b, a = v[0].r, v[0].g, v[0].b, v[0].a\n elif len(v) == 1: # Color(k)\n r, g, b, a = v[0], v[0], v[0], 1\n elif len(v) == 2: # Color(k, a)\n r, g, b, a = v[0], v[0], v[0], v[1]\n elif len(v) == 3: # Color(r, g, b)\n r, g, b, a = v[0], v[1], v[2], 1\n elif len(v) == 4: # Color(r, g, b, a)\n r, g, b, a = v[0], v[1], v[2], v[3]\n if k.get('mode') == HSB: # Color(h, s, b, a, mode=HSB)\n r, g, b = colorsys.hsv_to_rgb(r, g, b)\n\n n = k.get('base', 1)\n\n self.r = float(r) / n\n self.g = float(g) / n\n self.b = float(b) / n\n self.a = float(a) / n", "title": "" }, { "docid": "953edf8f601286bc2952bd1b21c587f5", "score": "0.60028166", "text": "def __init__(self, scale, bases, powers):\n\n if scale == 1.:\n scale = 1\n\n self._scale = scale\n self._bases = bases\n self._powers = powers", 
"title": "" }, { "docid": "5503c45c4e5c4b1c2182d3fbfc8f51d8", "score": "0.5998762", "text": "def __init__(self, *args):\n _gce.gce_MakeScale_swiginit(self,_gce.new_gce_MakeScale(*args))", "title": "" }, { "docid": "ecb2f9e1fd971e2901ea56650cbf6218", "score": "0.59615386", "text": "def generate_colormap(scale_range=(0.0, 1.0), hue_range=(0.8, 0.0),\n saturation_range=(1.0, 1.0), value_range=(0.8, 0.8)):\n lookup_table = vtk.vtkLookupTable()\n lookup_table.SetRange(scale_range)\n\n lookup_table.SetHueRange(hue_range)\n lookup_table.SetSaturationRange(saturation_range)\n lookup_table.SetValueRange(value_range)\n lookup_table.Build()\n return lookup_table", "title": "" }, { "docid": "4a20a9d40882ad859ee398354143a3ad", "score": "0.5944037", "text": "def __new__(cls, r, g, b, a=1.0):\n # Explicitly use floats here to disambiguate\n # between the two TColor constructors\n color = ROOT.TColor(float(r), float(g), float(b), float(a))\n self = int.__new__(cls, color.GetNumber())\n self.object = color\n return self", "title": "" }, { "docid": "77ae5394a30a15e5a3cdb989a7d56ced", "score": "0.593255", "text": "def __init__(self, width, height, scale):\r\n\t\tself.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width*scale, height*scale)\r\n\t\tcontext = cairo.Context(self.surface)\r\n\t\tcontext.scale(scale, scale)\r\n\t\tself.context = context\r\n\t\tself.width = width\r\n\t\tself.height = height\r\n\t\tself.scale = scale", "title": "" }, { "docid": "d1a3cb8001aaceb914171bf2ee1ba4b5", "score": "0.58977276", "text": "def colormap():", "title": "" }, { "docid": "123258a59ff52123b6a2788eea733476", "score": "0.58662254", "text": "def __init__(self, data, offset):\n self.red = CIEXYZ(data, offset)\n self.green = CIEXYZ(data, offset + 12)\n self.blue = CIEXYZ(data, offset + 24)", "title": "" }, { "docid": "667f521e3528a7ea2e97519fd1423c7c", "score": "0.5830659", "text": "def __init__(self, *args, **kwargs):\n code = library.get(\"transforms/linear-scale.glsl\")\n QuantitativeScale.__init__(self, code, *args, **kwargs)", "title": "" }, { "docid": "a5c13b0e7087a3d83fd5bc10c16193fb", "score": "0.58139664", "text": "def scale(**attrs):\n return Scale(attrs)", "title": "" }, { "docid": "d36aee25db8acb28af3e4a9b00daae26", "score": "0.5806679", "text": "def __init__(self,data,color,alpha=1.,normalize=True):\n\n self.color = color\n\n self.alpha = alpha\n self.alpha_premultiplied = False\n\n self.data = data\n self.ny, self.nx = self.data.shape\n\n if normalize == True:\n self.data /= self.data.max()\n\n self.construct_rgba()", "title": "" }, { "docid": "6043260f105376df8850a998de1ae1f5", "score": "0.5778558", "text": "def __init__(self, colors, minval=0., default=None):\n self.colors = colors\n for i in range(len(self.colors)):\n c1,c2,maxval = self.colors[i]\n if i > 0:\n minval = self.colors[i-1][3]\n delta = maxval - minval\n self.colors[i] = [c1,c2,minval,maxval,delta]\n self.default = self.colors[0][0]", "title": "" }, { "docid": "352757d3a5c3e3f123b90e2826804119", "score": "0.57447696", "text": "def __init__(self, scale=0., like_normal=False):\n self.like_normal = like_normal\n self.scale = scale", "title": "" }, { "docid": "eec3d6f20442d216832107b18b143dd0", "score": "0.5729346", "text": "def __init__(self, sourceColor, normalized =False, opacity =1.0):\n super(ColorQValue, self).__init__(sourceColor, normalized, opacity)", "title": "" }, { "docid": "9682018ed90f0cbed17ab9500093157e", "score": "0.5710763", "text": "def __init__(self, color=np.array([0.5, 0.5, 0.5]),\n k_a=1.0, k_d=1.0, k_s = 1.0, 
alpha=1.0,\n smooth=False, wireframe=False):\n self._color = color\n self._k_a = k_a\n self._k_d = k_d\n self._k_s = k_s \n self._alpha = alpha\n self._smooth = smooth\n self._wireframe = wireframe", "title": "" }, { "docid": "0bba8e018eb893a2e4bd01772ac814de", "score": "0.5698161", "text": "def __init__(self,\n create_scale,\n create_offset,\n eps=1e-5,\n scale_init=None,\n offset_init=None,\n data_format=\"channels_last\",\n name=None):\n if utils.get_channel_index(data_format) == 1:\n axis = slice(2, None)\n else: # channel_index = -1\n axis = slice(1, -1)\n super(InstanceNorm, self).__init__(\n axis=axis,\n create_scale=create_scale,\n create_offset=create_offset,\n eps=eps,\n scale_init=scale_init,\n offset_init=offset_init,\n name=name)", "title": "" }, { "docid": "e7156eca8339a6e35f1466c9a04d9965", "score": "0.5695339", "text": "def __new__(cls, r, g, b, a):\n \n if r < 0.0 or r > 1.0:\n r = min(fabs(r)/255.0, 1.0)\n if g < 0.0 or g > 1.0:\n g = min(fabs(g)/255.0, 1.0)\n if b < 0.0 or b > 1.0:\n b = min(fabs(b)/255.0, 1.0)\n if a < 0.0 or a > 1.0:\n a = min(fabs(a)/255.0, 1.0)\n \n obj = np.asarray((r, g, b, a), FLOAT32).view(cls)\n return obj", "title": "" }, { "docid": "b12a6e40d3fffdb0ca0ae27a1141be1b", "score": "0.5690426", "text": "def __init__(\n self,\n name=None,\n shape=None,\n scale=1.0,\n bias=None,\n color_layout=ColorLayout.RGB,\n channel_first=None,\n ):\n super(ImageType, self).__init__(name, shape)\n self.scale = scale\n msg = \"color_layout should be an enum of type ct.colorlayout, i.e. one of: \" \\\n \"{ct.colorlayout.RGB, ct.colorlayout.BGR, \" \\\n \"ct.colorlayout.GRAYSCALE, ct.colorlayout.GRAYSCALE_FLOAT16}\"\n if not (isinstance(color_layout, str) or isinstance(color_layout, ColorLayout)):\n raise ValueError(msg)\n if isinstance(color_layout, str):\n if color_layout not in (\"G\", \"RGB\", \"BGR\"):\n raise ValueError(msg)\n color_layout = ColorLayout(color_layout)\n\n self.color_layout = color_layout\n if color_layout == ColorLayout.GRAYSCALE_FLOAT16:\n self.dtype = types.fp16\n if bias is None:\n if color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16):\n self.bias = 0.0\n else:\n self.bias = [0.0, 0.0, 0.0]\n else:\n self.bias = bias\n self.channel_first = channel_first", "title": "" }, { "docid": "8d7a7ab38651b0f7bc8ab46984841421", "score": "0.56696624", "text": "def __init__(self,x,y,width,height,c):\n assert type(x) == int or type(x) == float\n assert type(y) == int or type(y) == float\n assert type(width) == int or type(width) == float\n assert type(height) == int or type(height) == float\n assert isinstance(c, colormodel.RGB)\n \n GRectangle.__init__(self,x = x,y = y,width = width,height = height, fillcolor = c)", "title": "" }, { "docid": "67e2bea545b7ffe46a145d69a9c6c558", "score": "0.56671935", "text": "def __init__(self, sigma=0.0, scale=1.0):\n self.sigma = sigma\n self.scale = scale", "title": "" }, { "docid": "3d9ab0e9341f3d3e2829818a550d5588", "score": "0.5662489", "text": "def __init__(self, width,height,color):\n self.width = width\n self.height = height\n self.x = WIDTH//2\n self.y = HEIGHT - self.height\n self.color = color", "title": "" }, { "docid": "6fc47c2e5c7fab5b6ba98b7a77ab3c32", "score": "0.565137", "text": "def __new__(cls, r, g, b):\n \n if r < 0.0 or r > 1.0:\n r = min(fabs(r)/255.0, 1.0)\n if g < 0.0 or g > 1.0:\n g = min(fabs(g)/255.0, 1.0)\n if b < 0.0 or b > 1.0:\n b = min(fabs(b)/255.0, 1.0)\n \n obj = np.asarray((r, g, b), FLOAT32).view(cls)\n return obj", "title": "" }, { "docid": 
"0af0612f1d6f96744619532353ec37aa", "score": "0.56107295", "text": "def _colors(self, c):\n if c == -1:\n return (0, 0)\n else:\n return self.colorscale[c % len(self.colorscale)]", "title": "" }, { "docid": "2a5a5e5aea86b1a1162130b353e0aae8", "score": "0.5603066", "text": "def newDimensionScaling(self, **attrlinks):\n return DimensionScaling(self, **attrlinks)", "title": "" }, { "docid": "af949bc29e4867e8f50762779c88f87a", "score": "0.5584126", "text": "def shapecustom_ScaleShape(*args):\n return _ShapeCustom.shapecustom_ScaleShape(*args)", "title": "" }, { "docid": "07a799b5ca63ebe3b24af15b97bf7dd6", "score": "0.5571931", "text": "def __init__(self,position,size=[1,1,1],colors=[]):\n self.position=position\n self.size=size\n self.colors=colors", "title": "" }, { "docid": "e856a71762dfc4e4b68796dd4ce5b0ec", "score": "0.5553334", "text": "def __init__(self, C=0.0, M=0.0, Y=0.0, K=1.0, *args, **kwargs): # default: Black\n cB.__init__(self, *args, **kwargs)\n self.type = 'CMYK' # can be used instead of isinstance on an object\n self.C, self.M, self.Y, self.K = C, M, Y, K", "title": "" }, { "docid": "4022f45a6a7582ba91d83ea86602c0e1", "score": "0.5551198", "text": "def _gen_scale(self, scaleName, from_, to, color, label, xpos, ypos, res, funcName, startValue):\n self.scaleDict[scaleName] = tk.Scale(self.canvas, from_=from_, to=to, orient=tk.HORIZONTAL, bg=\"black\",\n fg=\"white\", resolution=res, troughcolor=\"black\", highlightbackground=color, label=label)\n self.scaleDict[scaleName].bind(\n \"<ButtonRelease-1>\", lambda event, fn=funcName: self.draw(event.widget.get(), funcName))\n self.draw_rect(xpos, ypos, self.scaleWidth, self.scaleHeight, color)\n self.scaleDict[scaleName].place(\n x=xpos + 1, y=ypos + 1, width=self.scaleWidth - 1, height=self.scaleHeight-1)\n self.scaleDict[scaleName].set(startValue)", "title": "" }, { "docid": "9cdc325cc6d34897fc51569e1cf46866", "score": "0.55470234", "text": "def __init__(self, color):\n self.color = color", "title": "" }, { "docid": "2acbc842b8b09030908a85cd02949c94", "score": "0.5515484", "text": "def mapping_init(min_val,max_val,cmap='viridis'):\n a = min_val\n b = max_val\n colormap = plt.get_cmap(cmap)\n positions = np.linspace(a,b,len(colormap.colors), endpoint=True)\n brush = pg.ColorMap(pos=positions, color=colormap.colors)\n return brush", "title": "" }, { "docid": "80272afed122fac6198bfd352e46398b", "score": "0.5493744", "text": "def make_greyscale():\n return Colormap([\n Colormap.Point(0.0, [0.0, 0.0, 0.0]),\n Colormap.Point(1.0, [1.0, 1.0, 1.0])\n ])", "title": "" }, { "docid": "f8658b1ea6a679b2375297f3dd4e6f73", "score": "0.5480039", "text": "def __new__(cls, clr, mode='color'):\n a = clr.a\n r = clr.r * a\n g = clr.g * a\n b = clr.b * a\n d = 1 - a # identity\n m = r + d, r, r, 0, 0, g, g + d, g, 0, 0, b, b, b + d, 0, 0, 0, 0, 0, 1, 0\n m = ' '.join('%.2f' % v for v in m)\n return Filter.__new__(cls, \n '<feColorMatrix type=\"matrix\" values=\"%s\" />' % m)", "title": "" }, { "docid": "5d418604fa6c70849908392c7b5275cd", "score": "0.5438254", "text": "def __init__(self, w, c):\n self.weight = w\n self.color = c\n print(\"Created!\")", "title": "" }, { "docid": "a9613bf75a1b13f6893082d0a05031f2", "score": "0.5434451", "text": "def __init__(self):\r\n self.gamma = 2.19921875\r\n self.red = [0.64, 0.33]\r\n self.green = [0.21, 0.71]\r\n self.blue = [0.15, 0.06]\r\n self.white = [0.3127, 0.3290]\r\n self.ref_white = Vec3(0.95047, 1.00, 1.08883)", "title": "" }, { "docid": "bd0fb7cb069f80f7e96986ba87896da5", "score": "0.54146135", "text": 
"def new(mode, size, color=0):\n\n _check_size(size)\n\n if color is None:\n # don't initialize\n return Image()._new(core.new(mode, size))\n\n if isStringType(color):\n # css3-style specifier\n\n from . import ImageColor\n\n color = ImageColor.getcolor(color, mode)\n\n im = Image()\n if mode == \"P\" and isinstance(color, (list, tuple)) and len(color) in [3, 4]:\n # RGB or RGBA value for a P image\n from . import ImagePalette\n\n im.palette = ImagePalette.ImagePalette()\n color = im.palette.getcolor(color)\n return im._new(core.fill(mode, size, color))", "title": "" }, { "docid": "50077c95eea23a305b05daee5ffb56f8", "score": "0.5402472", "text": "def __mod__(self, other:\"Color\") -> \"Color\":\n\t\t# Scale our values to the range [0, 1]\n\t\trt = self[0]/255.\n\t\tgt = self[1]/255.\n\t\tbt = self[2]/255.\n\t\tat = self[3]/255.\n\n\t\t# Convert to premultiplied alpha\n\t\trt *= at\n\t\tgt *= at\n\t\tbt *= at\n\n\t\t# Scale other values to the range [0, 1]\n\t\tro = other[0]/255.\n\t\tgo = other[1]/255.\n\t\tbo = other[2]/255.\n\t\tao = other[3]/255.\n\n\t\t# Convert to premultiplied alpha\n\t\tro *= ao\n\t\tgo *= ao\n\t\tbo *= ao\n\n\t\t# Blend colors\n\t\trf = rt + ro * (1 - at)\n\t\tgf = gt + go * (1 - at)\n\t\tbf = bt + bo * (1 - at)\n\t\taf = at + ao * (1 - at)\n\n\t\t# Unmultiply alpha\n\t\tif af:\n\t\t\trf /= af\n\t\t\tgf /= af\n\t\t\tbf /= af\n\n\t\t# Scale back to [0, 255]\n\t\tr = int(255*rf)\n\t\tg = int(255*gf)\n\t\tb = int(255*bf)\n\t\ta = int(255*af)\n\n\t\t# create final color\n\t\treturn self.__class__(r, g, b, a)", "title": "" }, { "docid": "3745b6e1b7c5041b56dc63a8adb65cc0", "score": "0.5402008", "text": "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\r\n self.colour = target_colour", "title": "" }, { "docid": "3745b6e1b7c5041b56dc63a8adb65cc0", "score": "0.5402008", "text": "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\r\n self.colour = target_colour", "title": "" }, { "docid": "126d686e4381cc935463be55695a67f1", "score": "0.539249", "text": "def __init__(self, attr, valuerange = None, cmap_str = None, colormap = None):\n\t\tif valuerange is None:\n\t\t\tself.valuerange = (np.min(attr.data), np.max(attr.data))\n\t\telse:\n\t\t\tself.valuerange = valuerange\n\t\tif cmap_str is not None:\n\t\t\tself.set_cmap(cmap_str)\n\t\telse:\n\t\t\tself.cmap = cm.Reds\n\t\tself.colormap = colormap\n\t\tself.attr = attr\n\t\tself.data = self.attr.data", "title": "" }, { "docid": "3508c19eafa98a2966cac02cafb4b7c4", "score": "0.5389405", "text": "def __init__(self, filename = \"dream.png\"):\n \n #Reading image\n self.im = imread(filename)\n #Scaling image\n self.im = self.im/ 255\n #Storing info about image\n self.shape = self.im.shape\n self.min = self.im.min()\n self.max = self.im.max()\n self.t = self.im.dtype\n \n if len(self.shape) == 2:\n self.rgb = np.ravel(self.im)\n else:\n self.rgb = np.ravel(self.im.mean(axis = 2)) \n \n return", "title": "" }, { "docid": "ec218d58c14c2e69fbe7ff77426f103a", "score": "0.53724307", "text": "def __init__(self, color=None, **kwargs):\n super().__init__(color, **kwargs)", "title": "" }, { "docid": "4e54d9e294acc7062e31c40a08df81b1", "score": "0.536868", "text": "def create_log_colorscale(log_base = 2.7):\n log_base = 2.7\n i_logs = 1-np.flip(np.logspace(-log_base,0,num=50,base=log_base))\n i_lins = np.arange(0.02,1.02,.02)\n\n viridis = matplotlib.cm.get_cmap('viridis')\n\n colorscale = [[0,'rgb(255,255,255)']]\n for i_log, i_lin in zip(i_logs, i_lins):\n \n this_color = viridis(i_log)\n rgb_string = 
'rgb({:.2f},{:.2f},{:.2f})'.format(\n 255*this_color[0],\n 255*this_color[1],\n 255*this_color[2])\n colorscale.append([i_lin, rgb_string])\n \n return colorscale", "title": "" }, { "docid": "5622fd1543d8ba1272dd7e031c12e1cd", "score": "0.53603", "text": "def build(self, shape, color_channels, **kwargs):", "title": "" }, { "docid": "1bfbd643715ed330974cba3da92d3071", "score": "0.5354362", "text": "def __init__(self, image_size, is_color, mean, scale,\n crop_size=0, pad=28,extend_size=300, color='BGR',\n use_cutout=False,\n use_mirroring=False,\n use_random_crop=False,\n use_center_crop=False,\n use_random_gray=False):\n torch.set_num_threads(1)\n\n self.image_size = image_size\n self.pad = pad\n self.mean = mean\n self.scale = scale\n self.crop_size = crop_size\n self.extend_size = extend_size\n self.color = color\n self.use_cutout = use_cutout\n self.use_mirroring = use_mirroring\n self.use_random_crop = use_random_crop\n self.use_center_crop = use_center_crop\n self.use_random_gray = use_random_gray\n self.save = True", "title": "" }, { "docid": "c9eb0b2adf81f12e47daa803eb98e5f4", "score": "0.5351147", "text": "def create_label_colormap():\n\n colormap = np.zeros((256, 3), dtype=np.uint8)\n\n colormap[0] = [0, 0, 0]\n colormap[1] = [128, 0, 0]\n colormap[2] = [255, 0, 0]\n colormap[3] = [0, 85, 0]\n colormap[4] = [170, 0, 51]\n colormap[5] = [255, 85, 0]\n colormap[6] = [0, 0, 85]\n colormap[7] = [0, 119, 221]\n colormap[8] = [85, 85, 0]\n colormap[9] = [0, 85, 85]\n colormap[10] = [85, 51, 0]\n colormap[11] = [52, 86, 128]\n colormap[12] = [0, 128, 0]\n colormap[13] = [0, 0, 255]\n colormap[14] = [51, 170, 221]\n colormap[15] = [0, 255, 255]\n colormap[16] = [85, 255, 170]\n colormap[17] = [170, 255, 85]\n colormap[18] = [255, 255, 0]\n colormap[19] = [255, 170, 0]\n\n return colormap", "title": "" }, { "docid": "2410357ce4fac3570171355c71725a9d", "score": "0.5306795", "text": "def __init__(self, data, offset):\n self.size_bytes, \\\n self.width_px, \\\n self.height_px, \\\n self.num_color_planes, \\\n self.num_bits_per_pixel, \\\n self.compression_type, \\\n self.pixel_data_size_bytes, \\\n self.pixels_per_meter_width, \\\n self.pixels_per_meter_height, \\\n self.num_colors_used, \\\n self.num_colors_required, \\\n self.red_bitmask, \\\n self.green_bitmask, \\\n self.blue_bitmask, \\\n self.alpha_bitmask, \\\n self.color_space_type = struct.unpack_from(\"<IiiHHIIiiIIIIIII\", data, offset=offset)\n\n self.color_space_endpoints = CIEXYZTriple(data, offset + 60)\n\n red_gamma, \\\n green_gamma, \\\n blue_gamma = struct.unpack_from(\"<III\", data, offset=offset + 96)\n\n self.red_gamma = red_gamma / 2**30\n self.green_gamma = green_gamma / 2**30\n self.blue_gamma = blue_gamma / 2**30", "title": "" }, { "docid": "b410d1943a55f2781b535904e6a86b3e", "score": "0.52871", "text": "def _scale_gradient(inputs: chex.Array, scale: float) -> chex.ArrayTree:\n del scale # Only used for the backward pass defined in _scale_gradient_bwd.\n return inputs", "title": "" }, { "docid": "41a07fc8aa2d45338f44203bc08461ee", "score": "0.52830046", "text": "def scales():", "title": "" }, { "docid": "5ab0a9467e4745ec88f205579f713ef0", "score": "0.52807784", "text": "def colormap(self, named_colormap=\"viridis\", vmin=None, vmax=None):\n if (vmin is not None and vmax is None) or (vmin is None and vmax is not None):\n raise ValueError(\"Must specify both vmin and vmax, or neither.\")\n if (\n named_colormap.literal_value is not None\n and named_colormap.literal_value not in Image._colormaps\n ):\n raise ValueError(\n 
\"Unknown colormap type: {}\".format(named_colormap.literal_value)\n )\n return self._from_apply(\"wf.colormap\", self, named_colormap, vmin, vmax)", "title": "" }, { "docid": "9dba902b3b125049a942f2d326ca9e4a", "score": "0.52746737", "text": "def __init__(self, centres, lenscale_bounds=Positive()):\n\n self.M, self.D = centres.shape\n self.C = centres\n self.bounds = lenscale_bounds", "title": "" }, { "docid": "9dba902b3b125049a942f2d326ca9e4a", "score": "0.52746737", "text": "def __init__(self, centres, lenscale_bounds=Positive()):\n\n self.M, self.D = centres.shape\n self.C = centres\n self.bounds = lenscale_bounds", "title": "" }, { "docid": "4d39d6d44b5bed0319996687b920d417", "score": "0.5273954", "text": "def __init__(self,\n axis,\n create_scale,\n create_offset,\n eps=1e-5,\n scale_init=None,\n offset_init=None,\n name=None):\n super(LayerNorm, self).__init__(name=name)\n if isinstance(axis, slice):\n self._axis = axis\n elif isinstance(axis, int):\n self._axis = (axis,)\n elif (isinstance(axis, collections.Iterable) and\n all(isinstance(ax, int) for ax in axis)):\n self._axis = axis\n else:\n raise ValueError(\"`axis` should be an int, slice or iterable of ints.\")\n\n self._eps = eps\n\n self._create_scale = create_scale\n self._create_offset = create_offset\n\n if self._create_scale:\n self._scale_init = scale_init or jnp.ones\n elif scale_init is not None:\n raise ValueError(\"Cannot set `scale_init` if `create_scale=False`.\")\n if self._create_offset:\n self._offset_init = offset_init or jnp.zeros\n elif offset_init is not None:\n raise ValueError(\"Cannot set `offset_init` if `create_offset=False`.\")", "title": "" }, { "docid": "4cd8ba51c401009ba0fe3b47ba47da10", "score": "0.5272858", "text": "def __init__(self,color, size, style, price):\n self.color = color\n self.size = size\n self.style = style\n self.price = price", "title": "" }, { "docid": "8c2a1887231fa6118b9cccc050d75e6f", "score": "0.5271328", "text": "def make_colormap(colors):\n##-------------------------\n\n from matplotlib.colors import LinearSegmentedColormap, ColorConverter\n from numpy import sort\n \n z = sort(colors.keys())\n n = len(z)\n z1 = min(z)\n zn = max(z)\n x0 = (z - z1) / (zn - z1)\n \n CC = ColorConverter()\n R = []\n G = []\n B = []\n for i in range(n):\n #i'th color at level z[i]:\n Ci = colors[z[i]] \n if type(Ci) == str:\n # a hex string of form '#ff0000' for example (for red)\n RGB = CC.to_rgb(Ci)\n else:\n # assume it's an RGB triple already:\n RGB = Ci\n R.append(RGB[0])\n G.append(RGB[1])\n B.append(RGB[2])\n\n cmap_dict = {}\n cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]\n cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]\n cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]\n mymap = LinearSegmentedColormap('mymap',cmap_dict)\n return mymap", "title": "" }, { "docid": "d619d9ad8d879d9e5a217fdaf4df9bac", "score": "0.52502203", "text": "def __init__(self, data = None, cmap = pylab.cm.Greys, extent = None, origin = \"lower\", alpha = 1.0, clim = None, name = 'Image', bins = 49 ):\n self.cmap = cmap\n self.data = data\n self.extent = extent\n self.origin = origin\n self.alpha = alpha\n self.clim = clim \n self.name = name\n self.bins = bins\n self.create_histogram(self.bins)", "title": "" }, { "docid": "c420ab3a4984c7ef72fd2da085625e0a", "score": "0.52354544", "text": "def scale(self, scale):\n try:\n new = PF(func=[self])\n if isinstance(scale, Param):\n new._scale = scale\n else:\n new._scale = Param(value=scale)\n except:\n raise\n return new", 
"title": "" }, { "docid": "51991d92f51f86909ec1a167552ada44", "score": "0.52322984", "text": "def multicolor():", "title": "" }, { "docid": "a87d5eab843cd5c2fc15acb2504eea7d", "score": "0.523026", "text": "def NewImage(dimensions=(1,1)):\r\n om = Image.new(\"RGB\", dimensions, color=(231,255,255))\r\n return om", "title": "" }, { "docid": "b02fed3178b58921b03e8b7f5cf5dbcf", "score": "0.52302593", "text": "def __new__(cls, h=0.0, s=1.0, b=1.0, contrast=1.0):\n b = -contrast * 0.5 + 0.5, \\\n contrast * b\n return Filter.__new__(cls, '\\n'.join((\n '<feColorMatrix type=\"hueRotate\" values=\"%.2f\" />' % (h * 360),\n '<feColorMatrix type=\"saturate\" values=\"%.2f\" />' % s,\n '<feComponentTransfer>',\n '<feFuncR type=\"linear\" intercept=\"%.2f\" slope=\"%.2f\" />' % b,\n '<feFuncG type=\"linear\" intercept=\"%.2f\" slope=\"%.2f\" />' % b,\n '<feFuncB type=\"linear\" intercept=\"%.2f\" slope=\"%.2f\" />' % b,\n '</feComponentTransfer>')))", "title": "" }, { "docid": "664320f3add8f273aa3f58a057e53636", "score": "0.52278316", "text": "def __init__(\n self,\n groups: int,\n axis: Union[int, slice, Sequence[int]] = slice(1, None),\n create_scale: bool = True,\n create_offset: bool = True,\n eps: float = 1e-5,\n scale_init: Optional[hk.initializers.Initializer] = None,\n offset_init: Optional[hk.initializers.Initializer] = None,\n data_format: str = \"channels_last\",\n name: Optional[str] = None,\n ):\n super().__init__(name=name)\n\n if isinstance(axis, slice):\n self.axis = axis\n elif isinstance(axis, int):\n self.axis = (axis,)\n elif (isinstance(axis, collections.abc.Iterable) and\n all(isinstance(ax, int) for ax in axis)):\n self.axis = axis\n else:\n raise ValueError(\"`axis` should be an int, slice or iterable of ints.\")\n\n self.groups = groups\n self.eps = eps\n self.data_format = data_format\n self.channel_index = utils.get_channel_index(data_format)\n self.create_scale = create_scale\n self.create_offset = create_offset\n self.rank = None\n\n if self.create_scale:\n if scale_init is None:\n scale_init = jnp.ones\n self.scale_init = scale_init\n elif scale_init is not None:\n raise ValueError(\"Cannot set `scale_init` if `create_scale=False`.\")\n\n if self.create_offset:\n if offset_init is None:\n offset_init = jnp.zeros\n self.offset_init = offset_init\n elif offset_init is not None:\n raise ValueError(\"Cannot set `offset_init` if `create_offset=False`.\")", "title": "" }, { "docid": "65d727b77adeaeb64066a9f3edf1f214", "score": "0.5226955", "text": "def _constructor_sliced(self):\n return Spectrum", "title": "" }, { "docid": "15b6cb84dfa9a189e9e613618354af3f", "score": "0.5223874", "text": "def __init__(self, px,py,pcolor):\n self.x = px\n self.y = py\n self.color = pcolor", "title": "" }, { "docid": "e6580e2b1d668dd11c1acead72f33f0c", "score": "0.52212584", "text": "def cmy(self, value):\n c, m, y = value\n if (c < 0 or c > 1) or (m < 0 or m > 1) or (y < 0 or y > 1):\n raise ValueError(\"invalid CMY value\")\n self.r = int((1.0 - c) * 255)\n self.g = int((1.0 - m) * 255)\n self.b = int((1.0 - y) * 255)", "title": "" }, { "docid": "f67d1133b520e961331f84a2a4c06f0e", "score": "0.52190727", "text": "def __init__(self, x, y, color):\n\n super().__init__(x, y)\n object.__setattr__(self, \"color\", color)", "title": "" }, { "docid": "4ec1b27a59da43b0f3c11f43817cb77e", "score": "0.52089345", "text": "def __init__(self, color: str, name: str):\n\n self.color = color\n self.name = name", "title": "" }, { "docid": "2615f3f9641d8b9bacf922a79a69d774", "score": "0.5188349", "text": "def 
__init__(self, scaler=None):\n self.scaler = scaler", "title": "" }, { "docid": "2615f3f9641d8b9bacf922a79a69d774", "score": "0.5188349", "text": "def __init__(self, scaler=None):\n self.scaler = scaler", "title": "" }, { "docid": "2615f3f9641d8b9bacf922a79a69d774", "score": "0.5188349", "text": "def __init__(self, scaler=None):\n self.scaler = scaler", "title": "" }, { "docid": "b68b61fd5b3660c1ad2100c7b704d033", "score": "0.5186174", "text": "def make_colormap(colors):\n\n from matplotlib.colors import LinearSegmentedColormap, ColorConverter\n from numpy import sort\n \n z = sort(colors.keys())\n n = len(z)\n z1 = min(z)\n zn = max(z)\n x0 = (z - z1) / (zn - z1)\n \n CC = ColorConverter()\n R = []\n G = []\n B = []\n for i in range(n):\n #i'th color at level z[i]:\n Ci = colors[z[i]] \n if type(Ci) == str:\n # a hex string of form '#ff0000' for example (for red)\n RGB = CC.to_rgb(Ci)\n else:\n # assume it's an RGB triple already:\n RGB = Ci\n R.append(RGB[0])\n G.append(RGB[1])\n B.append(RGB[2])\n\n cmap_dict = {}\n cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]\n cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]\n cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]\n mymap = LinearSegmentedColormap('mymap',cmap_dict)\n return mymap", "title": "" }, { "docid": "cab4a863150693c932f3eaf353a50664", "score": "0.517558", "text": "def create_cdict(r, g, b):\n i = np.linspace(0, 1, r.size)\n\n cdict = {name: list(zip(i, el / 255.0, el / 255.0))\n for el, name in [(r, 'red'), (g, 'green'), (b, 'blue')]}\n return cdict", "title": "" }, { "docid": "ece40b1b7eb336b7f8270aee4614eb6e", "score": "0.5171127", "text": "def colors2cmap(*args, name=None):\n if len(args) < 2:\n raise Exception(\"Give at least two colors.\")\n\n cmap_data = [_to_hex(c) for c in args]\n cmap = colors.LinearSegmentedColormap.from_list(name, cmap_data)\n plt.register_cmap(name, cmap)\n\n return cmap", "title": "" }, { "docid": "2d9bd74352887b0f63c79b0f712aadae", "score": "0.5162211", "text": "def test_init_stimulusColor(self):\n\t\t\n\t\trandom.seed()\n\t\tL = random.random()\n\t\ta = random.randint(0, 255)\n\t\tb = random.randint(0, 255) \n\t\tcol = Color([L, a, b])\n\t\tstimul_col = StimulusColor(col)\n\t\tself.assertEqual(stimul_col.color, col)\n\t\tself.assertEqual(stimul_col.color.content, [L, a, b])\n\t\tself.assertEqual(isinstance(stimul_col, Stimulus), True)\n\t\tself.assertEqual(isinstance(stimul_col, StimulusColor), True)", "title": "" }, { "docid": "daf51f5a18b7db7e2a19be3b30e814b2", "score": "0.5157941", "text": "def New(*args, **kargs):\n obj = itkScaleTransformD2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "daf51f5a18b7db7e2a19be3b30e814b2", "score": "0.5157941", "text": "def New(*args, **kargs):\n obj = itkScaleTransformD2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "title": "" }, { "docid": "f8f96f2d2190ffe72b3814c54842b188", "score": "0.51571965", "text": "def __init__(self,ax,size=(200,300)):\n self.ax = ax\n self.bounds = [ax.get_xlim()[0],ax.get_ylim()[0],ax.get_xlim()[1],ax.get_ylim()[1]]\n self.realWidth = self.bounds[2] - self.bounds[0]\n self.realHeight = self.bounds[3] - self.bounds[1]\n self.width = size[1]\n self.height = size[0]\n self.pixelWidth = self.realWidth/self.width\n self.pixelHeight = self.realHeight/self.height\n self.img =255* np.ones((size[0],size[1],3),dtype=np.uint8)", "title": "" }, { "docid": 
"065b08c6ebc705903e048f210391a5c5", "score": "0.5153511", "text": "def make_colormap(cmap_arr, name):\n ctmp = []\n i = 0\n while i < len(cmap_arr):\n ctmp.append([cmap_arr[i]/255., cmap_arr[i+1]/255., cmap_arr[i+2]/255.])\n i += 3\n\n return ListedColormap(ctmp, name=name, N=None)", "title": "" }, { "docid": "642804a6a9b53b06e8611e7fe84fa388", "score": "0.5140853", "text": "def __init__(self, z_scale=1500., sigma_from=200, sigma_to=0.8, y_scale=1., x_scale=1., stopping_criteria=0.00001,\\\n z_magnet=3070., sigma_decrement=0.5, noise_treshold=2., inlier_treshold=0.8, pre_sigma=0.8, x_resolution=1.,\\\n y_resolution=1., adjusting=True):\n\n self.labels_ = None\n self.tracks_params_ = None\n self.z_scale = z_scale\n self.y_scale = y_scale\n self.x_scale = x_scale\n self.sigma_from = sigma_from\n self.sigma_to = sigma_to\n self.stopping_criteria = stopping_criteria\n self.sigma_decrement = sigma_decrement\n self.noise_treshold = noise_treshold\n self.inlier_treshold = inlier_treshold\n self.pre_sigma = pre_sigma\n self.x_resolution = x_resolution\n self.y_resolution = y_resolution\n self.z_magnet = z_magnet\n self.adjusting = adjusting", "title": "" }, { "docid": "67b867e631dd1143651be8bf7711d319", "score": "0.5122719", "text": "def __init__(self, transformation, obj, color):\n SceneNode.__init__(self, transformation, obj)\n\n self.color = color", "title": "" }, { "docid": "d6fdde3ca08a24a678f8d404bd7f9884", "score": "0.5115922", "text": "def make_sc(nx=1, ny=1, nz=1, scale=1.0, noise=0.0):\n fractions = np.array([[0, 0, 0]], dtype=np.float32)\n return make_cubic(nx, ny, nz, fractions, scale, noise)", "title": "" }, { "docid": "4a2a9dcfb7b2b81e6a5c946bc87259ae", "score": "0.5115497", "text": "def __init__(\n self, resize=(0, 0), padding=(0, 0), crop=(0, 0), horizontal_flip_prob=0.0,\n vertical_flip_prob=0.0, gaussian_blur_prob=0.0, rotate_degree=0.0,\n cutout_prob=0.0, cutout_dim=(8, 8), hue_saturation_prob=0.0, contrast_prob=0.0,\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), normalize=True, train=True\n ):\n transforms_list = []\n\n if sum(resize) > 0:\n transforms_list += [A.Resize(\n height=resize[0], width=resize[1], always_apply=True\n )]\n if train:\n if sum(padding) > 0:\n transforms_list += [A.PadIfNeeded(\n min_height=padding[0], min_width=padding[1], always_apply=True\n )]\n if sum(crop) > 0:\n transforms_list += [A.RandomCrop(crop[0], crop[1], always_apply=True)]\n if horizontal_flip_prob > 0: # Horizontal Flip\n transforms_list += [A.HorizontalFlip(p=horizontal_flip_prob)]\n if vertical_flip_prob > 0: # Vertical Flip\n transforms_list += [A.VerticalFlip(p=vertical_flip_prob)]\n if gaussian_blur_prob > 0: # Patch Gaussian Augmentation\n transforms_list += [A.GaussianBlur(p=gaussian_blur_prob)]\n if rotate_degree > 0: # Rotate image\n transforms_list += [A.Rotate(limit=rotate_degree)]\n if cutout_prob > 0: # CutOut\n if isinstance(mean, float):\n fill_value = mean * 255.0\n else:\n fill_value = tuple([x * 255.0 for x in mean])\n transforms_list += [A.CoarseDropout(\n p=cutout_prob, max_holes=1, fill_value=fill_value,\n max_height=cutout_dim[0], max_width=cutout_dim[1]\n )]\n if hue_saturation_prob > 0: # Hue Saturation\n transforms_list += [A.HueSaturationValue(p=hue_saturation_prob)]\n if contrast_prob > 0: # Random Contrast\n transforms_list += [A.RandomContrast(p=contrast_prob)]\n if normalize:\n # normalize the data with mean and standard deviation to keep values in range [-1, 1]\n # since there are 3 channels for each image,\n # we have to specify mean and std for each channel\n 
transforms_list += [\n A.Normalize(mean=mean, std=std, always_apply=True),\n ]\n \n # convert the data to torch.FloatTensor\n transforms_list += [\n ToTensor()\n ]\n\n self.transform = A.Compose(transforms_list)", "title": "" }, { "docid": "cfbe991d0aaebba8f1eb644f1f2b9e77", "score": "0.5111386", "text": "def set_rgb_colormap(fieldName, minVal, maxVal, pr1max):\n lut = GetColorTransferFunction(fieldName)\n if fieldName == 'PR':\n if pr1max:\n lut.RGBPoints = [0.0/6, 0, 0, 1, \\\n 1.0/6, 0, 1, 1, \\\n 2.0/6, 0, 1, 1, \\\n 3.0/6, 0, 1, 0, \\\n 4.0/6, 1, 1, 0, \\\n 6.0/6, 1, 0, 0 ]\n else:\n lut.RGBPoints = [0.0/7, 0, 0, 1, \\\n 1.0/7, 0, 1, 1, \\\n 3.0/7, 0, 1, 0, \\\n 4.0/7, 1, 1, 0, \\\n 6.0/7, 1, 0, 0, \\\n 6.5/7, 1, 0, 1, \\\n 7.0/7, 0.5, 0.5, 0.5 ]\n else:\n lut.RGBPoints = [0.00, 0, 0, 1, \\\n 0.25, 0, 1, 1, \\\n 0.50, 0, 1, 0, \\\n 0.75, 1, 1, 0, \\\n 1.00, 1, 0, 0]\n lut.ColorSpace = 'RGB'\n lut.NanColor = [0.5, 0.5, 0.5]\n lut.BelowRangeColor = [0.0, 0.0, 0.0]\n lut.AboveRangeColor = [1.0, 1.0, 1.0]\n lut.UseBelowRangeColor = 0\n lut.UseAboveRangeColor = 0\n # For some reason you cannot toggle whether to use a NaN color; it must use it no matter what\n lut.ScalarRangeInitialized = 1\n lut.RescaleTransferFunction(minVal, maxVal)\n\n pwf = GetOpacityTransferFunction(fieldName)\n pwf.Points = [0.0, 0.0, 0.5, 0.0, \\\n 1.0, 1.0, 0.5, 0.0]\n pwf.ScalarRangeInitialized = 1\n pwf.RescaleTransferFunction(minVal, maxVal)", "title": "" }, { "docid": "3bc216fca9b3aa9c561be939f7a9aaf5", "score": "0.5100143", "text": "def __init__(self, number, colour):\n self._number = number\n self._colour = colour", "title": "" }, { "docid": "1e8bffd12e3476aadfaf2212c72acff0", "score": "0.5090016", "text": "def __init__(self, *, rows=1, cols=1):\n # TODO(jwnimmer-tri) Add an argument for pixel (image) type.\n LeafSystem.__init__(self)\n assert rows >= 1\n assert cols >= 1\n self._rows = rows\n self._cols = cols\n self._inputs = dict()\n for row in range(rows):\n for col in range(cols):\n key = (row, col)\n self._inputs[key] = self.DeclareAbstractInputPort(\n name=f\"color_image_r{row}_c{col}\",\n model_value=Value(ImageRgba8U()))\n self._output = self.DeclareAbstractOutputPort(\n \"color_image\",\n alloc=lambda: Value(ImageRgba8U()),\n calc=self._calc_output)", "title": "" }, { "docid": "504a2edaba96b935e5df3c3cb419b863", "score": "0.5086759", "text": "def colorize(self):\n if self.image:\n original = self.image\n color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))\n self.image = Image.new('RGB', original.size)\n self.image.paste(color, box=(0, 0) + original.size)\n self.image.paste(original, mask=original)\n self.operationsCount += 1\n return self", "title": "" }, { "docid": "74482f010ae78865e963183ac14684e1", "score": "0.50720763", "text": "def create_trash_label_colormap():\r\n colormap = np.zeros((11, 3), dtype=np.uint8)\r\n for inex, (r, g, b) in enumerate(class_colormap):\r\n colormap[inex] = [r, g, b]\r\n \r\n return colormap", "title": "" }, { "docid": "9e8d02de0c946824f7313a58370ee20c", "score": "0.5071454", "text": "def _create(self):\n return self._get_client(\"autoscale\")", "title": "" }, { "docid": "5b9235e2f0e98f0d19824ebf6f6c61f7", "score": "0.50676537", "text": "def __init__(self, scale, startCoord=(0, 0)):\n self.conquered = False\n self.val = randint(1, 5)\n # Reference to the upper left\n # coordinate of the tile\n self.startCoord = startCoord\n self.scale = scale\n self.edges = self.initEdges()", "title": "" }, { "docid": "878bc51fdd6a5d2dbe10c5e666b5ea23", 
"score": "0.506449", "text": "def __init__(self, image_size=224):\n super().__init__()\n raster_settings = {\n 'image_size': image_size,\n 'blur_radius': 0.0,\n 'faces_per_pixel': 1,\n 'bin_size': None,\n 'max_faces_per_bin': None,\n 'perspective_correct': False,\n }\n raster_settings = util.dict2obj(raster_settings)\n self.raster_settings = raster_settings", "title": "" }, { "docid": "8e4b5373e5416897cc1aa1c03e3928fa", "score": "0.5061011", "text": "def __init__(self, x, y, dim, color):\n\n Rectangle.__init__(self, dim, dim, color, x, y)", "title": "" }, { "docid": "b166d21dbde493fc7aacc1d16f405fc4", "score": "0.5060623", "text": "def __init__(self, size, mode=\"RGBA\", background_color=0, offset=(0,0)):\n image = size if isinstance(size, PIL.Image.Image) else \\\n PIL.Image.new(mode, size, background_color)\n super(self.__class__, self).__init__(image, mode)\n self._image = image\n self._offset = offset", "title": "" }, { "docid": "969d65ecdafbecf4b29276038dd37ffd", "score": "0.50513476", "text": "def __init__(self, shape_a, shape_b):\n self.shape_a = shape_a\n self.shape_b = shape_b\n self.color = ( (shape_a.color[0] + shape_b.color[0])/2,\n (shape_a.color[1] + shape_b.color[1])/2,\n (shape_a.color[2] + shape_b.color[2])/2 )", "title": "" }, { "docid": "969d65ecdafbecf4b29276038dd37ffd", "score": "0.50513476", "text": "def __init__(self, shape_a, shape_b):\n self.shape_a = shape_a\n self.shape_b = shape_b\n self.color = ( (shape_a.color[0] + shape_b.color[0])/2,\n (shape_a.color[1] + shape_b.color[1])/2,\n (shape_a.color[2] + shape_b.color[2])/2 )", "title": "" }, { "docid": "ebf219209d198ea0a4c496b03d52ba62", "score": "0.50473994", "text": "def __init__(self, *args, **kwargs):\r\n cls = self.__class__\r\n mode = kwargs.get('mode', None)\r\n if mode is not None and mode not in cls.modes :\r\n raise ValueError, \"unknown mode %s for %s\" % (mode, util.clsname(self))\r\n # can also use the form <componentname>=<number>\r\n # for now supports only rgb and hsv flags\r\n hsvflag = {}\r\n rgbflag = {}\r\n for a in 'hsv' :\r\n if a in kwargs :\r\n hsvflag[a] = kwargs[a]\r\n for a in 'rgb' :\r\n if a in kwargs :\r\n rgbflag[a] = kwargs[a]\r\n # can't mix them\r\n if hsvflag and rgbflag :\r\n raise ValueError, \"can not mix r,g,b and h,s,v keyword arguments in a %s declaration\" % util.clsname(self)\r\n # if no mode specified, guess from what keyword arguments where used, else use 'rgb' as default\r\n if mode is None :\r\n if hsvflag :\r\n mode = 'hsv'\r\n else :\r\n mode = 'rgb'\r\n # can't specify a mode and use keywords of other modes\r\n if mode is not 'hsv' and hsvflag :\r\n raise ValueError, \"Can not use h,s,v keyword arguments while specifying %s mode in %s\" % (mode, util.clsname(self))\r\n elif mode is not 'rgb' and rgbflag :\r\n raise ValueError, \"Can not use r,g,b keyword arguments while specifying %s mode in %s\" % (mode, util.clsname(self))\r\n # NOTE: do not try to use mode with _api.Color, it seems bugged as of 2008\r\n #import colorsys\r\n #colorsys.rgb_to_hsv(0.0, 0.0, 1.0)\r\n ## Result: (0.66666666666666663, 1.0, 1.0) #\r\n #c = _api.Color(_api.Color.kHSV, 0.66666666666666663, 1.0, 1.0)\r\n #print \"# Result: \",c[0], c[1], c[2], c[3],\" #\"\r\n ## Result: 1.0 0.666666686535 1.0 1.0 #\r\n #c = _api.Color(_api.Color.kHSV, 0.66666666666666663*360, 1.0, 1.0)\r\n #print \"# Result: \",c[0], c[1], c[2], c[3],\" #\"\r\n ## Result: 1.0 240.0 1.0 1.0 #\r\n #colorsys.hsv_to_rgb(0.66666666666666663, 1.0, 1.0)\r\n ## Result: (0.0, 0.0, 1.0) #\r\n # we'll use Color only to store RGB 
values internally and do the conversion a read/write if desired\r\n # which I think make more sense anyway\r\n # quantize (255, 65535, no quantize means colors are 0.0-1.0 float values)\r\n # Initializing api's Color with int values seems also not to always behave so we quantize first and\r\n # use a float init always\r\n quantize = kwargs.get('quantize', None)\r\n if quantize is not None :\r\n try :\r\n quantize = float(quantize)\r\n except :\r\n raise ValueError, \"quantize must be a numeric value, not %s\" % (util.clsname(quantize))\r\n # can be initilized with a single argument (other Color, Vector, VectorN)\r\n if len(args)==1 :\r\n args = args[0]\r\n # we dont rely much on Color api as it doesn't seem totally finished, and do some things directly here\r\n if isinstance(args, self.__class__) or isinstance(args, self.apicls) :\r\n # alternatively could be just ignored / output as warning\r\n if quantize :\r\n raise ValueError, \"Can not quantize a Color argument, a Color is always stored internally as float color\" % (mode, util.clsname(self))\r\n if mode == 'rgb' :\r\n args = VectorN(args)\r\n elif mode == 'hsv' :\r\n args = VectorN(cls.rgbtohsv(args))\r\n else :\r\n # single alpha value, as understood by api will break coerce behavior in operations\r\n # where other operand is a scalar\r\n #if not hasattr(args, '__iter__') :\r\n # args = VectorN(0.0, 0.0, 0.0, args)\r\n if hasattr(args, '__len__') :\r\n shape = (min(len(args), cls.size),)\r\n else :\r\n shape = cls.shape\r\n args = VectorN(args, shape=shape)\r\n # quantize if needed\r\n if quantize :\r\n args /= quantize\r\n # pad to a full Color size\r\n args.stack(self[len(args):])\r\n\r\n # apply keywords arguments, and convert if mode is not rgb\r\n if mode == 'rgb' :\r\n if rgbflag :\r\n for i, a in enumerate('rgb') :\r\n if a in rgbflag :\r\n if quantize :\r\n args[i] = float(rgbflag[a]) / quantize\r\n else :\r\n args[i] = float(rgbflag[a])\r\n elif mode == 'hsv' :\r\n if hsvflag :\r\n for i, a in enumerate('hsv') :\r\n if a in hsvflag :\r\n if quantize :\r\n args[i] = float(hsvflag[a]) / quantize\r\n else :\r\n args[i] = float(hsvflag[a])\r\n args = VectorN(cls.hsvtorgb(args))\r\n # finally alpha keyword\r\n a = kwargs.get('a', None)\r\n if a is not None :\r\n if quantize :\r\n args[-1] = float(a) / quantize\r\n else :\r\n args[-1] = float(a)\r\n\r\n try :\r\n self.assign(args)\r\n except :\r\n msg = \", \".join(map(lambda x,y:x+\"=<\"+util.clsname(y)+\">\", mode, args))\r\n raise TypeError, \"in %s(%s), at least one of the components is of an invalid type, check help(%s) \" % (util.clsname(self), msg, util.clsname(self))", "title": "" }, { "docid": "da875b289bab12f81562810cc31447d4", "score": "0.5045717", "text": "def image_from_component_values(component):\n hi = max(component)\n lo = min(component)\n n = len(component) / 3\n divisor = hi - lo\n if divisor == 0:\n divisor = 1\n def rescale(x):\n return int(255 * (x - lo) / divisor)\n d = [(rescale(component[3 * i]),\n rescale(component[3 * i + 1]),\n rescale(component[3 * i + 2])) for i in range(n)]\n im = Image.new('RGB',STANDARD_SIZE)\n im.putdata(d)\n return im", "title": "" } ]
a096da259efff9e160ccbea446040e40
Private function to get the absolute path to the downloaded file.
[ { "docid": "515b8459a0fbd85ad7dab27c0aa5f909", "score": "0.0", "text": "def _get_data(path, scraper_name, project_id):\n cwd = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(cwd, 'pdf', scraper_name, project_id, path)", "title": "" } ]
[ { "docid": "83ad940bd914fa9b3f8790f35f6843a2", "score": "0.81307155", "text": "def download_path(self):\n download_path = self.TMP_DIR.joinpath(self.DOWNLOAD_NAME).resolve()\n return download_path", "title": "" }, { "docid": "a4a1c4e8c019224e8a5dcad9c716aeae", "score": "0.747789", "text": "def get_download_url() -> str:\n return DOWNLOAD_URL_FILE.read_text().strip('/\\n')+'/'", "title": "" }, { "docid": "64244f3ebe3b2a53c168ef187fec5fce", "score": "0.72105974", "text": "def download_dir_fq(self):\n if self.download_dir is None:\n return paths['raw_data_path']\n else:\n download_path = pathlib.Path(self.download_dir)\n if download_path.is_absolute():\n return download_path\n else:\n return paths['raw_data_path'] / self.download_dir", "title": "" }, { "docid": "2f50360b5709fc0fd1ea10d1ee6f39b9", "score": "0.7207265", "text": "def _get_abs_path(self, rel_path):\n return os.path.join(self.path, rel_path)", "title": "" }, { "docid": "30adf4443c8a68b79bba82c3a106b68d", "score": "0.71881306", "text": "def getAbsolutePath(self):\n return self.absoluteFilepath", "title": "" }, { "docid": "ed11361f782126767fcd106a15384668", "score": "0.7184022", "text": "def PathManager_GetDownloadDir():\n return _MaxPlus.PathManager_GetDownloadDir()", "title": "" }, { "docid": "d1f7a3162560e125ed57707203167e62", "score": "0.71480334", "text": "def absoluteFilePath(self):\n if self.__absolutePath is None:\n if self.isRoot():\n path = \"\"\n elif self.isDrive():\n path = self.__fileInfo.filePath()\n else:\n path = os.path.join(self.parent().absoluteFilePath(), self.__fileInfo.fileName())\n if path == \"\":\n return \"/\"\n self.__absolutePath = path\n return self.__absolutePath", "title": "" }, { "docid": "75bbe526b833664a79f97c718aa728c3", "score": "0.7144771", "text": "def download_dir(self):\n return self._download_dir", "title": "" }, { "docid": "fd8dc35d0331eac07824d711ac6a9dd7", "score": "0.71108794", "text": "def download_name_file(self) -> str:\n data_path = os.path.join(self.output_path, \"data\")\n file_path: str = os.path.join(data_path, self.FILE_NAME)\n ensure_directory_exists(data_path)\n ensure_file_downloaded(source_url=self.SOURCE_URI, target_path=file_path)\n return file_path", "title": "" }, { "docid": "1e139ace0bc07520259c81b72ef391cb", "score": "0.7105343", "text": "def _get_download_url(self):\n\n return self._url", "title": "" }, { "docid": "16f71c07119067f94cf041de9a829749", "score": "0.70968205", "text": "def get_file_path(self) -> str:\n return self.file_path", "title": "" }, { "docid": "2c051e5292ca5ba890e45bba7aeec5c5", "score": "0.70964605", "text": "def download_url(self):\n if self.file:\n return self.file.url\n else:\n return None", "title": "" }, { "docid": "bdf81258f81632bf2499ea540c813a71", "score": "0.708905", "text": "def getabspath(self):\n\t\treturn os.path.join(self.getpath(), self.getfilename())", "title": "" }, { "docid": "188bdb9d760e741a851ec543b6238700", "score": "0.7083036", "text": "def get_file_abs_path(self, filename: str, data_dir: str):\n return os.path.join(data_dir, self.name, filename)", "title": "" }, { "docid": "9e8f83af4b5cb67e053b167a7bda40e4", "score": "0.70176077", "text": "def get_file_url(self):\r\n if self.html is None:\r\n self.download_html()\r\n if not self.wantReconnect:\r\n self.req_opts = self.get_download_options() # get the Post options for the Request\r\n #file_url = self.pyfile.url\r\n #return file_url\r\n else:\r\n self.offline()", "title": "" }, { "docid": "84ef4218b6365cadf6c0ed4acc5c4f8b", "score": "0.6979087", "text": "def 
full_file_path(self):\n return Path(settings.TNPACKAGE_FILES_ROOT) / Path(self.pkg.disk_directory) / self.file_path", "title": "" }, { "docid": "f28fc22edd4f9c032c867aa981353a4a", "score": "0.69514865", "text": "def GetDownloadDir():\n return _MaxPlus.PathManager_GetDownloadDir()", "title": "" }, { "docid": "001334186a50e8fdbe89cbf4255062d6", "score": "0.6886881", "text": "def get_file_path(self):\n return self.file_path", "title": "" }, { "docid": "79faa994498662eeb65341f0a3c32d59", "score": "0.68684673", "text": "def getAbsolutePath(self) -> str:\n return self.absolutePath", "title": "" }, { "docid": "cd8682750695b05c5bd510fa6a0a9704", "score": "0.6827829", "text": "def file_path(self):\n fh = self._get_filehandler()\n if fh:\n return fh.baseFilename\n return None", "title": "" }, { "docid": "2787a18505d00fe3f167a0c82ffc82a3", "score": "0.68277234", "text": "def absolute_path(self):\n return FilePath(os.path.abspath(self.path))", "title": "" }, { "docid": "011193cc36deffddc1fa25354ece836d", "score": "0.6816996", "text": "def file_path(self):\n return self.directory + '/' + self.file_name", "title": "" }, { "docid": "7f30b0296788cf1f06c84a162568d199", "score": "0.6811151", "text": "def get_download_path(self):\n self.download_dir = QFileDialog.getExistingDirectory(\n self, \"Open folder\", BASE_PATH\n )\n if not self.download_dir:\n self.download_dir = BASE_PATH\n\n self.download_folder_select.setText(\n self.get_parent_current_dir(self.download_dir)\n )", "title": "" }, { "docid": "25fd4c4934bb382b0fe6f262b2c6afa1", "score": "0.6800178", "text": "def getFileName(self):\n return os.path.abspath(self.filename)", "title": "" }, { "docid": "4b936bcdeedc9c2de0350d979c75b53d", "score": "0.67987883", "text": "def get_file_path(self):\n return self.__file_path", "title": "" }, { "docid": "97e4a5837d97f0e65552ed628ac13135", "score": "0.6797612", "text": "def get_file_path():\n file = Path(f\"{DIR_NAME}/{FILE_NAME}\")\n\n if file.exists() and file.is_file():\n return str(file.absolute())\n\n raise FileExistsError(f\"file {FILE_NAME} doesn't exists in {DIR_NAME}\")", "title": "" }, { "docid": "7b618e7bd4c9fb9c64da40da41ec5d61", "score": "0.6766405", "text": "def filename(self):\n return self.urlparse.path", "title": "" }, { "docid": "8f7b723831be089b3c1b62383aec64d0", "score": "0.67649645", "text": "def _get_download_dir(self) -> str:\n try:\n return config.get_pubmed_download_dir()\n except Exception as e:\n PubMedScraper.LOGGER.exception(e)\n return PubMedScraper.DEFAULT_DOWNLOAD_DIR", "title": "" }, { "docid": "d8491bcb0e24e02160b53c26b67c4506", "score": "0.67506146", "text": "def get_cache_path(self, url: str) -> Path:\n filename = short_hash(url)\n return self.downloads_directory / filename", "title": "" }, { "docid": "f4e62e040944151f04d15502ccd281b6", "score": "0.6688198", "text": "def getFullPath(self):\n return self.fullpath", "title": "" }, { "docid": "391bac0742a326a5820489edbe27e59e", "score": "0.66806006", "text": "def download_url(self):\n raise NotImplementedError", "title": "" }, { "docid": "d6f09aaa5f8f38cc8cbbcdd1a9499bec", "score": "0.6673809", "text": "def download_url(self):\n return self.url", "title": "" }, { "docid": "e617ab53d155e0eb9bff27cb4a8e239c", "score": "0.66657525", "text": "def absolute_path(self):\n return os.path.abspath(self.path) + '/'", "title": "" }, { "docid": "fc0ae08cccd2d1b63cd8f75fea50f99a", "score": "0.6659369", "text": "def getFullPath(self):\n return self.getNci('fullpath',False)", "title": "" }, { "docid": "36c9248145830f4f311ad3317a4b2f31", "score": 
"0.6645144", "text": "def download_file():\n file_path = mpd_command('currentsong').get('file')\n if file_path:\n return send_from_directory(config.MPD_ROOT, file_path, as_attachment=True)\n else:\n return \"\"", "title": "" }, { "docid": "db000ded2e62bc99ba91b55ce1259a2f", "score": "0.6627876", "text": "def getFilePath(self):\n return self.__filename", "title": "" }, { "docid": "7f69e95ea63ef556af6000a37e136c6a", "score": "0.66262424", "text": "def file_path(self):\n return self._file_path", "title": "" }, { "docid": "dc936f401785b0bb910f08101a5b24d4", "score": "0.6620132", "text": "def file_path(self):\n return self._path", "title": "" }, { "docid": "52ae58fb805e3d5ad19d88aa617a20b1", "score": "0.6613831", "text": "def get_path(self):\n parts = self.filename.split('/')[0:-1]\n return ('/'.join(parts)) if parts else None", "title": "" }, { "docid": "52ae58fb805e3d5ad19d88aa617a20b1", "score": "0.6613831", "text": "def get_path(self):\n parts = self.filename.split('/')[0:-1]\n return ('/'.join(parts)) if parts else None", "title": "" }, { "docid": "9dc3cf420e26e603ab5a553716ab2022", "score": "0.66136885", "text": "def GetFilename(self):", "title": "" }, { "docid": "62a2c1182e7ecd37528e673290b2f945", "score": "0.66107327", "text": "def file_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"file_path\")", "title": "" }, { "docid": "095a773acf2beca0bc6e8588dd950c18", "score": "0.66077846", "text": "def get_abs_host_path(self):\n return os.path.join(HOST_BASE, self.wf_name, self.task_name, self.src[1:])", "title": "" }, { "docid": "c546994e4ca9dd41edf6321e6b09a4bf", "score": "0.660742", "text": "def get_absolute_path(file_name, package_level=True):\n if package_level:\n # Inside `gmail_api_wrapper`\n dirname = os.path.dirname(__file__)\n else:\n # Outside `gmail_api_wrapper`\n dirname = os.path.join(os.path.dirname(__file__), os.pardir)\n\n file_path = os.path.abspath(os.path.join(dirname, file_name))\n return file_path", "title": "" }, { "docid": "e0f165479d318863015776da261251c6", "score": "0.66062856", "text": "async def _fetch_download_url(self):\n if not self.download_url:\n # v1 Waterbutler url provided\n path = urlparse(self.url).path\n if path.startswith('/v1/resources'):\n self.download_url = self.url\n self.metrics.add('download_url.orig_type', 'wb_v1')\n else:\n self.metrics.add('download_url.orig_type', 'osf')\n # make request to osf, don't follow, store waterbutler download url\n request = await self._make_request(\n 'GET',\n self.url,\n allow_redirects=False,\n headers={\n 'Content-Type': 'application/json'\n }\n )\n await request.release()\n\n logger.debug('osf-download-resolver: request.status::{}'.format(request.status))\n if request.status != 302:\n raise exceptions.MetadataError(\n request.reason,\n metadata_url=self.url,\n provider=self.NAME,\n code=request.status,\n )\n self.download_url = request.headers['location']\n\n self.metrics.add('download_url.derived_url', str(self.download_url))\n\n return self.download_url", "title": "" }, { "docid": "5311dcf2ca6af802c4a7f8f20a6687e6", "score": "0.65978575", "text": "def file_path(self):\n return self.__file_path", "title": "" }, { "docid": "74a6f89a212ab46b6070aa473b22ca55", "score": "0.65854627", "text": "def path_absolute(self):\n return os.path.abspath(self.path)", "title": "" }, { "docid": "95fa09de14c7e5a38f2f78930b671983", "score": "0.658137", "text": "def _download_file( self, url ):\n filename = os.path.join( tempfile.mkdtemp(), os.path.basename( urlparse( url ).path ) )\n f = urllib2.urlopen( url 
)\n with open( filename, \"wb\" ) as fp:\n shutil.copyfileobj( f, fp )\n return filename", "title": "" }, { "docid": "aca10ee50bc209268b062c319efe934e", "score": "0.6571092", "text": "def get_path(self, filename):\n assert os.path.dirname(filename) == \"\"\n return os.path.join(self.path, filename)", "title": "" }, { "docid": "91124fb432dcea6e7f1a8028cd4f2d72", "score": "0.6569107", "text": "def download_name(self):\n return os.path.split(self.name())[-1]", "title": "" }, { "docid": "e874d383dad853091a8d8b1cb253f7cd", "score": "0.65669274", "text": "def abs_file(path):\n return actual_path(os.path.abspath(os.path.realpath(path)))", "title": "" }, { "docid": "7c5f94197000ad9e4eef1c886c9f62f5", "score": "0.6562237", "text": "def __get_path(self, input_source):\n abs_file_path = \"\"\n if os.path.isabs(input_source):\n abs_file_path = input_source\n else:\n file_path = os.path.normpath(input_source)\n abs_file_path = os.path.join(settings.root_path, file_path)\n return abs_file_path", "title": "" }, { "docid": "ad6bb10e6b42576c456bf4b29c78ce52", "score": "0.6551599", "text": "def getAbsPath(self, relPath):\n\t\treturn os.path.join(self.resdir, relPath)", "title": "" }, { "docid": "7cd30fd3a997f51318c4abf668635af1", "score": "0.6549312", "text": "def get_file_path(self, userfiles_dir):\n fp = full_file_path(userfiles_dir, self.project_id, self.nest_id)\n return fp", "title": "" }, { "docid": "7cd6ceb559f5653c5eaf7468dbbe9991", "score": "0.65402955", "text": "def get_abs_path(file_name: str) -> str:\n return os.path.abspath(file_name)", "title": "" }, { "docid": "cf8b9451cf2f75bc28a6952fb4409514", "score": "0.65346164", "text": "def get_filename(self):\n\t\treturn self.path_from_content.split('/')[-1]", "title": "" }, { "docid": "e49c1fdb39455a4b5cc4508766811cab", "score": "0.6492036", "text": "def _get_file_path(self, blob: Blob) -> str:\n return os.path.join(self.base_path, blob.container.name, blob.name)", "title": "" }, { "docid": "831c7ba83bf78de26e5ec9c4be81bf62", "score": "0.6488308", "text": "def get_absfile(self):\n return File(os.path.abspath(self.filename))", "title": "" }, { "docid": "52ce74a09588800b199080e86b9f3d68", "score": "0.6487778", "text": "def _get_filename(self):\n \n return self._filename", "title": "" }, { "docid": "e7593800b8448e7d42a8c866c558434b", "score": "0.6485407", "text": "def path(self, filename):\n return os.path.join(self.dirpath, filename)", "title": "" }, { "docid": "bbd3d1109809359a9595829bc7eac66d", "score": "0.6479016", "text": "def getRelativeFilepath(self):\n return self.relativeFilepath", "title": "" }, { "docid": "3ef6bca641948a5eaf06e28a2fa86b9c", "score": "0.64603853", "text": "def GetDownloadPath():\n\tif os.name == 'nt':\n\t\timport winreg\n\t\tsub_key = r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders'\n\t\tdownloads_guid = '{374DE290-123F-4565-9164-39C4925E467B}'\n\t\twith winreg.OpenKey(winreg.HKEY_CURRENT_USER, sub_key) as key:\n\t\t\tlocation = winreg.QueryValueEx(key, downloads_guid)[0]\n\t\treturn location\n\telse:\n\t\treturn os.path.join(os.path.expanduser('~'), 'downloads')", "title": "" }, { "docid": "6fbb1559a39d303fe95da25e12b687a1", "score": "0.64549124", "text": "def get_download_path():\n if os.name == 'nt':\n import winreg\n sub_key = r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders'\n downloads_guid = '{374DE290-123F-4565-9164-39C4925E467B}'\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, sub_key) as key:\n location = winreg.QueryValueEx(key, downloads_guid)[0]\n return location\n 
else:\n return os.path.join(os.path.expanduser('~'), 'downloads')", "title": "" }, { "docid": "9e4609ce398224d0e638a67e122e4beb", "score": "0.6445347", "text": "def get_file_path(filename):\n return os.path.join(data_path, filename)", "title": "" }, { "docid": "83c5c599322ce3c43d0bfe670f5384c2", "score": "0.64427793", "text": "def _abspath(self, rel_file_path) -> str:\n if path.isabs(rel_file_path):\n raise NotRelativePathError(rel_file_path)\n return join(self.tmpd.name, rel_file_path)", "title": "" }, { "docid": "859ad0af146da13f55e5b2e73f5c1d45", "score": "0.6431836", "text": "def get_file(self, filename):\n\n # Note this is an HDFS path, not a userspace path. os.path library\n # may be wrong\n placed_path = \"/user/\" + config.user + \"/qa_data/\" + filename\n return placed_path", "title": "" }, { "docid": "028a001d6c86b2f503f8f90c70c8c0e1", "score": "0.6423114", "text": "def download(self, input_path: str) -> str:", "title": "" }, { "docid": "24246c6fa4ecb067ddeb267e116dac38", "score": "0.6412704", "text": "def get_file(self, file_link):\n file_path = f'media/{file_link.split(\"/\")[-1]}'\n\n if path.exists(file_path):\n return file_path\n\n # Get File\n file_stream = requests.get(file_link)\n\n open(file_path, 'wb').write(file_stream.content)\n\n print(f'File downloaded: {file_path}')\n\n return file_path", "title": "" }, { "docid": "7a96e53c3712080bb2250d16c365cd81", "score": "0.6408431", "text": "def abspath(self):\n return os.path.abspath(os.path.expanduser(self.filename))", "title": "" }, { "docid": "623f3053583c93b5fe21d66e33ff146b", "score": "0.6397981", "text": "def maybe_download(filename):\r\n if not tf.gfile.Exists(WORK_DIRECTORY):\r\n tf.gfile.MakeDirs(WORK_DIRECTORY)\r\n filepath = os.path.join(WORK_DIRECTORY, filename)\r\n if not tf.gfile.Exists(filepath):\r\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\r\n with tf.gfile.GFile(filepath) as f:\r\n size = f.size()\r\n print('Successfully downloaded', filename, size, 'bytes.')\r\n return filepath", "title": "" }, { "docid": "a02d9f6c9bbea48494049518ac01c326", "score": "0.6381349", "text": "def get_abs_path(self):\n return os.path.join(app.config['UPLOADS'], self.base_name)", "title": "" }, { "docid": "0a24fc294ec1889590de91fafb48aa03", "score": "0.6374135", "text": "def get_dep_abs_path(self, entry):\n req_file = self.raw.get('dependencies', {}).get(entry)\n\n if not req_file:\n return None\n\n if os.path.isabs(req_file):\n return req_file\n\n return os.path.join(self.reference_path, req_file)", "title": "" }, { "docid": "19444eb5ca9198edd4069eb52f8f83d8", "score": "0.63734305", "text": "def _cache_path(self):\n root = os.path.dirname(__file__)\n relative_path = \"%s/%s.mp3\" % (options.tmp, self.query)\n path = os.path.join(root, relative_path)\n return path", "title": "" }, { "docid": "4e45a081ef0356014a360d132f3733c4", "score": "0.6367195", "text": "def downloaded_file_name(self):\n return self.name + '.' 
+ self.song.url.split(\".\")[-1]", "title": "" }, { "docid": "213970168135a06ba3cad18e708e4747", "score": "0.63526714", "text": "def download(self, input_path: str) -> str:\n return input_path", "title": "" }, { "docid": "e785cd9a2a42d4b805cdb3e3077873a5", "score": "0.63456154", "text": "def get_abs_path(path: str) -> str:\n if not os.path.exists(path):\n raise FileNotFoundError('`{}` doesn\\'t exist.'.format(path))\n return os.path.abspath(path)", "title": "" }, { "docid": "8cf7742ff276a36c118351586fdba0f3", "score": "0.6342336", "text": "def maybe_download(filename):\n if not os.path.exists(WORK_DIRECTORY):\n os.mkdir(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not os.path.exists(filepath):\n filepath, _ = urlretrieve(SOURCE_URL + filename, filepath)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n else:\n print('Already downloaded', filename)\n return filepath", "title": "" }, { "docid": "eabded02b24573f9fdd13b668685566c", "score": "0.63401634", "text": "def file_path(self):\n \n self.filePath = const.DATA_DIRECTORY + self.fileName.split(\"\\\\\")[-1]\n return self.filePath", "title": "" }, { "docid": "d529f44342b51ce2b3707e7c67c717ce", "score": "0.6337927", "text": "def get_file_url(self):\r\n if self.html is None:\r\n self.download_html()\r\n\r\n file_url = unescape(re.search(r'hashlink=(http.*?)\"', self.html).group(1))\r\n\r\n return file_url", "title": "" }, { "docid": "68541352bbfe0f236d165ee477f3accc", "score": "0.63324624", "text": "def to_relative_path(self, url: str) -> str:", "title": "" }, { "docid": "58303f215527f64a8da5b4da09da1498", "score": "0.6323593", "text": "def getpath():\n return path", "title": "" }, { "docid": "31eac3924b4d347f480cbed80c415c59", "score": "0.63224804", "text": "def maybe_download(filename, work_directory):\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n return filepath", "title": "" }, { "docid": "2499cbb5ad28c1770d5fa138769bd8e5", "score": "0.6319714", "text": "def get_file(self, path):\n return path", "title": "" }, { "docid": "3d3b915ca2bbe84b13f116c9a1a5e52c", "score": "0.63174057", "text": "def get_path(self):\n return self._asset.get_path()", "title": "" }, { "docid": "49e0bd7a8b8af635b90c151c782f1e90", "score": "0.63151944", "text": "def get_path():\n return os.path.join(get_parent_path(), \"raw\")", "title": "" }, { "docid": "7417db64a92fc4e3eaef353508f59907", "score": "0.63128954", "text": "def Download(self):\n if not self._source_filename:\n if not self.project_version:\n return\n\n self._source_filename = self._download_helper.Download(\n self.project_name, self.project_version)\n\n return self._source_filename", "title": "" }, { "docid": "9806cbaa7e4b9d022e936e3d895ed355", "score": "0.63103473", "text": "def fetch_file(osf_id, path, filename):\n url = 'https://osf.io/{}/download'.format(osf_id)\n full_path = os.path.join(path, filename)\n if not os.path.isfile(full_path):\n urlretrieve(url, full_path)\n return full_path", "title": "" }, { "docid": "31eac7a4908603fb15ecf4372e378d5d", "score": "0.6309929", "text": "def get_path_from_file(self, file):\n return file.get(self.FILE_HREF)", "title": "" }, { "docid": "9a4f6cb83100089b1bc4af235b72d625", "score": "0.63044", 
"text": "def absolute_path(filename):\n return os.path.dirname(os.path.realpath(os.path.abspath(filename)))", "title": "" }, { "docid": "b59c28a6b160e0f830e36235c52340b9", "score": "0.6302899", "text": "def _isDownload(file_name, work_dir):\n if not os.path.exists(work_dir):\n os.mkdir(work_dir)\n file_path = os.path.join(work_dir, file_name)\n if not os.path.exists(file_path):\n # download file\n file_path, _ = urllib.request.urlretrieve(\n SOURCE_URL + file_name, file_path)\n state_info = os.stat(file_path)\n print('Successfully downloaded!!', file_name,\n state_info.st_size, 'bytes.')\n return file_path", "title": "" }, { "docid": "36fa9869320a6d2ffe02e5e425b1ea6b", "score": "0.63019884", "text": "def get_absolute_path_of_file(filename):\n relative_path = os.path.join(\"tests\", \"functional\", \"files\", filename)\n absolute_path = os.path.abspath(relative_path)\n with assertion_msg(\n \"Could not find '%s' in ./tests/functional/files. Please check the \"\n \"filename!\", filename):\n assert os.path.exists(absolute_path)\n\n return absolute_path", "title": "" }, { "docid": "1da12a6fbf06890ac905c4adb8223128", "score": "0.63010186", "text": "def _get_absolute_path(path):\n base_path = \".\"\n absolute_path = os.path.join(base_path, path)\n return absolute_path", "title": "" }, { "docid": "fd7114dfe3067e6653ba4d2e2c0fd77b", "score": "0.6296426", "text": "def filename(self):\n return os.path.join(self.base_dir, self._filename)", "title": "" }, { "docid": "fd7114dfe3067e6653ba4d2e2c0fd77b", "score": "0.6296426", "text": "def filename(self):\n return os.path.join(self.base_dir, self._filename)", "title": "" }, { "docid": "47a68deeffc511b37653c66a96cb2f77", "score": "0.6295602", "text": "async def get_url(self):\n file = await self.get_file()\n return self.bot.get_file_url(file.file_path)", "title": "" }, { "docid": "5db276a27a8b38f5d8e53da89bdd8e17", "score": "0.62930316", "text": "def generate_filelink(self):\n\t\treturn '_' + self.path_from_content", "title": "" }, { "docid": "504dbcbc5870ac5bb4c0590e9c5cf1c7", "score": "0.62882185", "text": "def __getDataPath(self, entity, version=1):\n try:\n eid = entity['id']\n e = self.get(eid, version=version, downloadFile=True)\n except:\n e = self.get(entity, version=version, downloadFile=True)\n return e['path']", "title": "" }, { "docid": "fc3748281d5f35616b3c1dc23b1a5d42", "score": "0.627442", "text": "def get_output_filepath(self):\n if not self.check_format():\n return None\n return os.path.join(self.wiki.public_dir(),\n self.wiki.date, self.FILENAME +\n \".\" + self.fileformat)", "title": "" }, { "docid": "07fc0667e816cd4523ec713bae3a8ead", "score": "0.6273814", "text": "def _get_path(env_path, parent_realm, parent_id, filename):\n path = os.path.join(env_path, 'files', 'attachments',\n parent_realm)\n hash = hashlib.sha1(parent_id.encode('utf-8')).hexdigest()\n path = os.path.join(path, hash[0:3], hash)\n if filename:\n path = os.path.join(path, _get_hashed_filename(filename))\n return os.path.normpath(path)", "title": "" }, { "docid": "4e3a0fe9b943d4bfef147eff82ad31a4", "score": "0.62732077", "text": "def file_path(self, url):\r\n # Remove ``media_url``.\r\n relative_url = url[len(self.media_url[2]):]\r\n return urllib.url2pathname(relative_url)", "title": "" }, { "docid": "8748e42a82ddf43edde0f3cc98c83e8c", "score": "0.6269781", "text": "def _get_datafile_path(self) -> str:\n return self._get_sub_path(self.DATAFILE_PATH)", "title": "" } ]
bc6a04eba5b3f41ca429d548419f5b6c
Changes the text of the label to the summary of this scenario.
[ { "docid": "acbf0f299549d2a525eb8b693beb5897", "score": "0.888844", "text": "def summary_scenario(self, label):\n label.setText(self.summary)", "title": "" } ]
[ { "docid": "eddd18e56ca8206d5c49cb5e6baf457a", "score": "0.6996269", "text": "def _update_label(label, new_text):\n label.setText(\"Score: {}\".format(new_text))", "title": "" }, { "docid": "17aacdff760629a5ac94ef60b2a5cf2b", "score": "0.658206", "text": "def summary(self, summary):\n\n self._summary = summary", "title": "" }, { "docid": "91b85442159e0d2254bada4ffa4d5439", "score": "0.65296173", "text": "def say_summary(self) -> None:\n pass", "title": "" }, { "docid": "7abf8e32c6b96a23f909cc055ddfee81", "score": "0.64863634", "text": "def set_label_text(self, text):\n self.statusLabel.setText(text)", "title": "" }, { "docid": "a08633f923ba30a9c57ae0cc642dbb09", "score": "0.63631606", "text": "def set_label(self, text):\n self._label.setText(text)", "title": "" }, { "docid": "f911eac088d96ffb979456674bb9f3f4", "score": "0.6304534", "text": "def label_text(self) -> str:\n return self._label.textItem.toPlainText()", "title": "" }, { "docid": "eee612d95f50d7b5e080ffd807adaab2", "score": "0.6276258", "text": "def label(self, label):\n\n self._label = label", "title": "" }, { "docid": "eee612d95f50d7b5e080ffd807adaab2", "score": "0.6276258", "text": "def label(self, label):\n\n self._label = label", "title": "" }, { "docid": "eee612d95f50d7b5e080ffd807adaab2", "score": "0.6276258", "text": "def label(self, label):\n\n self._label = label", "title": "" }, { "docid": "eee612d95f50d7b5e080ffd807adaab2", "score": "0.6276258", "text": "def label(self, label):\n\n self._label = label", "title": "" }, { "docid": "eee612d95f50d7b5e080ffd807adaab2", "score": "0.6276258", "text": "def label(self, label):\n\n self._label = label", "title": "" }, { "docid": "eee612d95f50d7b5e080ffd807adaab2", "score": "0.6276258", "text": "def label(self, label):\n\n self._label = label", "title": "" }, { "docid": "74f2eab3d63035343389b4f7b71334af", "score": "0.6249062", "text": "def label(self) -> str:\n ...", "title": "" }, { "docid": "c6540345d049dc3e06aabe4883d02e9c", "score": "0.6220558", "text": "def set_label(self, label):\r\n self._label.set_text(label)", "title": "" }, { "docid": "e8e6c5563f368547b939c0bd1eb61c8c", "score": "0.616387", "text": "def set_label(self, label):\n self.label = label", "title": "" }, { "docid": "cdcdfa8cb8ce42600d5a70f5068a1899", "score": "0.6115334", "text": "def edit_label_description(self):\n # the activities should be set by passing a `Settings` object which inherits from mad_gui.config.BaseSettings\n # and has an attribute `ACTIVITIES`, see our developer guidelines for more information\n new_description = NestedLabelSelectDialog(parent=self.parent.parent).get_label(self.descriptions)\n if not new_description:\n raise NoLabelSelected(\"Invalid description selected for label\")\n self.description = new_description", "title": "" }, { "docid": "e23036230b7dbacc9241e184f52b17db", "score": "0.61070395", "text": "def setText(self,text):\r\n\t\tself.Label.setText(text)", "title": "" }, { "docid": "7b73fc2cd207aabb044ae7dcfa3f5dbd", "score": "0.6101033", "text": "def label_text(self):\n return self._label_text", "title": "" }, { "docid": "31ad7d66f93738794b3f29b1b1d293ca", "score": "0.609899", "text": "def label(self, value):\n self.__label = value", "title": "" }, { "docid": "dd32f76e549e0125abc453e9dedcf960", "score": "0.6087003", "text": "def _update_label(self, label: str):\n for segment in self._segments:\n segment.label_text = label\n self._label_text = label\n self.emit_update(StateAction.UPDATE)", "title": "" }, { "docid": "c199b011466a4ec4bd99ead259687678", "score": "0.6062452", 
"text": "def updateStep(self):\n self.stepLabel['text'] = \"# Current step :\\n\" + str(self.count)", "title": "" }, { "docid": "ee7f623819c960fbc3bd68ecb28e0ff9", "score": "0.60578996", "text": "def update_summary_value(self):\n self.summary_value = self.var_value", "title": "" }, { "docid": "32e5cae7b42237d091b3096fff167877", "score": "0.6048544", "text": "def summary(self):\n status = (self.passed() and \"PASSED\") or type_labels[self._type].upper()\n out = \"{0}: multibag {1} {2}\".format(status, self.profile_version, \n self.label)\n if self.specification:\n out += \": {0}\".format(self.specification)\n return out", "title": "" }, { "docid": "00fb44efe5ff995640c03e1fdf05cdb8", "score": "0.60367936", "text": "def _updateText(self):\n score = self._incrementScore()\n self._score.text = 'Score: ' + str(score)", "title": "" }, { "docid": "1244bff74af67af58cdbc934b427d789", "score": "0.6024267", "text": "def set_label(self, text, label_label):\n self.labels[label_label].set_label(text)", "title": "" }, { "docid": "61db29d98129ddef7fb9bc1dcf195d2e", "score": "0.6023854", "text": "def set_label(self, text):\n self.entry.set_text(text)", "title": "" }, { "docid": "a9fd1e2cfe9f68bca51cf13051796dad", "score": "0.5994356", "text": "def summarize(self):\n if not self.summary_str:\n self.summary_str = self.get_summary_str()\n print(self.summary_str)", "title": "" }, { "docid": "a3a7c729bb0cd91ec3c4b266e0e8cc3c", "score": "0.597884", "text": "def confirmlabels(self):\n words = getpreviouslyanalyzed(Appa)\n self.label2.configure(text = \"Experiment number: \" + str(Appa.expnumber))\n self.label3.configure(text = \"Experiment type: \" + words)\n self.label4.configure(text = \"Run time (s): \" + str(Appa.exptime))", "title": "" }, { "docid": "0ea721a8c07b119ebc108782b1c4f6ef", "score": "0.5948307", "text": "def update_score(self, score):\n self.label.set_text(str(score))", "title": "" }, { "docid": "dc5c21ceb904dd1e5b546be5a70dd4ff", "score": "0.5943155", "text": "def label(self):", "title": "" }, { "docid": "c282c9d8c0b9e2680c6fa5b5889f3783", "score": "0.59329516", "text": "def change_label2(self,text):\n self.label2.config(text=text)", "title": "" }, { "docid": "a950f2880159761bbefe7fa9d97641d5", "score": "0.5927486", "text": "def get_label_desc(self, label):\n raise NotImplementedError()", "title": "" }, { "docid": "9bee57478b197fb3386292239586dc79", "score": "0.5920359", "text": "def set_label(self, label):\r\n self._label.set_text(label)\r\n # no need to call self._invalidate(). 
WidgetWrap takes care of\r\n # that when self.w changes\r", "title": "" }, { "docid": "1be54844cf773cd48638cc4417c44611", "score": "0.5917521", "text": "def change_label_in_frame(self, frame, string):\n label = frame.get_label_widget()\n label.set_markup(\"<big>\" + string + \" \" + \"Information\" + \"</big>\")", "title": "" }, { "docid": "22cd78d64f6052703d4890bc62222ee1", "score": "0.5909582", "text": "def label_text(self, value: str):\n self._label.setText(value[:10])\n self._update_label_pos()", "title": "" }, { "docid": "0167daffde927380678a8bdeae9650ad", "score": "0.5877094", "text": "def set_label(self, label):\n self.set_float_info('label', label)", "title": "" }, { "docid": "0f5aff00be9ddd4073ead4febacc868d", "score": "0.58316976", "text": "def _update_display(self, label, text):\n display = self.view.displays.get(label)\n display.set(text)", "title": "" }, { "docid": "8dedee6cde1f083613d5b248083ad76e", "score": "0.57915664", "text": "def print_summary(self):\n pass", "title": "" }, { "docid": "42927e161fd55cf23428dc03e991d5ff", "score": "0.5789725", "text": "def label(self, value):\r\n self.logger.warn(\"Setting values on label will NOT update the remote Canvas instance.\")\r\n self._label = value", "title": "" }, { "docid": "167f1c39381a30060500e82d0a3a7588", "score": "0.5767001", "text": "def printSummary(self):\n self.print_summary()", "title": "" }, { "docid": "b5632cbe53551f6d0ac490c563fa6d48", "score": "0.57519543", "text": "def text(name, data, step=None, description=None):\n summary_metadata = metadata.create_summary_metadata(\n display_name=None, description=description\n )\n # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback\n summary_scope = (\n getattr(tf.summary.experimental, \"summary_scope\", None)\n or tf.summary.summary_scope\n )\n with summary_scope(name, \"text_summary\", values=[data, step]) as (tag, _):\n tf.debugging.assert_type(data, tf.string)\n return tf.summary.write(\n tag=tag, tensor=data, step=step, metadata=summary_metadata\n )", "title": "" }, { "docid": "e355fdba13b3000c4bf315247386140a", "score": "0.5744368", "text": "def summary(self) -> str:\n return pulumi.get(self, \"summary\")", "title": "" }, { "docid": "e355fdba13b3000c4bf315247386140a", "score": "0.5744368", "text": "def summary(self) -> str:\n return pulumi.get(self, \"summary\")", "title": "" }, { "docid": "e355fdba13b3000c4bf315247386140a", "score": "0.5744368", "text": "def summary(self) -> str:\n return pulumi.get(self, \"summary\")", "title": "" }, { "docid": "e355fdba13b3000c4bf315247386140a", "score": "0.5744368", "text": "def summary(self) -> str:\n return pulumi.get(self, \"summary\")", "title": "" }, { "docid": "77e31cf447e5349c4966aa8750b0ef81", "score": "0.573822", "text": "def describe(self) -> str:\n return f'{self.name} '.ljust(22, '.') + f' {self.summary}'", "title": "" }, { "docid": "4799d64439db4470b80e90b860c0878f", "score": "0.5728557", "text": "def show_summary(self):\n print(\"\\nFertig! 
%s%i%s richtige & %s%i%s falsche Antworten.\" \n % (Fore.GREEN, self.right_answers, Style.RESET_ALL, Style.BRIGHT + Fore.RED, self.wrong_answers, Style.RESET_ALL))", "title": "" }, { "docid": "fdfa163ded768bef6811410ea294309f", "score": "0.5727551", "text": "def print_text(self, name, **kwargs):\n if not kwargs.get(\"label_size\"):\n kwargs[\"label_size\"] = 50\n self.add_field(name, 'label', **kwargs)\n getattr(self, name).caption_label.set_align_mode(\"left\")\n self.refresh_body()", "title": "" }, { "docid": "c9c56db64d19b5163db4f074c5e6de2b", "score": "0.5723991", "text": "def summary(self):\n pass", "title": "" }, { "docid": "804eaefa798543284fa4267c91f489c5", "score": "0.5669767", "text": "def get_label_string(self):\n string=\"\"\n if self.label is not None:\n string=self.label +' '\n return string", "title": "" }, { "docid": "55e22959aedc563bc576bc6bd6217c0d", "score": "0.56582284", "text": "def _set_label(self, nid, text, col=0):\n expanded, node, obj = self._get_node_data(nid)\n nid.setText(col, node.get_label(obj))", "title": "" }, { "docid": "539b4bd7e2bb0cdb0540410fc70ee8db", "score": "0.564577", "text": "def setLabel(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "5404e1db172be45d408f78f8db2c2757", "score": "0.5638426", "text": "def summary(self):\n return \"%s: %s\" % (self.attribute.name, self.value_as_text)", "title": "" }, { "docid": "63e5d735e6a725bcc2812698a8fda7f4", "score": "0.56374896", "text": "def writeLabel(self, label):\n\n self.out.write('label ' + label.upper() + '\\n')", "title": "" }, { "docid": "83c12c19fdef88bace3cfbfca3882a0a", "score": "0.5628504", "text": "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "title": "" }, { "docid": "64ad7498e30044c540d996dd8d4180c1", "score": "0.5623719", "text": "def set_ylabel(self, label):\n self._ylabel = label", "title": "" }, { "docid": "a56ac54dbf9fab15ce581fedd2ae03d3", "score": "0.56146747", "text": "def summary_str(self): \n raise NotImplementedError", "title": "" }, { "docid": "397f7375c044e105bc6be3bf45c02d70", "score": "0.5611215", "text": "def summary(self):\n return u\"%s: %s\" % (self.attribute.name, self.value_as_text)", "title": "" }, { "docid": "41517c7e8c1cab7c5c1d4b28f6ad57b8", "score": "0.5607614", "text": "def set_label(self, label):\n self.label.set_label(label)\n self._recenter()", "title": "" }, { "docid": "0d11677354ba693450a7e49d0c708129", "score": "0.5582191", "text": "def DoStreamLabel(theStream):\n theStreamMeta.title = theTestTitle\n theStream.insert(0,theStreamMeta)", "title": "" }, { "docid": "4555d34d4d80ebcd651e42e3e667fcf8", "score": "0.55778956", "text": "def set_target_object(self, text):\n\n self.target_obj_lbl.setText(text)", "title": "" }, { "docid": "725f4489f32a1cbc928aa31d0461cf0f", "score": "0.55746317", "text": "def setName(self, name):\n\t\tself.label.setText(name)", "title": "" }, { "docid": "f137041e98784d1e13bf299b5cf5d217", "score": "0.5574526", "text": "def _layout_summary(self):\n self.data_summary = wx.TextCtrl(self, -1,\n style=wx.TE_MULTILINE | wx.HSCROLL,\n size=(-1, 200))\n summary = 'No data info available...'\n self.data_summary.SetValue(summary)\n #self.summary_sizer.Add(self.data_summary, 1, wx.EXPAND|wx.ALL, 10) ", "title": "" }, { "docid": "be161d967d93e0f022168d9385c52388", "score": "0.5574098", "text": "def setText(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "be161d967d93e0f022168d9385c52388", "score": "0.5574098", "text": "def setText(self, val='True', **kwargs):\n \n pass", "title": 
"" }, { "docid": "17bb9d37f2fad71250925cc3cc275801", "score": "0.55728567", "text": "def label(self):\n return self._lab", "title": "" }, { "docid": "a30f18f5494792e6a2224105188dca56", "score": "0.55725217", "text": "def summary(self):\n print(self.__repr__())", "title": "" }, { "docid": "f6e24122ecdc7fb7db0748f97f2a1726", "score": "0.55677867", "text": "def printSummary(self):", "title": "" }, { "docid": "529f818a21c971fe5ce80e134e283df1", "score": "0.55661154", "text": "def get_text(self, label_label):\n\n return self.labels[label_label].get_label()", "title": "" }, { "docid": "a430bf0d3f5e25837698a5ac0b33db02", "score": "0.55651003", "text": "def gettext(self):\n if Appa.exptype == \"0\" or Appa.exptype == \"3\":\n words = \"Ready\"\n elif Appa.exptype == \"1\" or Appa.exptype == \"2\":\n words = \"Prepare/Insert Stimulus\"\n elif Appa.exptype == \"4\":\n words = \"Prepare to cut worm\"\n self.label1.configure(text = words)", "title": "" }, { "docid": "ed35ea1e8525322c30a93f39df84870c", "score": "0.5562542", "text": "def printSummary(self):\n pass", "title": "" }, { "docid": "9542ffe5cee03c86a8eb41762caea7d9", "score": "0.556055", "text": "def summary(self):\r\n raise NotImplementedError(\r\n \"This method is not implemented for your current model\"\r\n )", "title": "" }, { "docid": "c002d29bf4088f6d335da1fed73c4b1e", "score": "0.55600166", "text": "def setupLabel(self):\n self.label_item = QtWidgets.QGraphicsTextItem(self)\n self.label_item.setPlainText(self.label)\n self.label_item.setDefaultTextColor(QtCore.Qt.white)\n self.label_item.setFont(self.labelFont)\n self.label_item.setPos(-variables.NODE_SIZE, -variables.NODE_SIZE - 2)\n self.label_item.setTextWidth(variables.NODE_SIZE)", "title": "" }, { "docid": "6276381ba08e083d077bfb565f336829", "score": "0.5546583", "text": "def textLabels(obj, img):\n obj.label.setText(\"Title: \"+img.title)\n obj.label_2.setText(\"File Path: \"+img.f)\n obj.label_3.setText(\"Pixel Shift: (\"+str(img.x_shift)+\",\"+str(img.y_shift)+\")\")\n obj.label_5.setText(\"Intensity from ROI: \"+str(img.intensity))", "title": "" }, { "docid": "732695cf2a9c6c11d71e7bf6a12a19ff", "score": "0.55446786", "text": "def _hover_text(self, data):\n\n mortality_rate = \"{:.1f}%\".format(data.mortality_rate)\n label = f\"Country: {data.country}<br>\" \\\n f\"Population: {data.population}<br>\" \\\n f\"Deaths: {data.deaths}<br>\" \\\n f\"Mortality rate: {mortality_rate}\"\n return label", "title": "" }, { "docid": "fc4ff9737dafa493e82054465b11c820", "score": "0.55432004", "text": "def get_summary(self):\n return self.summary", "title": "" }, { "docid": "36b9fde5a97e3da70d61a0c2e2731180", "score": "0.55333894", "text": "def get_long_label(self):\n if self.description:\n return '{} - {}'.format(self.label, self.description)\n else:\n return self.label", "title": "" }, { "docid": "e48bb24ad480812b68c9bc12d715f298", "score": "0.55265856", "text": "def setTitle(self):\n tmpstr = \"Russian Pronoun Declension For \" + self.rustr.capitalize()\n self.titleLbl.setText(tmpstr)", "title": "" }, { "docid": "d2d0b460d93578fd92b05c14493b39eb", "score": "0.55213743", "text": "def write_label(self, label):\n self.file.write(\"({}:{})\\n\".format(self.current_file.upper(), label.upper()))", "title": "" }, { "docid": "d9874627cdf16a76a64686431396068e", "score": "0.5521248", "text": "def newRepLabel(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "d8172242f133742e1968bc37014115dc", "score": "0.5514035", "text": "def label(self) -> str:\n return pulumi.get(self, \"label\")", 
"title": "" }, { "docid": "6204307027aecd5b98c8e0e594f0fcf6", "score": "0.55120647", "text": "def description_text(self, P=None):\n if not P:\n P = self.parameters.values_to_dict()\n\n text = (\n 'Please replace this with a short summary of the '\n 'seamm_default_atomtyping step, including key parameters.'\n )\n\n return self.header + '\\n' + __(text, **P, indent=4 * ' ').__str__()", "title": "" }, { "docid": "ec345ab86aed3209b529fb2923c00091", "score": "0.54936767", "text": "def get_label(self):\r\n return self._label.text", "title": "" }, { "docid": "ec345ab86aed3209b529fb2923c00091", "score": "0.54936767", "text": "def get_label(self):\r\n return self._label.text", "title": "" }, { "docid": "216b953aa7be4212d5418ec0cf441edf", "score": "0.54910576", "text": "def get_summary(self, instance) -> str: # noqa\n return instance.summary.html", "title": "" }, { "docid": "7dae219055ead13bc8dff969681b7d1a", "score": "0.5490332", "text": "def getLabel(self, **kwargs):\n \n pass", "title": "" }, { "docid": "95a3c68be2f0b6dadfa10067efa4e910", "score": "0.54890233", "text": "def short_summary(self):\n if not self._short_summary:\n self._generate_short_summary()\n return self._short_summary", "title": "" }, { "docid": "4d65e02b202a6926bebb34e784f58f76", "score": "0.5484604", "text": "def get_label():", "title": "" }, { "docid": "94e6e0786ff5db30b7d2ab846dd50533", "score": "0.5482848", "text": "def updateText(self):\n self.setText(0, self.circuit.name + '.' + self.channel)", "title": "" }, { "docid": "7aec3ad0606e940df00b6c22ddb3d6d7", "score": "0.54812217", "text": "def updateStatusBar(self, text):\n self.statusLabel.setText(text)", "title": "" }, { "docid": "d667548f73364fda4d8f527be881ad57", "score": "0.5469321", "text": "def set_description(self, text):\n self.desc = text", "title": "" }, { "docid": "8a270c292663bec5f5a6c4e678bdd0c0", "score": "0.5467339", "text": "def label(self, max_lenght: int = 100) -> str:\n first_sentence = sent_tokenize(self.page.summary)[0]\n regex = re.compile('^.*(is a |is an|was a |was an |was the |is the )(?P<summary>.*).$')\n if match := regex.match(first_sentence):\n label = match.groupdict()['summary']\n if len(label) > max_lenght:\n label = label[:max_lenght] + '...'\n return label\n\n return ''", "title": "" }, { "docid": "4106a997f315b811480dc32250097cc7", "score": "0.5457391", "text": "def set_description(self, description):\n self.widget.setWhatsThis(description)", "title": "" }, { "docid": "6cfc99635c835fb8ca1e80aa3254020c", "score": "0.54556215", "text": "def set_text(self, name):\n self.progressbar.set_text(\"%s\" % (name,))", "title": "" }, { "docid": "8b9f7853b14feb2d79864891fb5fdde2", "score": "0.5452426", "text": "def summary (self) :\n self.model.summary()", "title": "" }, { "docid": "92fac2550d247ad87b95a37b139d51e5", "score": "0.5447511", "text": "def __updateQtSample(self):\n self.qtSampleLabel.setText(\n self.tr(\"Sample: {0}designer{1}\").format(\n self.qtPrefixEdit.text(), self.qtPostfixEdit.text()))", "title": "" }, { "docid": "db5eb6061840951b79f30ae1163c0d97", "score": "0.54403293", "text": "def set_label(self, key):\n self.label = '%s_(%s)' % key", "title": "" }, { "docid": "780aab4abf9e4f56d39cb516caf70f3e", "score": "0.54315865", "text": "def fmt_label(self):\n\n return \" \".join(self.label)", "title": "" }, { "docid": "98b63ab9ea3f46c6b15b1d2a39aa2abc", "score": "0.54162985", "text": "def summary(self):\n return self.__summary", "title": "" }, { "docid": "733861a17e19732d3108f2088fbf2c7b", "score": "0.5410679", "text": "def setLabel(self, 
label):\n Placemark_setLabel(self._obj, label)\n return", "title": "" } ]
b4b1a94bf941996732d66220a294dc7a
Sets the email of this Consultation.
[ { "docid": "edf73bdd43acf1b4fa8bf69a59e0f338", "score": "0.77411693", "text": "def email(self, email):\n\n self._email = email", "title": "" } ]
[ { "docid": "3edda176cb95d00051c3d2ccb6af96ba", "score": "0.782061", "text": "def email(self, email):\n self._email = email", "title": "" }, { "docid": "3edda176cb95d00051c3d2ccb6af96ba", "score": "0.782061", "text": "def email(self, email):\n self._email = email", "title": "" }, { "docid": "4fbb724a68d439149915f820b15ddcab", "score": "0.7802871", "text": "def setEmail(self, email: str):\n self.__email = email\n\n return self", "title": "" }, { "docid": "3d0114d69b888b7c6e82a56a6233883a", "score": "0.7774265", "text": "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "c854578d85e6f7fbbfd61b05bed905ac", "score": "0.7697306", "text": "def email(self, email: str):\n\n self._email = email", "title": "" }, { "docid": "9cdfccb42c22ca249ab0cbab096d4e45", "score": "0.7695182", "text": "def email(self, val: str):\n self._email_address = val", "title": "" }, { "docid": "213c3dbbbcd5301cac74bcb4f344a555", "score": "0.7453148", "text": "def set_email(self, email):\n try:\n assert isinstance(email, dict)\n self.email = email\n Logger.info(TradingEnvironment.set_email, \"Email report address set to: %s\" % (str([email[key] for key in email if key == 'to'])))\n except Exception as e:\n Logger.error(TradingEnvironment.set_email, self.parse_error(e))", "title": "" }, { "docid": "9204e067476fd0977103b1160c82d18b", "score": "0.74392074", "text": "def email(self, email: \"str\"):\n self._attrs[\"email\"] = email", "title": "" }, { "docid": "85ab64c36c44157ab6a7231ff2fb3690", "score": "0.74053985", "text": "def e_mail(self, e_mail):\n\n self._e_mail = e_mail", "title": "" }, { "docid": "24e5178227ba1e3a8262118a82807b3d", "score": "0.7368447", "text": "def email_address(self, val: str):\n self._email_address = val", "title": "" }, { "docid": "465de8880b07826e3635293f916d082b", "score": "0.73491555", "text": "def email(self, email: datetime):\n if email is None:\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "5829857dcdf8f8d4f2d87e9ad1fac29d", "score": "0.7299599", "text": "def contact_email(self, contact_email):\n \n self._contact_email = contact_email", "title": "" }, { "docid": "461be981ae20810801c253b1ee7e4e19", "score": "0.7265379", "text": "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "title": "" }, { "docid": "461be981ae20810801c253b1ee7e4e19", "score": "0.7265379", "text": "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "title": "" }, { "docid": "22b3a39f11bc78f23d80d3b8d4a7d2e7", "score": "0.72444177", "text": "def email(self, email):\n if email is None:\n raise ValueError(\n \"Invalid value for `email`, must not be `None`\"\n ) # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "d42a8b27b3d8116242da2a8aa81d7b50", "score": "0.7239511", "text": "def email(self, email):\n if email is None:\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "d42a8b27b3d8116242da2a8aa81d7b50", "score": "0.7239511", "text": "def email(self, email):\n if email is None:\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "78d9e10da9439a9afc9ccb440858b13e", "score": "0.7137218", "text": "def 
change_email(self, new_email: str) -> None:\n\n self.email = new_email", "title": "" }, { "docid": "343605401d1ee7b31e2c874892488a1f", "score": "0.70963347", "text": "def user_email(self, user_email):\n\n self._user_email = user_email", "title": "" }, { "docid": "58c409a33eae58d0ef6870fa23b9cff0", "score": "0.7043129", "text": "def email(self, email):\n if email is not None and len(email) > 100:\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `100`\") # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "c0815362dcd01b76ab434bf45406fcf6", "score": "0.7013196", "text": "def facility_email(self, facility_email):\n\n self._facility_email = facility_email", "title": "" }, { "docid": "a7d6ad00e691d957e6d200b722b55cb3", "score": "0.6901243", "text": "def email(self, email: str):\n if email is not None and not re.search('^.*@.*\\\\..*$', email): # noqa: E501\n raise ValueError(\"Invalid value for `email`, must be a follow pattern or equal to `/^.*@.*\\\\..*$/`\") # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "994c464a8c095623b86ed0c78d1fb71b", "score": "0.678678", "text": "def email(self, value):\n self.logger.warn(\"Setting values on email will NOT update the remote Canvas instance.\")\n self._email = value", "title": "" }, { "docid": "db2fdd7843165e0d57d72a02b1365264", "score": "0.67644966", "text": "def email_address(self, email_address):\n\n self._email_address = email_address", "title": "" }, { "docid": "db2fdd7843165e0d57d72a02b1365264", "score": "0.67644966", "text": "def email_address(self, email_address):\n\n self._email_address = email_address", "title": "" }, { "docid": "db2fdd7843165e0d57d72a02b1365264", "score": "0.67644966", "text": "def email_address(self, email_address):\n\n self._email_address = email_address", "title": "" }, { "docid": "db2fdd7843165e0d57d72a02b1365264", "score": "0.67644966", "text": "def email_address(self, email_address):\n\n self._email_address = email_address", "title": "" }, { "docid": "559cf600f1c6707a1a80a42861bb2509", "score": "0.6757134", "text": "def sender_email(self, sender_email):\n\n self._sender_email = sender_email", "title": "" }, { "docid": "23b7da67f43ecc4008c9a72397a1e8b4", "score": "0.67398876", "text": "def email(self, email):\n if email is not None and len(email) > 256:\n raise ValueError(\n \"Invalid value for `email`, length must be less than or equal to `256`\"\n ) # noqa: E501\n if email is not None and len(email) < 3:\n raise ValueError(\n \"Invalid value for `email`, length must be greater than or equal to `3`\"\n ) # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "edc2c356a185532e9b046ada7d847944", "score": "0.67120546", "text": "def email(self, email):\n if email is None:\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n if email is not None and len(email) > 255:\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `255`\") # noqa: E501\n if email is not None and len(email) < 3:\n raise ValueError(\"Invalid value for `email`, length must be greater than or equal to `3`\") # noqa: E501\n\n self._email = email", "title": "" }, { "docid": "4d3eb3a3c52e41d2928b9a2e1f787367", "score": "0.66880715", "text": "def email_name(self, email_name):\n\n self._email_name = email_name", "title": "" }, { "docid": "dbf0715aadeb67d742c15eabbb9921ce", "score": "0.6671049", "text": "def edit_email(self, new_email: str) -> None:\r\n self.email = new_email", "title": "" }, { "docid": 
"4e4e0869c53bdff0ff02c10c11a8c345", "score": "0.6653339", "text": "def set_paypal_email(self, email):\n\n # if the class was not created from a primary key\n # the withdraw_object will currently be None and\n # we will need to use the email passed into this method\n if self.withdraw_object is None:\n self.paypal_email = email", "title": "" }, { "docid": "eec655d06503303a4747927d043005fb", "score": "0.66394246", "text": "def setEmail(d, customField, w):\r\n\ttry:\r\n\t\temail = w.email\r\n\t\tif email: d[customField] = email\r\n\texcept:\r\n\t\tpass", "title": "" }, { "docid": "57bdb7f97a078393dd4a6d3944f66c7f", "score": "0.6603049", "text": "def set_email(self, email: str):\n try:\n email_field = self.browser.find_element_by_xpath(\n self.login_page_x_paths['email_field'])\n email_field.send_keys(email)\n except (\n ElementNotInteractableException,\n NoSuchElementException) as error:\n console_print('failed', '[Email input failed!]')\n console_print('failed', str(error))\n self.browser.quit()\n\n raise", "title": "" }, { "docid": "41a1062a2b7d61f247f84234b50e6f3d", "score": "0.65629584", "text": "def set_email(self, emailaddress):\n req_obj = self.rest_put(\"/account/email\", params={'fullname': emailaddress})\n if req_obj.success:\n return req_obj.data", "title": "" }, { "docid": "ecdb3a56d6be2dd8fd47e80cd70af606", "score": "0.65523106", "text": "def email(self) -> str:\n\n return self.__email", "title": "" }, { "docid": "74f6be351ba54084d36bcc44ee19a5fc", "score": "0.6511286", "text": "def email_attribute(self, email_attribute: \"str\"):\n self._attrs[\"email_attribute\"] = email_attribute", "title": "" }, { "docid": "364b876f4e804de629458f9523b7b3d5", "score": "0.65075624", "text": "def email_from(self, email_from):\n\n self._email_from = email_from", "title": "" }, { "docid": "d86ed2962334c64e7f5e94a522a2224c", "score": "0.65054184", "text": "def for_email(self):\n self._type = 'email'\n return self", "title": "" }, { "docid": "0c302f29b47a45386d89b64291d93902", "score": "0.65001214", "text": "def email_attribute(self, email_attribute: \"str\"):\n if email_attribute is None:\n raise ValueError(\"Invalid value for `email_attribute`, must not be `None`\")\n self._attrs[\"email_attribute\"] = email_attribute", "title": "" }, { "docid": "bf8486310579cbf24bdaaf97cd3e1315", "score": "0.6485711", "text": "def originator_email(self, originator_email):\n\n self._originator_email = originator_email", "title": "" }, { "docid": "78fda82af8834137bfdb0a566398b044", "score": "0.641644", "text": "def email(self) -> str:\n return self._email", "title": "" }, { "docid": "78fda82af8834137bfdb0a566398b044", "score": "0.641644", "text": "def email(self) -> str:\n return self._email", "title": "" }, { "docid": "7d66823fafcb9682775e9378fd68f890", "score": "0.63649917", "text": "def email_id(self, email_id):\n\n self._email_id = email_id", "title": "" }, { "docid": "b8c13cf5abd41ffc54f880cb7dda3771", "score": "0.63640386", "text": "def set_Email(self, value):\n super(CreateFileInputSet, self)._set_input('Email', value)", "title": "" }, { "docid": "4250153130b6d20c5ad3786ba0de3e42", "score": "0.636265", "text": "def email(self):\n return self.__email", "title": "" }, { "docid": "1c786f5cacf890b2338a92334fd71902", "score": "0.6327086", "text": "def email_address(self):\n return self.__email_address", "title": "" }, { "docid": "7f3b9fb25a17cc61b2afb0d827b0bb9f", "score": "0.63247615", "text": "def set_email(self, address, is_valid):\n address = str(address)\n is_valid = bool(is_valid)\n reason = 'user_verified' 
if is_valid else 'user_refuted'\n self._database.execute(self.EMAIL_SET_STATEMENT,\n (address, is_valid, reason,\n datetime.datetime.today()))\n self._database.commit()", "title": "" }, { "docid": "a4dbac30de26267cd184c4d3333f55db", "score": "0.62893206", "text": "def get_email(self) -> str:\n return self.email", "title": "" }, { "docid": "28070512fb8ca6a07b53e44be97379ec", "score": "0.628464", "text": "def email(self):\n return self._email", "title": "" }, { "docid": "28070512fb8ca6a07b53e44be97379ec", "score": "0.628464", "text": "def email(self):\n return self._email", "title": "" }, { "docid": "28070512fb8ca6a07b53e44be97379ec", "score": "0.628464", "text": "def email(self):\n return self._email", "title": "" }, { "docid": "feb034d49252ae89639bfa3c4f77e743", "score": "0.6244485", "text": "def set_EmailAddress(self, value):\n super(CreatePeopleInputSet, self)._set_input('EmailAddress', value)", "title": "" }, { "docid": "3fd74733e14058ff79ad557ae1118647", "score": "0.6232272", "text": "def email_address(self):\n return self._email_address", "title": "" }, { "docid": "3fd74733e14058ff79ad557ae1118647", "score": "0.6232272", "text": "def email_address(self):\n return self._email_address", "title": "" }, { "docid": "f0fd8aa6c2be3e52063d08c01445c4de", "score": "0.62280065", "text": "def get_email(self):\n return self.email", "title": "" }, { "docid": "141ecd42b6889b7af9d3f22da90e8223", "score": "0.62211365", "text": "def getEmail(self):\n return self.__email", "title": "" }, { "docid": "3065727a530f85a5c67d99f5cb8764ef", "score": "0.62084746", "text": "def email(self) -> str:\n return pulumi.get(self, \"email\")", "title": "" }, { "docid": "07fb781597fde02fe04481dc34fb675b", "score": "0.6204964", "text": "def owner_email(self, owner_email):\n\n self._owner_email = owner_email", "title": "" }, { "docid": "8019b92792eda765ee8e7e6fb8a2f4dc", "score": "0.6183347", "text": "def set_user_email(geniuser, new_email):\n assert_geniuser(geniuser)\n \n geniuser.email = new_email\n geniuser.save()", "title": "" }, { "docid": "fa6e69934ff5a3bc2501364dfb0b27a5", "score": "0.6175226", "text": "def email(self):\n return self.properties.get(\"email\", None)", "title": "" }, { "docid": "4fd676c87a9d4aaf190491ce2ea2cddb", "score": "0.6129801", "text": "def email(self):\n return str(self._email_address)", "title": "" }, { "docid": "263bac277894025590c8ac484572bf87", "score": "0.6118055", "text": "def email(cls, val):\n return cls('email', val)", "title": "" }, { "docid": "b4c3d252e9583ed0bb4060f5a5cf1984", "score": "0.6117994", "text": "def email(self):\n return '[email protected]'", "title": "" }, { "docid": "958b9444a4e0be60c45cc293a8afe066", "score": "0.6083542", "text": "def get_email(self):\n if self.user and self.user.email:\n return self.user.email\n elif self.email:\n return self.email", "title": "" }, { "docid": "d28e2faac2aa64022b9575292444c3f7", "score": "0.6077487", "text": "def email_address(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email_address\")", "title": "" }, { "docid": "d28e2faac2aa64022b9575292444c3f7", "score": "0.6077487", "text": "def email_address(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email_address\")", "title": "" }, { "docid": "aa3ac5d63cfe602edcee9399a9e20b5b", "score": "0.6063535", "text": "def emails(self, emails):\n\n self._emails = emails", "title": "" }, { "docid": "13b434741a4e0d09475022a882d2efa7", "score": "0.60589397", "text": "def email(self, value):\n value = str(value)\n if TeacherModel.query.filter_by(email=value).first():\n raise 
ValueError(RestErrors.EMAIL_TAKEN)\n return email_validator.validate_email(value).email", "title": "" }, { "docid": "40ee39accad0ba88d71cf58017dcf9e6", "score": "0.605881", "text": "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "title": "" }, { "docid": "40ee39accad0ba88d71cf58017dcf9e6", "score": "0.605881", "text": "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "title": "" }, { "docid": "ca6024e2dde5e023df66df6564559260", "score": "0.6051183", "text": "def email(self):\n return \"%s_%[email protected]\" % (self.fname, self.lname)", "title": "" }, { "docid": "ef8f716edf5063a6120f9ce1ded18a2e", "score": "0.60334444", "text": "def email(self) -> \"str\":\n return self._attrs.get(\"email\")", "title": "" }, { "docid": "b0a4421b0c743d45343415ab42080fd5", "score": "0.6002925", "text": "def member_change_email(cls, val):\n return cls('member_change_email', val)", "title": "" }, { "docid": "2e93c239ad4240c2a702469634d1d527", "score": "0.59694886", "text": "def contact_email(self):\n return self._contact_email", "title": "" }, { "docid": "2e93c239ad4240c2a702469634d1d527", "score": "0.59694886", "text": "def contact_email(self):\n return self._contact_email", "title": "" }, { "docid": "ccd7bd217ffe8122c244dfb26758d7b6", "score": "0.5954198", "text": "def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")", "title": "" }, { "docid": "5678b723f78c77e41b831092f960780f", "score": "0.593549", "text": "def email(self):\n return self._data.get('email')", "title": "" }, { "docid": "0dd894fdba597338152062e1088fb285", "score": "0.5928166", "text": "def email_from_not(self, email_from_not):\n\n self._email_from_not = email_from_not", "title": "" }, { "docid": "4e29b6bfbe32d53ee465dc1d574b345b", "score": "0.59256184", "text": "def update_email(self, user):\n raise Exception", "title": "" }, { "docid": "f95ada1860fc166de559272259b75a2c", "score": "0.59246063", "text": "def email_from_in(self, email_from_in):\n\n self._email_from_in = email_from_in", "title": "" }, { "docid": "758b6ee40d174232b5660524b3ee30d2", "score": "0.5905225", "text": "def email(self):\n raise NotImplementedError", "title": "" }, { "docid": "b69aa87dd392f3945fd415648930008f", "score": "0.59019715", "text": "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "title": "" }, { "docid": "f8b46a7dbccd3ee6dfb000f85deb1759", "score": "0.5889574", "text": "def handle_setemail(bot, ievent):\n try:\n name, email = ievent.args\n except ValueError:\n ievent.missing('<name> <email>')\n return\n if not users.exist(name):\n ievent.reply(\"can't find user %s\" % name)\n return\n users.setemail(name, email)\n ievent.reply('email set')", "title": "" }, { "docid": "a732f20193c68627e7fc0bb2d14d59b9", "score": "0.5815188", "text": "def GetEmail(self):\n if config.CONFIG.Get(\"Email.enable_custom_email_address\") and self.email:\n return self.email\n\n return \"{}@{}\".format(self.username, config.CONFIG.Get(\"Logging.domain\"))", "title": "" }, { "docid": "d4b6718733a59751c89b210c35df140f", "score": "0.5804289", "text": "def EmailAddress(self):\n if self.force_auto_sync:\n self.get('EmailAddress')\n return self._EmailAddress", "title": "" }, { "docid": "bd7847f3e1ad59b1f7726ee14bbec7ee", "score": "0.5794261", "text": "def email(self) -> datetime:\n return self._email", "title": "" }, { "docid": "47e841ce17209c34f06992f3725e5ebd", "score": "0.57864916", "text": "def maybeSetUserEmail(self, r):\n\n defaultValue = \"ZERO_VALUE_FOR_EMAIL_OPTION\"\n 
reader = r.config_reader()\n if reader.get_value(\n \"user\",\n \"email\",\n default=defaultValue) == defaultValue:\n writer = r.config_writer()\n writer.set_value(\"user\", \"email\", \"[email protected]\")", "title": "" } ]
2222136541f6285ea02f40b676a113e6
Returns the model properties as a dict
[ { "docid": "12d68e7daf0075b9d7336e08d677b525", "score": "0.0", "text": "def to_dict(self):\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(AgentRow, dict):\n for key, value in self.items():\n result[key] = value\n\n return result", "title": "" } ]
[ { "docid": "d5fac65c7140c97c1960355ec6927973", "score": "0.7789736", "text": "def get_properties(self):\n return {}", "title": "" }, { "docid": "d5fac65c7140c97c1960355ec6927973", "score": "0.7789736", "text": "def get_properties(self):\n return {}", "title": "" }, { "docid": "10bf24a4991ba5b8816fe4dbe4c0642d", "score": "0.7601239", "text": "def properties(self) -> Dict[str, str]:\n return self.__properties", "title": "" }, { "docid": "0a755687122f15cee36589427708ad2c", "score": "0.73468405", "text": "def serialize_properties(self):\n return {\n 'name': self.name,\n 'condition': self.get_condition_display(),\n 'notes': self.notes,\n 'date_created': self.date_created.isoformat(),\n 'date_updated': self.date_updated.isoformat()\n }", "title": "" }, { "docid": "24fe6c1157202c486cb9179f8d7b5663", "score": "0.7337706", "text": "def get_properties(self):\n return self.properties", "title": "" }, { "docid": "b21279b313bffe305801b53fee1d7b43", "score": "0.72341746", "text": "def properties(self):\n if 'properties' not in self.config_object:\n return {}\n return self.config_object['properties'].items()", "title": "" }, { "docid": "a8e6e6e6cb1eebcece54e99a7cdb6e4a", "score": "0.7226241", "text": "def get_properties(self):", "title": "" }, { "docid": "7c438684950151473ffee8007b2cee16", "score": "0.7212609", "text": "def getProperties(self):\r\n return self.properties", "title": "" }, { "docid": "e525682384891a97d5a138fc6cb91193", "score": "0.71607333", "text": "def properties(self):\n return dict((p, getattr(self, p)) for p in self._property_set)", "title": "" }, { "docid": "a2860b74c9a25928d574bc483c7184c0", "score": "0.7127593", "text": "def getProperties():\n return properties", "title": "" }, { "docid": "cd186f03d70810e486e8e85d7af81592", "score": "0.7127389", "text": "def get_model_data(self):\n return {}", "title": "" }, { "docid": "975b4762223a18688984165bdd0dd030", "score": "0.7090147", "text": "def properties(self):\n\n properties = {}\n for prop in self.__values.get(\"properties\", []):\n name = prop.get(\"type\")\n if name:\n value = prop.get(\"value\", \"\").strip()\n if value:\n if name not in properties:\n properties[name] = []\n properties[name].append(value)\n return properties", "title": "" }, { "docid": "cdc97903647474dbcee82967a07baa5d", "score": "0.70887214", "text": "def serialize(self):\n return self.properties", "title": "" }, { "docid": "762224912f5e2cb09b68432ad9a1d86a", "score": "0.70862514", "text": "def get_properties(self):\n return self._get()", "title": "" }, { "docid": "8b33699bb5f30a4616d6dddcaa20ec49", "score": "0.70738834", "text": "def json_model_dict(self):\r\n # type: () -> dict\r\n return self._json_model_dict", "title": "" }, { "docid": "61cdd02bd1d6840a8d59f165667eeddb", "score": "0.70263165", "text": "def get_properties(self):\n return PropertyDirty.objects.filter(load=self.load).values_list(\n \"data\", \"property\"\n )", "title": "" }, { "docid": "a2969525e9fa9c02b11a8b26f8e6ea7a", "score": "0.7016843", "text": "def __prop_values(self):\n return self.__properties.values()", "title": "" }, { "docid": "a2d1cca3b313c5d658411928e5240987", "score": "0.6977464", "text": "def to_dict(self):\n # first we get the names of all the columns on your model\n columns = [c.key for c in class_mapper(self.__class__).columns]\n # then we return their values in a dict\n return dict((c, getattr(self, c)) for c in columns)", "title": "" }, { "docid": "2216fdb8bc496ff6f0080922ea73edc7", "score": "0.6883493", "text": "def properties(self):\n return self._properties", "title": "" 
}, { "docid": "2fa2d7f64ff747c45d8969265149e148", "score": "0.68244517", "text": "def get_extra_properties(self):\n\n return {}", "title": "" }, { "docid": "f8213058194e8ac0c580f6f257f67b27", "score": "0.6820822", "text": "def to_properties(self) -> Dict:\n return dict(self._j_descriptor.toProperties())", "title": "" }, { "docid": "99ce051b0d289c13b06e3548146d879a", "score": "0.67838293", "text": "def to_api_repr(self) -> dict:\n return copy.deepcopy(self._properties)", "title": "" }, { "docid": "99ce051b0d289c13b06e3548146d879a", "score": "0.67838293", "text": "def to_api_repr(self) -> dict:\n return copy.deepcopy(self._properties)", "title": "" }, { "docid": "1487a24decc2798c856e8d0323cadafb", "score": "0.6780326", "text": "def properties(self) -> object:\n return self._properties", "title": "" }, { "docid": "7fa732bb615512d9193f3d1eb93e9cff", "score": "0.6764833", "text": "def properties(self) -> pulumi.Output[Any]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "dae1def4c62a3770fca0513f37bcc62b", "score": "0.67603797", "text": "def getAllFieldProperties(self):", "title": "" }, { "docid": "608d7901e844248f1a9992cb8ec2d386", "score": "0.6735349", "text": "def serialize(self, model):\n return model.to_dict()", "title": "" }, { "docid": "8c84d889ad8d33a41dc36523c3a43fca", "score": "0.6719655", "text": "def properties(self):\r\n if self._properties is None:\r\n self._init()\r\n return self._properties", "title": "" }, { "docid": "e911bc294e5a3b81398490467ede2698", "score": "0.67070204", "text": "def properties(self):\r\n if self._properties is None:\r\n try:\r\n params = {'f': 'json'}\r\n r = self._con.get(self._url, params)\r\n self._properties = PropertyMap(r)\r\n except:\r\n self._properties = PropertyMap({})\r\n elif isinstance(self._properties, dict):\r\n self._properties = PropertyMap(self._properties)\r\n return self._properties", "title": "" }, { "docid": "80d34d2a0480e294873a1cf608c18ec8", "score": "0.6693867", "text": "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'name':\n _dict[f.name] = ''.join(f.value_from_object(self).split('_')[1:])\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "title": "" }, { "docid": "80d34d2a0480e294873a1cf608c18ec8", "score": "0.6693867", "text": "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'name':\n _dict[f.name] = ''.join(f.value_from_object(self).split('_')[1:])\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "title": "" }, { "docid": "148e0f4f66558771de899d80a02dfd06", "score": "0.6692707", "text": "def get(self, key):\n if auth.is_admin():\n properties = model.ExpenseType.get_private_properties()\n else:\n properties = model.ExpenseType.get_public_properties()\n return g.model_db.to_dict(include=properties)", "title": "" }, { "docid": "c5e07beb44f914ba4e2f3974bcb0d49e", "score": "0.66897124", "text": "def properties(self) -> List[str]:\n return self.__properties", "title": "" }, { "docid": "75086f87f9a6f5244c3196e26944940c", "score": "0.6670449", "text": "def __prop_keys(self):\n return self.__properties.keys()", "title": "" }, { "docid": "a02d9fdc426bd41d77736174a29fbd65", "score": "0.6652064", "text": "def get_attrs_for_model(obj):\n return list(obj.__class__._properties)", "title": "" }, { "docid": "845a7c6b98db4127b4dd38e7de22a3fa", "score": "0.66385704", "text": "def get_all(self):\n props = {}\n # __init__ ens asegura que les propietats existeixen\n for a in self.expected_properties:\n props[a] = 
getattr(self.properties, a)\n return props", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "b01be3e1c5d4974958ad98b01b3e4acc", "score": "0.6619675", "text": "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "title": "" }, { "docid": "ea9f38f5e10ba8612b1f7fa6239d2660", "score": "0.66194534", 
"text": "def get_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"alias\": self.alias,\n \"description\": self.description\n }", "title": "" }, { "docid": "478753b033b7d8f0899dc29ddd3f4f38", "score": "0.6597193", "text": "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n 'description' : self.description,\n 'ageRating' : self.ageRating,\n 'price' : self.price,\n\n }", "title": "" }, { "docid": "26a388925c2f69693a485852006a03f8", "score": "0.65946865", "text": "def dict(self, *args, **kwargs) -> Dict[str, Any]:\n model_dict = super().dict(*args, **kwargs)\n private_attrs = {k: v for k, v in self.__dict__.items() if k.startswith(\"_\")}\n model_dict.update(private_attrs)\n return model_dict", "title": "" }, { "docid": "6b32dc4129e9871f2e2d257493a7d7e0", "score": "0.6590351", "text": "def get_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"alias\": self.alias,\n \"description\": self.description,\n \"system\": self.system,\n \"users\": self.users,\n\n }", "title": "" }, { "docid": "ed407f3d10126f5e3d34bb4ea6b804f6", "score": "0.6586702", "text": "def as_dict(self):\n return self.__dict__", "title": "" }, { "docid": "ed407f3d10126f5e3d34bb4ea6b804f6", "score": "0.6586702", "text": "def as_dict(self):\n return self.__dict__", "title": "" }, { "docid": "ed407f3d10126f5e3d34bb4ea6b804f6", "score": "0.6586702", "text": "def as_dict(self):\n return self.__dict__", "title": "" }, { "docid": "ed407f3d10126f5e3d34bb4ea6b804f6", "score": "0.6586702", "text": "def as_dict(self):\n return self.__dict__", "title": "" }, { "docid": "ed407f3d10126f5e3d34bb4ea6b804f6", "score": "0.6586702", "text": "def as_dict(self):\n return self.__dict__", "title": "" }, { "docid": "ed407f3d10126f5e3d34bb4ea6b804f6", "score": "0.6586702", "text": "def as_dict(self):\n return self.__dict__", "title": "" }, { "docid": "aba17ae7bbafc90a4ec2dbb7652fba43", "score": "0.65835196", "text": "def to_dict(self) -> dict:\n\n return {\n \"type\": self.type,\n \"geometry\": self.geometry.to_dict(),\n \"properties\": self.properties.to_dict(),\n }", "title": "" }, { "docid": "e90df2bd33b104305534d63e74b23085", "score": "0.65811896", "text": "def dict(self):\n return self._obj_dict", "title": "" }, { "docid": "fb53e8ae7ca143eead5870bf7d036092", "score": "0.6575591", "text": "def to_dict(self, *args, **kwargs):\n return model_to_dict(self, *args, **kwargs)", "title": "" }, { "docid": "c95732231f71b9393273bff7bb541fb6", "score": "0.6571587", "text": "def get_as_dict(self):\n return self.__dict__", "title": "" }, { "docid": "eaecafa6ef975950a6c615b9c3f5426c", "score": "0.65676796", "text": "def get_as_dict(self)->dict:\n return self.parameters", "title": "" }, { "docid": "eaecafa6ef975950a6c615b9c3f5426c", "score": "0.65676796", "text": "def get_as_dict(self)->dict:\n return self.parameters", "title": "" }, { "docid": "5c3e01e659d6e6523a461e7febfb398a", "score": "0.656549", "text": "def to_dict(self):\n obj_dict = {\n 'class': str(self.__class__),\n 'name': self.name,\n 'trans_model': self.trans_model.to_dict(),\n 'vib_model': self.vib_model.to_dict(),\n 'rot_model': self.rot_model.to_dict(),\n 'elec_model': self.elec_model.to_dict(),\n 'nucl_model': self.nucl_model.to_dict(),\n 'smiles': self.smiles,\n 'notes': self.notes\n }\n\n if _is_iterable(self.misc_models):\n obj_dict['misc_models'] = \\\n [mix_model.to_dict() for mix_model in self.misc_models]\n else:\n obj_dict['misc_models'] = self.misc_models\n\n try:\n obj_dict['references'] = self.references.to_dict()\n except 
AttributeError:\n obj_dict['references'] = self.references\n\n return obj_dict", "title": "" }, { "docid": "5ac1c13ef464287697221450b11f40ae", "score": "0.65630645", "text": "def todict(self):\n return(self.__dict__)", "title": "" }, { "docid": "0fdd6adda167e573155d3a636f512ed2", "score": "0.65624565", "text": "def to_dict(self):\n\n attr_representation = [\n key for key in dir(self) if not key.startswith(\"__\") and key != \"to_dict\"\n ]\n\n return {key: getattr(self, key) for key in attr_representation}", "title": "" }, { "docid": "c0b088b0de422b4ad920d77f7dc30baf", "score": "0.65482557", "text": "def to_dict(self):\n data = {}\n for attr in dir(self):\n if not attr.startswith('__'):\n # Exclude built-in attributes of python object\n data[attr] = getattr(self, attr)\n\n return data", "title": "" }, { "docid": "ba53291f475480ab4533dbee4a256641", "score": "0.65473586", "text": "def as_dict(self):\n\n def _attr_as_dict(field):\n \"\"\"Return an attribute as a dict, handling nested objects.\"\"\"\n attr = getattr(self, field)\n if isinstance(attr, IronicObject):\n attr = attr.as_dict()\n return attr\n\n return dict((k, _attr_as_dict(k))\n for k in self.fields\n if self.obj_attr_is_set(k))", "title": "" }, { "docid": "40c78c14a3e55ba2c38afb66d2d75d13", "score": "0.65446883", "text": "def properties(cls):\n return cls._properties.copy()", "title": "" }, { "docid": "49e07b2a97ec45c7ed43a8a25652b75d", "score": "0.6542904", "text": "def to_dict(self):\n\n data = {\n 'property_name': self.name,\n }\n if isinstance(self.value, str):\n data['string_value'] = self.value\n\n if isinstance(self.value, int):\n data['integer_value'] = self.value\n\n if isinstance(self.value, float):\n data['float_value'] = self.value\n\n if isinstance(self.value, bool):\n data['bool_value'] = self.value\n\n return data", "title": "" }, { "docid": "499147d448ee6dcb82aadfcec0572983", "score": "0.6524415", "text": "def to_dict(self):\n return to_dict(self.__dict__)", "title": "" }, { "docid": "499147d448ee6dcb82aadfcec0572983", "score": "0.6524415", "text": "def to_dict(self):\n return to_dict(self.__dict__)", "title": "" }, { "docid": "080042ee08800477de4ad4f7d17fc104", "score": "0.6511579", "text": "def to_dict(self):\n\t\treturn dict(\n\t\t\t(field.name, self.serializable_value(field.name))\n\t\t\tfor field in self._meta.fields\n\t\t)", "title": "" }, { "docid": "d642e5ee7a8abf55b13f45a620d28f9d", "score": "0.65107393", "text": "def to_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'values': self.values\n }", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.65099376", "text": "def to_dict(self):", "title": "" }, { "docid": "3bf041b0ae01ad45ea6f683f8414e7ee", "score": "0.65099376", "text": "def to_dict(self):", "title": "" }, { "docid": "4c0f0ed0d5974e29c4f0dfcff7b0f224", "score": "0.65088964", "text": "def json(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}", "title": "" }, { "docid": "929d8e26111a59101ad2c3bb37e08405", "score": "0.6507359", "text": "def to_dict(self):\n return self.__dict__", "title": "" }, { "docid": "929d8e26111a59101ad2c3bb37e08405", "score": "0.6507359", "text": "def to_dict(self):\n return self.__dict__", "title": "" }, { "docid": "929d8e26111a59101ad2c3bb37e08405", "score": "0.6507359", "text": "def to_dict(self):\n return self.__dict__", "title": "" }, { "docid": "929d8e26111a59101ad2c3bb37e08405", "score": "0.6507359", "text": "def to_dict(self):\n return self.__dict__", "title": "" }, { "docid": 
"f1e6e735649cc73c84ed13feb5fba20f", "score": "0.65034616", "text": "def to_properties(self):\n return {\n \"id\": self.listing_id,\n \"price\": self.price,\n \"street\": self.street_address,\n \"bedrooms\": self.bedrooms,\n \"bathrooms\": self.bathrooms,\n \"sq_ft\": self.square_feet\n }", "title": "" }, { "docid": "abab555a0a50f760eebbedf732f779c5", "score": "0.6498893", "text": "def to_dict(self):\n return {key: getattr(self, key) for key in ('domain', 'name', 'value', 'path')}", "title": "" }, { "docid": "e5a74d9d53ee503339db34ff1daeeed4", "score": "0.64945495", "text": "def to_dict(self):\n output = copy.deepcopy(self.__dict__)\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output", "title": "" }, { "docid": "e2c4effd7d97b850aac9e5490e6f7082", "score": "0.649301", "text": "def getProperties(self):\n props = {}\n text = self.getWikiText().decode('utf-8')\n for key, val in PROPERTY_RE.findall(text):\n props[key] = val\n return props", "title": "" }, { "docid": "41a2b84ac7cd073cf6babfbaa7fd6330", "score": "0.6489129", "text": "def asDict(self):\n d = {}\n for a in self.orderedAttrs:\n d[ a ] = getattr(self, a)\n return d", "title": "" }, { "docid": "448a219f4183972d09766bb9bb7d8d66", "score": "0.6487361", "text": "def serialize(self):\n return {\n 'type' : self.type,\n 'id' : self.id,\n 'description' : self.description,\n 'user_id' : self.user_id\n }", "title": "" }, { "docid": "d3c1a66651535016f3372ebca692fb31", "score": "0.6487119", "text": "def get_fields(self):\n return self.get_mapping()['properties'].keys()", "title": "" }, { "docid": "683669a0ae25374b3f935ab424f7bde6", "score": "0.6487", "text": "def asdict(self):\n return self.__dict__", "title": "" }, { "docid": "613179338664c863e4f04b7113ef4578", "score": "0.64714825", "text": "def as_dict(self) -> dict:\n return vars(self)", "title": "" }, { "docid": "fe13a88233b5eb455bca00b6f9d75962", "score": "0.6467312", "text": "def properties(self):\n\n # Copy the custom properties\n properties = self.custom_properties.copy()\n\n # Add all mandatory properties. 
Make sure that these are updated if one are added to the constructor!\n properties['name'] = self.obj_name\n properties['obj_id'] = self.obj_id # we return id as well, but this should never ever be modified!\n properties['location'] = self.location\n properties['is_movable'] = self.is_movable\n properties['carried_by'] = self.carried_by\n properties['is_traversable'] = self.is_traversable\n properties['class_inheritance'] = self.class_inheritance\n properties['visualization'] = {\n \"size\": self.visualize_size,\n \"shape\": self.visualize_shape,\n \"colour\": self.visualize_colour,\n \"depth\": self.visualize_depth,\n \"opacity\": self.visualize_opacity, \n \"visualize_from_center\": self.visualize_from_center\n }\n\n return properties", "title": "" }, { "docid": "a650067c9eacc10d7ceff4ed45b68cc0", "score": "0.6464917", "text": "def serialize(self):\n return {\n 'id' : self.id,\n 'created' : self.created,\n 'creator' : self.creator,\n 'name' : self.name,\n 'desc' : self.desc,\n 'category_id' : self.category_id\n }", "title": "" }, { "docid": "bc9d1c97d4fc04677548bec4028024bc", "score": "0.64604414", "text": "def as_dict(self):\n pass", "title": "" }, { "docid": "44807f23190fff53370afede48b93d4f", "score": "0.64567477", "text": "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'picture': self.picture,\n 'description': self.description,\n 'ingredients': self.ingredients,\n 'prep_time': self.prep_time,\n 'category_id': self.category_id,\n 'user_id': self.user_id,\n }", "title": "" } ]
e2fd56eff8036e5cd205722f5a691e1f
pillar['master']['file_roots'] is overwritten by the master in order to use the fileclient interface to read the pillar files. We should restore the actual file_roots when we send the pillar back to the minion.
[ { "docid": "1bd53991876d9a27761e854c675850d0", "score": "0.6599424", "text": "def test_issue_5449_report_actual_file_roots_in_pillar(\n salt_call_cli, pillar_tree, base_env_state_tree_root_dir\n):\n ret = salt_call_cli.run(\"pillar.data\")\n assert ret.returncode == 0\n assert ret.data\n file_roots = ret.data[\"master\"][\"file_roots\"][\"base\"]\n assert pathlib.Path(base_env_state_tree_root_dir).resolve() in [\n pathlib.Path(p).resolve() for p in file_roots\n ]", "title": "" } ]
[ { "docid": "ad44959dc2368d29845f68104be29acc", "score": "0.5513161", "text": "def changeRoot(self, root):\n self.path_holder.root = root\n for s in self.subfiles:\n s.path_holder.root = root", "title": "" }, { "docid": "fd02d487b6e859a684615b8454404306", "score": "0.5375802", "text": "def backup_files(self):\r\n for host in self._hosts:\r\n # local filesystem\r\n toml_bak = os.path.join(self._l_toml_bak, host)\r\n tmpl_bak = os.path.join(self._l_tmpl_bak, host)\r\n conf_bak = os.path.join(self._l_conf_bak, host)\r\n remove_folder(toml_bak)\r\n remove_folder(tmpl_bak)\r\n remove_folder(conf_bak)\r\n get_folder(toml_bak)\r\n get_folder(tmpl_bak)\r\n get_folder(conf_bak)\r\n # minio server\r\n toml_pre = '%s/' % os.path.join('toml', self._folder_pre, host)\r\n tmpl_pre = '%s/' % os.path.join('tmpl', self._folder_pre, host)\r\n conf_pre = '%s/' % os.path.join('conf', self._folder_pre, host)\r\n objs = self.minio.list_objects(\r\n bucket_name=self._minio_bucket, prefix=toml_pre, recursive=False)\r\n for x in objs:\r\n self.minio.remove_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=x.object_name.encode('utf-8'))\r\n objs = self.minio.list_objects(\r\n bucket_name=self._minio_bucket, prefix=tmpl_pre, recursive=False)\r\n for x in objs:\r\n self.minio.remove_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=x.object_name.encode('utf-8'))\r\n objs = self.minio.list_objects(\r\n bucket_name=self._minio_bucket, prefix=conf_pre, recursive=False)\r\n for x in objs:\r\n self.minio.remove_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=x.object_name.encode('utf-8'))\r\n aapi = Ansible2API(hosts=[host], **self._ansible_kwargs)\r\n # 1. backup toml to minio server\r\n tomls = self.get_tomls(host=host)\r\n for x in tomls:\r\n state, state_sum, results = ansible_safe_run(\r\n aapi=aapi, module='fetch',\r\n args=dict(\r\n dest='%s/' % toml_bak,\r\n src=os.path.join(self._r_toml, x),\r\n flat='yes'))\r\n msg = 'Toml File Backup: %s' % state_sum\r\n app.logger.debug(logmsg(msg))\r\n msg = 'Toml File Backup: %s' % results\r\n app.logger.info(logmsg(msg))\r\n self.minio.fput_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=os.path.join(toml_pre, x),\r\n file_path=os.path.join(toml_bak, x))\r\n # 2. backup tmpl to minio server\r\n tmpls = self.get_tmpls(host=host)\r\n for x in tmpls:\r\n state, state_sum, results = ansible_safe_run(\r\n aapi=aapi, module='fetch',\r\n args=dict(\r\n dest='%s/' % tmpl_bak,\r\n src=os.path.join(self._r_tmpl, self._folder_pre, x),\r\n flat='yes'))\r\n msg = 'Tmpl File Backup: %s' % state_sum\r\n app.logger.debug(logmsg(msg))\r\n msg = 'Tmpl File Backup: %s' % results\r\n app.logger.info(logmsg(msg))\r\n self.minio.fput_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=os.path.join(tmpl_pre, x),\r\n file_path=os.path.join(tmpl_bak, x))\r\n # 3. 
backup conf to minio server\r\n # files should include (name, dir, mode, owner)\r\n for x in self._files:\r\n src = os.path.join(x['dir'], x['name'])\r\n file_name = '%s%s%s' % (\r\n '@@'.join([x['mode'], x['owner']['name'], x['owner']['group']]),\r\n self._broken_word_2,\r\n src.replace('/', self._broken_word_1))\r\n state, state_sum, results = ansible_safe_run(\r\n aapi=aapi, module='fetch',\r\n args=dict(\r\n dest=os.path.join(conf_bak, file_name),\r\n src=src, flat='yes'))\r\n msg = 'Conf File Backup: %s' % state_sum\r\n app.logger.debug(logmsg(msg))\r\n msg = 'Conf File Backup: %s' % results\r\n app.logger.info(logmsg(msg))\r\n file_path = os.path.join(conf_bak, file_name)\r\n if os.path.isfile(file_path):\r\n self.minio.fput_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=os.path.join(conf_pre, file_name),\r\n file_path=file_path)\r\n # 4. check if toml/tmpl/conf have been backuped to minio server\r\n objs = [os.path.basename(x.object_name.encode('utf-8')) for x in\r\n self.minio.list_objects(\r\n bucket_name=self._minio_bucket, prefix=toml_pre,\r\n recursive=False)]\r\n for x in tomls:\r\n if x not in objs:\r\n raise Exception('Toml Backup Failed: %s.' % x)\r\n objs = [os.path.basename(x.object_name.encode('utf-8')) for x in\r\n self.minio.list_objects(\r\n bucket_name=self._minio_bucket, prefix=tmpl_pre,\r\n recursive=False)]\r\n for x in tmpls:\r\n if x not in objs:\r\n raise Exception('Tmpl Backup Failed: %s.' % x)", "title": "" }, { "docid": "62593542fbd2029336126d78b58cfe72", "score": "0.53749573", "text": "def load_master(self):\n ret=self._handle.MasterConfLoadFile(self.conf_path+\"/\"+self._mcf_file)\n return ret", "title": "" }, { "docid": "392007e234b8f5c9c5670b26a31d24db", "score": "0.5369088", "text": "def __update_root(self) -> None:\n # 5.2.1. Let N denote the version number of the trusted root metadata\n # file.\n prev_root = self.__root\n curr_root = prev_root\n n = curr_root.version\n\n # 5.2.8. Repeat steps 5.2.1 to 5.2.8.\n for _ in range(self.config.MAX_ROOT_ROTATIONS):\n # 5.2.2. Try downloading version N+1 of the root metadata file.\n n += 1\n remote_filename = self.__remote_metadata_filename(self.ROOT_ROLENAME, n)\n remote_path = self.__remote_metadata_path(remote_filename)\n try:\n tmp_file = self.download(\n remote_path, self.config.MAX_ROOT_LENGTH, self.config\n )\n except DownloadNotFoundError:\n break\n self.__check_length(tmp_file, self.config.MAX_ROOT_LENGTH)\n\n # 5.2.3. Check for an arbitrary software attack.\n metadata = self.read_from_file(tmp_file)\n metadata.signed = cast(Root, metadata.signed)\n self.__check_signatures(curr_root.root, metadata)\n self.__check_signatures(metadata.signed.root, metadata)\n\n # 5.2.4. Check for a rollback attack.\n if metadata.signed.version != n:\n raise RollbackAttack(\n f\"{metadata.signed.version} != {n} in {remote_path}\"\n )\n\n # 5.2.5. Note that the expiration of the new (intermediate) root\n # metadata file does not matter yet.\n\n # 5.2.6. Set the trusted root metadata file to the new root metadata\n # file.\n curr_root = metadata.signed\n\n # 5.2.9. Check for a freeze attack.\n self.__check_expiry(curr_root)\n\n if prev_root < curr_root:\n # 5.2.11. Set whether consistent snapshots are used as per the\n # trusted root metadata file.\n # NOTE: We violate the spec in checking this *before* deleting local\n # timestamp and/or snapshot metadata, which I think is reasonable.\n if not curr_root.consistent_snapshot:\n raise NoConsistentSnapshotsError\n\n # 5.2.10. 
If the timestamp and / or snapshot keys have been rotated,\n # then delete the trusted timestamp and snapshot metadata files.\n if (\n self.__root.timestamp != curr_root.timestamp\n or self.__root.snapshot != curr_root.snapshot\n ):\n filename = self.__local_metadata_filename(self.SNAPSHOT_ROLENAME)\n if self.file_exists(filename):\n self.rm_file(filename)\n\n filename = self.__local_metadata_filename(self.TIMESTAMP_ROLENAME)\n if self.file_exists(filename):\n self.rm_file(filename)\n\n # 5.2.7. Persist root metadata.\n # NOTE: We violate the spec in persisting only *after* checking\n # everything, which I think is reasonable.\n self.mv_file(tmp_file, self.__local_metadata_filename(self.ROOT_ROLENAME))\n self.__root = curr_root", "title": "" }, { "docid": "060a31bb9b639b810ebbb50da3bbcf27", "score": "0.5350123", "text": "def __load_root(self) -> None:\n # NOTE: we must parse the root metadata file on disk in order to get\n # the keys to verify itself in the first place.\n filename = self.__local_metadata_filename(self.ROOT_ROLENAME)\n metadata = self.read_from_file(filename)\n\n # FIXME: the following line is purely to keep mypy happy; otherwise,\n # it complains that the .signed.root attribute does not exist.\n metadata.signed = cast(Root, metadata.signed)\n\n # Verify self-signatures on previous root metadata file.\n self.__check_signatures(metadata.signed.root, metadata)\n\n # NOTE: the expiration of the trusted root metadata file does not\n # matter, because we will attempt to update it in the next step.\n\n # We do not support non-consistent-snapshot repositories.\n if not metadata.signed.consistent_snapshot:\n raise NoConsistentSnapshotsError\n\n # Now that we have verified signatures, throw them away, and set the\n # current root to the actual metadata of interest.\n self.__root = metadata.signed", "title": "" }, { "docid": "a2a9d107f4683c123883aca61c05208e", "score": "0.53284204", "text": "def post_master_init(self, master):\n\n if self.connected:\n self.opts[\"pillar\"] = yield salt.pillar.get_async_pillar(\n self.opts,\n self.opts[\"grains\"],\n self.opts[\"id\"],\n saltenv=self.opts[\"saltenv\"],\n pillarenv=self.opts.get(\"pillarenv\"),\n ).compile_pillar()\n\n # Ensure that the value of master is the one we passed in.\n # if pillar_opts is enabled then master could be overwritten\n # when compile_pillar is run.\n self.opts[\"master\"] = master\n\n tag = \"salt/deltaproxy/start\"\n self._fire_master(tag=tag)\n\n if \"proxy\" not in self.opts[\"pillar\"] and \"proxy\" not in self.opts:\n errmsg = (\n \"No proxy key found in pillar or opts for id {}. Check your pillar/opts \"\n \"configuration and contents. 
Salt-proxy aborted.\".format(self.opts[\"id\"])\n )\n log.error(errmsg)\n self._running = False\n raise SaltSystemExit(code=-1, msg=errmsg)\n\n if \"proxy\" not in self.opts:\n self.opts[\"proxy\"] = self.opts[\"pillar\"][\"proxy\"]\n\n self.opts = salt.utils.dictupdate.merge(\n self.opts,\n self.opts[\"pillar\"],\n strategy=self.opts.get(\"proxy_merge_pillar_in_opts_strategy\"),\n merge_lists=self.opts.get(\"proxy_deep_merge_pillar_in_opts\", False),\n )\n\n if self.opts.get(\"proxy_mines_pillar\"):\n # Even when not required, some details such as mine configuration\n # should be merged anyway whenever possible.\n if \"mine_interval\" in self.opts[\"pillar\"]:\n self.opts[\"mine_interval\"] = self.opts[\"pillar\"][\"mine_interval\"]\n if \"mine_functions\" in self.opts[\"pillar\"]:\n general_proxy_mines = self.opts.get(\"mine_functions\", [])\n specific_proxy_mines = self.opts[\"pillar\"][\"mine_functions\"]\n try:\n self.opts[\"mine_functions\"] = general_proxy_mines + specific_proxy_mines\n except TypeError as terr:\n log.error(\n \"Unable to merge mine functions from the pillar in the opts, for proxy %s\",\n self.opts[\"id\"],\n )\n\n fq_proxyname = self.opts[\"proxy\"][\"proxytype\"]\n\n # Need to load the modules so they get all the dunder variables\n (\n self.functions,\n self.returners,\n self.function_errors,\n self.executors,\n ) = self._load_modules()\n\n # we can then sync any proxymodules down from the master\n # we do a sync_all here in case proxy code was installed by\n # SPM or was manually placed in /srv/salt/_modules etc.\n self.functions[\"saltutil.sync_all\"](saltenv=self.opts[\"saltenv\"])\n\n # Pull in the utils\n self.utils = salt.loader.utils(self.opts)\n\n # Then load the proxy module\n self.proxy = salt.loader.proxy(self.opts, utils=self.utils)\n\n # And re-load the modules so the __proxy__ variable gets injected\n (\n self.functions,\n self.returners,\n self.function_errors,\n self.executors,\n ) = self._load_modules()\n self.functions.pack[\"__proxy__\"] = self.proxy\n self.proxy.pack[\"__salt__\"] = self.functions\n self.proxy.pack[\"__ret__\"] = self.returners\n self.proxy.pack[\"__pillar__\"] = self.opts[\"pillar\"]\n\n # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__\n self.utils = salt.loader.utils(self.opts, proxy=self.proxy)\n self.proxy.pack[\"__utils__\"] = self.utils\n\n # Reload all modules so all dunder variables are injected\n self.proxy.reload_modules()\n\n # Start engines here instead of in the Minion superclass __init__\n # This is because we need to inject the __proxy__ variable but\n # it is not setup until now.\n self.io_loop.spawn_callback(\n salt.engines.start_engines, self.opts, self.process_manager, proxy=self.proxy\n )\n\n proxy_init_func_name = \"{}.init\".format(fq_proxyname)\n proxy_shutdown_func_name = \"{}.shutdown\".format(fq_proxyname)\n if (\n proxy_init_func_name not in self.proxy\n or proxy_shutdown_func_name not in self.proxy\n ):\n errmsg = (\n \"Proxymodule {} is missing an init() or a shutdown() or both. \"\n \"Check your proxymodule. 
Salt-proxy aborted.\".format(fq_proxyname)\n )\n log.error(errmsg)\n self._running = False\n raise SaltSystemExit(code=-1, msg=errmsg)\n\n self.module_executors = self.proxy.get(\n \"{}.module_executors\".format(fq_proxyname), lambda: []\n )()\n proxy_init_fn = self.proxy[proxy_init_func_name]\n proxy_init_fn(self.opts)\n\n self.opts[\"grains\"] = salt.loader.grains(self.opts, proxy=self.proxy)\n\n self.mod_opts = self._prep_mod_opts()\n self.matchers = salt.loader.matchers(self.opts)\n self.beacons = salt.beacons.Beacon(self.opts, self.functions)\n uid = salt.utils.user.get_uid(user=self.opts.get(\"user\", None))\n self.proc_dir = salt.minion.get_proc_dir(self.opts[\"cachedir\"], uid=uid)\n\n if self.connected and self.opts[\"pillar\"]:\n # The pillar has changed due to the connection to the master.\n # Reload the functions so that they can use the new pillar data.\n (\n self.functions,\n self.returners,\n self.function_errors,\n self.executors,\n ) = self._load_modules()\n if hasattr(self, \"schedule\"):\n self.schedule.functions = self.functions\n self.schedule.returners = self.returners\n\n if not hasattr(self, \"schedule\"):\n self.schedule = salt.utils.schedule.Schedule(\n self.opts,\n self.functions,\n self.returners,\n cleanup=[salt.minion.master_event(type=\"alive\")],\n proxy=self.proxy,\n _subprocess_list=self.subprocess_list,\n )\n\n # add default scheduling jobs to the minions scheduler\n if self.opts[\"mine_enabled\"] and \"mine.update\" in self.functions:\n self.schedule.add_job(\n {\n \"__mine_interval\": {\n \"function\": \"mine.update\",\n \"minutes\": self.opts[\"mine_interval\"],\n \"jid_include\": True,\n \"maxrunning\": 2,\n \"run_on_start\": True,\n \"return_job\": self.opts.get(\"mine_return_job\", False),\n }\n },\n persist=True,\n fire_event=False,\n )\n log.info(\"Added mine.update to scheduler\")\n else:\n self.schedule.delete_job(\"__mine_interval\", persist=True, fire_event=False)\n\n # add master_alive job if enabled\n if self.opts[\"transport\"] != \"tcp\" and self.opts[\"master_alive_interval\"] > 0:\n self.schedule.add_job(\n {\n salt.minion.master_event(type=\"alive\", master=self.opts[\"master\"]): {\n \"function\": \"status.master\",\n \"seconds\": self.opts[\"master_alive_interval\"],\n \"jid_include\": True,\n \"maxrunning\": 1,\n \"return_job\": False,\n \"kwargs\": {\"master\": self.opts[\"master\"], \"connected\": True},\n }\n },\n persist=True,\n fire_event=False,\n )\n if (\n self.opts[\"master_failback\"]\n and \"master_list\" in self.opts\n and self.opts[\"master\"] != self.opts[\"master_list\"][0]\n ):\n self.schedule.add_job(\n {\n salt.minion.master_event(type=\"failback\"): {\n \"function\": \"status.ping_master\",\n \"seconds\": self.opts[\"master_failback_interval\"],\n \"jid_include\": True,\n \"maxrunning\": 1,\n \"return_job\": False,\n \"kwargs\": {\"master\": self.opts[\"master_list\"][0]},\n }\n },\n persist=True,\n fire_event=False,\n )\n else:\n self.schedule.delete_job(\n salt.minion.master_event(type=\"failback\"),\n persist=True,\n fire_event=False,\n )\n else:\n self.schedule.delete_job(\n salt.minion.master_event(type=\"alive\", master=self.opts[\"master\"]),\n persist=True,\n fire_event=False,\n )\n self.schedule.delete_job(\n salt.minion.master_event(type=\"failback\"),\n persist=True,\n fire_event=False,\n )\n\n # proxy keepalive\n proxy_alive_fn = fq_proxyname + \".alive\"\n if (\n proxy_alive_fn in self.proxy\n and \"status.proxy_reconnect\" in self.functions\n and self.opts.get(\"proxy_keep_alive\", True)\n ):\n # if 
`proxy_keep_alive` is either not specified, either set to False does not retry reconnecting\n self.schedule.add_job(\n {\n \"__proxy_keepalive\": {\n \"function\": \"status.proxy_reconnect\",\n \"minutes\": self.opts.get(\n \"proxy_keep_alive_interval\", 1\n ), # by default, check once per minute\n \"jid_include\": True,\n \"maxrunning\": 1,\n \"return_job\": False,\n \"kwargs\": {\"proxy_name\": fq_proxyname},\n }\n },\n persist=True,\n fire_event=False,\n )\n self.schedule.enable_schedule(fire_event=False)\n else:\n self.schedule.delete_job(\n \"__proxy_keepalive\",\n persist=True,\n fire_event=False,\n )\n\n # Sync the grains here so the proxy can communicate them to the master\n self.functions[\"saltutil.sync_grains\"](saltenv=\"base\")\n self.grains_cache = self.opts[\"grains\"]\n # Now setup the deltaproxies\n self.deltaproxy = {}\n self.deltaproxy_opts = {}\n self.deltaproxy_objs = {}\n self.proxy_grains = {}\n self.proxy_pillar = {}\n self.proxy_context = {}\n self.add_periodic_callback(\"cleanup\", self.cleanup_subprocesses)\n\n _failed = list()\n if self.opts[\"proxy\"].get(\"parallel_startup\"):\n log.debug(\"Initiating parallel startup for proxies\")\n waitfor = []\n for _id in self.opts[\"proxy\"].get(\"ids\", []):\n waitfor.append(\n subproxy_post_master_init(\n _id,\n uid,\n self.opts,\n self.proxy,\n self.utils,\n )\n )\n\n try:\n results = yield tornado.gen.multi(waitfor)\n except Exception as exc: # pylint: disable=broad-except\n log.error(\"Errors loading sub proxies: %s\", exc)\n\n _failed = self.opts[\"proxy\"].get(\"ids\", [])[:]\n for sub_proxy_data in results:\n minion_id = sub_proxy_data[\"proxy_opts\"].get(\"id\")\n if minion_id in _failed:\n _failed.remove(minion_id)\n\n if sub_proxy_data[\"proxy_minion\"]:\n self.deltaproxy_opts[minion_id] = sub_proxy_data[\"proxy_opts\"]\n self.deltaproxy_objs[minion_id] = sub_proxy_data[\"proxy_minion\"]\n\n if self.deltaproxy_opts[minion_id] and self.deltaproxy_objs[minion_id]:\n self.deltaproxy_objs[\n minion_id\n ].req_channel = salt.channel.client.AsyncReqChannel.factory(\n sub_proxy_data[\"proxy_opts\"], io_loop=self.io_loop\n )\n else:\n log.debug(\"Initiating non-parallel startup for proxies\")\n for _id in self.opts[\"proxy\"].get(\"ids\", []):\n try:\n sub_proxy_data = yield subproxy_post_master_init(\n _id, uid, self.opts, self.proxy, self.utils\n )\n except Exception as exc: # pylint: disable=broad-except\n log.info(\n \"An exception occured during initialization for %s, skipping: %s\",\n _id,\n exc,\n )\n _failed.append(_id)\n continue\n minion_id = sub_proxy_data[\"proxy_opts\"].get(\"id\")\n\n if sub_proxy_data[\"proxy_minion\"]:\n self.deltaproxy_opts[minion_id] = sub_proxy_data[\"proxy_opts\"]\n self.deltaproxy_objs[minion_id] = sub_proxy_data[\"proxy_minion\"]\n\n if self.deltaproxy_opts[minion_id] and self.deltaproxy_objs[minion_id]:\n self.deltaproxy_objs[\n minion_id\n ].req_channel = salt.channel.client.AsyncReqChannel.factory(\n sub_proxy_data[\"proxy_opts\"], io_loop=self.io_loop\n )\n\n if _failed:\n log.info(\"Following sub proxies failed %s\", _failed)\n self.ready = True", "title": "" }, { "docid": "2cf0f0fcee484304651511099d841b88", "score": "0.49817708", "text": "def getRoots(self) -> List[java.io.File]:\n ...", "title": "" }, { "docid": "1e3e5c16886f6b3e12aea103fdbaf125", "score": "0.4979484", "text": "def refresh_remote_folders_from_log(root_binding):\n # TODO\n raise NotImplementedError()", "title": "" }, { "docid": "764389d5e26bf4f724d85c6be0667ebf", "score": "0.495386", "text": "def 
update_root_list_files():\n\n\tfrom gsi.models import HomeVariables as Home\n\tfrom gsi.models import ListTestFiles\n\n\thome_var = Home.objects.all()\n\troot_path = home_var[0].RF_AUXDATA_DIR\n\n\ttry:\n\t\tfiles, errors = get_files(root_path, '.tif')\n\t\ttif_files = filter(lambda x: x.endswith('.tif'), files)\n\t\tfiles_exclude = ListTestFiles.objects.filter(input_data_directory=None).exclude(name__in=tif_files).delete()\n\t\tfiles_include = ListTestFiles.objects.filter(input_data_directory=None).values_list('name')\n\n\t\tfor f in tif_files:\n\t\t\tfile_path = os.path.join(root_path, f)\n\n\t\t\tif (f,) not in files_include:\n\t\t\t\tobj = ListTestFiles.objects.create(name=f, input_data_directory=None)\n\t\t\t\tobj.size = convert_size_file(os.path.getsize(file_path))\n\t\t\t\tobj.date_modified = datetime.fromtimestamp(os.path.getmtime(file_path))\n\t\t\t\tobj.save()\n\texcept StopIteration, e:\n\t\tpass\n\texcept OSError, e:\n\t\tpass", "title": "" }, { "docid": "a352e3e3b646c8815510e64b654e24a4", "score": "0.49312118", "text": "def test_pillar_refresh_pillar_raw(salt_cli, salt_minion, key_pillar):\n key = \"issue-54941-raw\"\n\n # We do not expect to see the pillar because it does not exist yet\n ret = salt_cli.run(\"pillar.raw\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert val == {}\n\n with key_pillar(key) as key_pillar_instance:\n # The pillar exists now but raw reads it from in-memory pillars\n ret = salt_cli.run(\"pillar.raw\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert val == {}\n\n # Calling refresh_pillar to update in-memory pillars\n key_pillar_instance.refresh_pillar()\n\n # The pillar can now be read from in-memory pillars\n ret = salt_cli.run(\"pillar.raw\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert val is True, repr(val)", "title": "" }, { "docid": "90a197cdf6479cd942dec99130ed7348", "score": "0.48707634", "text": "def _unpack_rootstrap(self):\n\n ext2compr = {'tgz': 'gz', 'gz': 'gz', 'bz2': 'bz2'}\n\n try:\n extension = self._rootstrap.split('.')[-1]\n try:\n compr_flag = ext2compr[extension]\n except KeyError:\n compr_flag = ''\n rootstrap = tarfile.open(self._rootstrap, \"r:%s\" % compr_flag)\n tmpdir = tempfile.mkdtemp(prefix='rootstrap-')\n LOG.info(\"Unpacking rootstrap to '%s'\" % tmpdir)\n rootstrap.extractall(tmpdir)\n self.path = tmpdir\n except Exception, error:\n msg = \"Failed to unpack rootstrap: '%s'\" % str(error)\n LOG.error(msg)\n raise ConductorError(msg, \"311\")", "title": "" }, { "docid": "e61f48461a3105aa117acee457b88050", "score": "0.4861662", "text": "def configure_postload(self, conf):\n super().configure_postload(conf)\n base_directory = conf.get('base_directory')\n # where repos are cloned on the slave\n conf.set('repo_directory', join(base_directory, 'repos', 'slave'))\n # where the slave's result artifacts should be stored\n conf.set('artifact_directory', join(base_directory, 'artifacts'))\n # where to store results on the slave\n conf.set('results_directory', join(base_directory, 'results', 'slave'))\n conf.set('timings_directory', join(base_directory, 'timings', 'master')) # timing data", "title": "" }, { "docid": "65cb9f9f2a9233d694eaae4ca0594877", "score": "0.48539564", "text": "def push_files(self, rollback=False):\r\n for host in self._hosts:\r\n aapi = Ansible2API(hosts=[host], **self._ansible_kwargs)\r\n toml_folder = '%s/' % (\r\n os.path.join(self._l_toml_bak, host)\r\n if rollback else 
os.path.join(self._l_toml, host))\r\n tmpl_folder = '{}/'.format(\r\n os.path.join(self._l_tmpl_bak, host)\r\n if rollback else self._l_tmpl)\r\n if rollback:\r\n conf_folder = '%s/' % os.path.join(self._l_conf_bak, host)\r\n # clear folders\r\n remove_folder(toml_folder)\r\n remove_folder(tmpl_folder)\r\n remove_folder(conf_folder)\r\n get_folder(toml_folder)\r\n get_folder(tmpl_folder)\r\n get_folder(conf_folder)\r\n # download latest tomls/tmpls from minio\r\n toml_pre = '%s/' % os.path.join('toml', self._folder_pre, host)\r\n objs = self.minio.list_objects(\r\n bucket_name=self._minio_bucket, prefix=toml_pre, recursive=False)\r\n for x in objs:\r\n object_name = x.object_name.encode('utf-8')\r\n self.minio.fget_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=object_name,\r\n file_path=os.path.join(\r\n toml_folder, os.path.basename(object_name)))\r\n tmpl_pre = '%s/' % os.path.join('tmpl', self._folder_pre, host)\r\n objs = self.minio.list_objects(\r\n bucket_name=self._minio_bucket, prefix=tmpl_pre, recursive=False)\r\n for x in objs:\r\n object_name = x.object_name.encode('utf-8')\r\n self.minio.fget_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=object_name,\r\n file_path=os.path.join(\r\n tmpl_folder, os.path.basename(object_name)))\r\n conf_pre = '%s/' % os.path.join('conf', self._folder_pre, host)\r\n objs = self.minio.list_objects(\r\n bucket_name=self._minio_bucket, prefix=conf_pre, recursive=False)\r\n for x in objs:\r\n object_name = x.object_name.encode('utf-8')\r\n self.minio.fget_object(\r\n bucket_name=self._minio_bucket,\r\n object_name=object_name,\r\n file_path=os.path.join(\r\n conf_folder, os.path.basename(object_name)))\r\n # push conf files to remote/local confd client\r\n for x in os.listdir(conf_folder):\r\n config = x.split(self._broken_word_2)\r\n file_path = config[1].replace(self._broken_word_1, '/')\r\n info = config[0].split('@@')\r\n state, state_sum, results = ansible_safe_run(\r\n aapi=aapi, module='copy',\r\n args=dict(\r\n mode=info[0],\r\n src=os.path.join(conf_folder, x),\r\n dest=file_path,\r\n group=info[2],\r\n owner=info[1]))\r\n msg = 'Conf File Updated: %s' % state_sum\r\n app.logger.debug(logmsg(msg))\r\n msg = 'Conf File Updated: %s' % results\r\n app.logger.info(logmsg(msg))\r\n # 1. push toml files to remote/local confd client\r\n state, state_sum, results = ansible_safe_run(\r\n aapi=aapi, module='copy',\r\n args=dict(\r\n mode=self._confd_file_mode,\r\n src=toml_folder,\r\n dest=self._r_toml,\r\n group=self._confd_owner[1],\r\n owner=self._confd_owner[0]))\r\n msg = 'Toml File Updated: %s' % state_sum\r\n app.logger.debug(logmsg(msg))\r\n msg = 'Toml File Updated: %s' % results\r\n app.logger.info(logmsg(msg))\r\n # 2. 
push tmpl files to remote/local confd client\r\n r_tmpl_folder = os.path.join(self._r_tmpl, self._folder_pre)\r\n state, state_sum, results = ansible_safe_run(\r\n aapi=aapi, module='copy',\r\n args=dict(\r\n mode=self._confd_file_mode,\r\n src=tmpl_folder,\r\n dest=r_tmpl_folder,\r\n group=self._confd_owner[1],\r\n owner=self._confd_owner[0]))\r\n msg = 'Tmpl File Updated: %s' % state_sum\r\n app.logger.debug(logmsg(msg))\r\n msg = 'Tmpl File Updated: %s' % results\r\n app.logger.info(logmsg(msg))", "title": "" }, { "docid": "82699850f5dc6d73115927e877f5a9ec", "score": "0.4845694", "text": "def master_config():\n\n pass", "title": "" }, { "docid": "e02a95f8f4d4bb3d96b13d800f4d617b", "score": "0.48365924", "text": "def RefRpcFile(self):\n pass", "title": "" }, { "docid": "408926e7827627c6816c4a0723b86ac5", "score": "0.48189354", "text": "def _do_restore(all_masters: Set[Node], backup_local_path: Path) -> None:\n backup_name = backup_local_path.name\n backup_remote_path = Path('/etc/') / backup_name\n\n for master in all_masters:\n master.send_file(\n local_path=backup_local_path,\n remote_path=backup_remote_path,\n )\n\n for master in all_masters:\n master.run(args=['systemctl', 'stop', 'dcos-exhibitor'])\n\n for master in all_masters:\n master.run(\n args=[\n '/opt/mesosphere/bin/dcos-shell',\n 'dcos-zk', 'restore', str(backup_remote_path), '-v',\n ],\n output=Output.LOG_AND_CAPTURE,\n )\n\n for master in all_masters:\n master.run(args=['systemctl', 'start', 'dcos-exhibitor'])", "title": "" }, { "docid": "0f3b2bef2290f599b8ac35f6cac2ecd0", "score": "0.477597", "text": "def load(self, root):\n self.__rootdir = root", "title": "" }, { "docid": "f8b26fb5786b8a5cbb0b0f060745d1da", "score": "0.4741134", "text": "def test_issue_54755(tmp_path, state_tree, modules):\n file_path = tmp_path / \"issue-54755\"\n sls_contents = \"\"\"issue-54755:\n file.managed:\n - name: {{ pillar['file_path'] }}\n - contents: issue-54755\n - unless: /bin/bash -c false\n \"\"\"\n\n with pytest.helpers.temp_file(\"issue-54755.sls\", sls_contents, state_tree):\n ret = modules.state.sls(mods=\"issue-54755\", pillar={\"file_path\": file_path})\n key = \"file_|-issue-54755_|-{}_|-managed\".format(file_path)\n assert key in ret.raw\n assert ret.raw[key][\"result\"] is True\n with salt.utils.files.fopen(str(file_path), \"r\") as fp:\n assert fp.read().strip() == \"issue-54755\"", "title": "" }, { "docid": "87cfefe32bb6aa1a44ec253fa9af55b0", "score": "0.4737973", "text": "def reset_root_dir(self):\n self.root_directory = os.getcwd()", "title": "" }, { "docid": "be52fe9b6f4d0173f4d393c635d095cc", "score": "0.47192565", "text": "def __init__(self, root_path):\n self._root_path = os.path.realpath(root_path)", "title": "" }, { "docid": "d9590b54a8b88dea809b3bd437abb6c9", "score": "0.47020742", "text": "def setinitial(self, args):\r\n current = py.path.local()\r\n for arg in args + [current]:\r\n anchor = current.join(arg, abs=1)\r\n if anchor.check(): # we found some file object \r\n self._path2confmods[None] = self.getconftestmodules(anchor)\r\n break", "title": "" }, { "docid": "e286cc377c2cd840357f6826080a0977", "score": "0.47016668", "text": "def __init__(self):\r\n self.file_list = self.update()", "title": "" }, { "docid": "36716c424b9afb41cad69a2c3d181b3f", "score": "0.46904516", "text": "def sync(self) -> None:\n tarred_files = self._get_all_tarred_filenames()\n untarred_files = self._get_all_untarred_filepaths()\n new_files = _get_missing_tarred_files(tarred_files, untarred_files)\n\n files_with_extensions = 
[f'{filename}.tar.gz' for filename in new_files]\n\n if not files_with_extensions:\n pprint('no new scan files to untar')\n\n for filename in files_with_extensions:\n pprint(('untarring file: ', filename))\n self._untar_file(filename)\n pprint(('untarred file: ', filename))", "title": "" }, { "docid": "24ecd95ff533b756f56f76bd9b59020d", "score": "0.4687472", "text": "def sync_files(self):\n while True:\n try:\n time.sleep(10)\n for client in self.clients:\n logger.debug( \"list of files for client %s, availability %s\",client.mfiles.list(), client.available)\n if client.available:\n for file in client.mfiles.list():\n rpc_status = rpc.pull_file(client.ip, client.port, file, self.username, self.ip)\n\n if rpc_status is None:\n client.available = False\n continue\n client.mfiles.remove(file)\n logger.debug(\"actual sync\")\n except KeyboardInterrupt:\n break", "title": "" }, { "docid": "c789aa2538335812c9ee9df76e2ad184", "score": "0.4685471", "text": "def ls_master_path(path):\n return ls_pc_path(path) if ISMASTER else ls_s3_path(path)", "title": "" }, { "docid": "4c8ae07339bfc66cac43dd2ec385afd6", "score": "0.46796742", "text": "def tmp_bdb_root(mocker, tmp_path):\n for dot_path in ('nog.bdb._get_bdb_root','impl.nog.bdb._get_bdb_root',):\n mocker.patch(\n dot_path, return_value=(tmp_path / 'minters').resolve(),\n )\n\n return tmp_path", "title": "" }, { "docid": "86a7bfd9878a990d0964bc8b4546bbc0", "score": "0.46720725", "text": "def read_root ( fname ) :\n \n import ostap.io.root_file\n with ROOT.TFile.Open ( fname , 'READ' ) as f :\n \n f.ls()\n keys = f.GetListOfKeys()\n \n for k in keys :\n key = \"%s;%d\" % ( k.GetName() , k.GetCycle() ) \n obj = f.Get( key ) \n logger.info ( 'Read key/object %s/%s' % ( key , type ( obj ) ) )", "title": "" }, { "docid": "b0cc1736db5757407837b033345b43d0", "score": "0.46643278", "text": "def root(self):\n str_treeRoot = '/'\n self.ml_cwd = [str_treeRoot]\n self.msnode_current = self.msnode_root\n self.msbranch_current = self.msbranch_root", "title": "" }, { "docid": "ccc53978234f813df3e15ab1a28617b7", "score": "0.46580508", "text": "def _update_roots(self, server_binding, session, local_roots,\n remote_roots, repository):\n local_roots_by_id = dict((r.remote_root, r) for r in local_roots)\n local_root_ids = set(local_roots_by_id.keys())\n\n remote_roots_by_id = dict((r.uid, r) for r in remote_roots)\n remote_root_ids = set(remote_roots_by_id.keys())\n\n to_remove = local_root_ids - remote_root_ids\n to_add = remote_root_ids - local_root_ids\n\n for ref in to_remove:\n self._local_unbind_root(local_roots_by_id[ref], session)\n\n for ref in to_add:\n # get a client with the right base folder\n rc = self.get_remote_client(server_binding,\n repository=repository,\n base_folder=ref)\n self._local_bind_root(server_binding, remote_roots_by_id[ref],\n rc, session)", "title": "" }, { "docid": "a7b9e87e1319f2c8d1d3c7cd83655376", "score": "0.46576884", "text": "def hard_refresh(self):\r\n for sys in self.fs:\r\n sys.refresh_files()\r\n self.refresh_files()", "title": "" }, { "docid": "50d4a42720e5b0477230752f8a9aa19d", "score": "0.46575683", "text": "def read_master_file(self, filename):\n temp = []\n \"\"\"Read the cache file from disk\"\"\"\n with open(\"roothints.md\") as file_:\n lines = file_.readlines()\n for l in lines:\n #remove comments\n b = re.sub(r';[^\\n]*', \"\", l)\n te = l.split()\n if(te[0] != ';'):\n temp.append(te)\n if(\".\" in te[0]):\n type_ = self.getType(te[2])\n if(type_ == Type.A):\n rdata = ARecordData(te[3])\n elif(type_ == Type.NS):\n 
rdata = NSRecordData(te[3])\n elif(type_ == Type.CNAME):\n rdata = CNAMERecordData(te[3])\n rr = ResourceRecord(te[0], type_, Class.IN, te[1], rdata)\n self.records.update({te[0] : rr})", "title": "" }, { "docid": "10181a53ad62517b829a3201cbf0c055", "score": "0.46558937", "text": "def test_projects_project_id_restore_files_post(self):\n pass", "title": "" }, { "docid": "95257532723ba4842eaa1b8e643c2138", "score": "0.46411076", "text": "def _overrideMasterSettings(self):\n return", "title": "" }, { "docid": "cb1f51a6a59f282b54de317003af50fa", "score": "0.46361974", "text": "def reindex(self):\n # check raw detector images\n logger.info('Reindexing already present exposures...')\n for subdir, extension in [\n ('images', 'cbf'), ('images_local', 'cbf'), ('param', 'pickle'), ('param_override', 'pickle'),\n ('eval2d', 'npz'), ('eval1d', 'txt')]:\n # find all subdirectories in `directory`, including `directory`\n # itself\n directory = self.getSubDir(subdir)\n logger.debug(f'Reindexing subdirectory {directory}')\n filename_regex = re.compile(rf'^(?P<prefix>\\w+)_(?P<fsn>\\d+)\\.{extension}$')\n for folder, subdirs, files in os.walk(str(directory)):\n logger.debug(f'Looking in folder {folder}')\n # find all files\n matchlist = [m for m in [filename_regex.match(f) for f in files] if m is not None]\n # find all file prefixes, like 'crd', 'tst', 'tra', 'scn', etc.\n prefixes = {m.group('prefix') for m in matchlist}\n for prefix in prefixes:\n logger.debug(f'Checking prefix {prefix}')\n if prefix not in self._lastfsn:\n self._lastfsn[prefix] = None\n # find the highest available FSN of the current prefix in\n # this directory\n maxfsn = max([int(m.group('fsn')) for m in matchlist if m.group('prefix') == prefix])\n logger.debug(f'Maxfsn is {maxfsn}')\n if self._lastfsn[prefix] is None or (maxfsn > self._lastfsn[prefix]):\n logger.debug(f'Updating lastfsn for prefix {prefix} to {maxfsn}')\n self._lastfsn[prefix] = maxfsn\n logger.debug(f'All prefixes done in this folder ({folder})')\n logger.debug('All folders done.')\n\n logger.debug('Creating empty prefixes')\n # add known prefixes to self._lastfsn if they were not yet added.\n for prefix in self.config['path']['prefixes'].values():\n if prefix not in self._lastfsn:\n self._lastfsn[prefix] = None\n else:\n self.lastFSNChanged.emit(prefix, self._lastfsn[prefix])\n\n # update self._nextfsn\n logger.debug('Updating nextfsn.')\n for prefix in self._lastfsn:\n self._nextfsn[prefix] = self._lastfsn[prefix] + 1 if self._lastfsn[prefix] is not None else 0\n self.nextFSNChanged.emit(prefix, self._nextfsn[prefix])\n logger.info('Reindexing done.')", "title": "" }, { "docid": "3af606379e182d778d17badd5eeb1a0e", "score": "0.4634852", "text": "def test_relative_root_map(RE, tmpdir):\n directory = str(tmpdir)\n\n serializer = Serializer(directory)\n RE(count([img]), serializer)\n serializer.close()\n dest = shutil.copytree(img.save_path, pathlib.Path(directory, 'external_data'))\n relative_d = str(pathlib.Path(dest.relative_to(directory)))\n root_map = {img.save_path: relative_d}\n\n # At this point root map maps the original absolute path to one relative to\n # the diretory containing the catalog.\n\n CATALOG_FILE = f\"\"\"\nsources:\n test_relative_root_map:\n driver: bluesky-jsonl-catalog\n args:\n paths:\n - {directory}/*.jsonl\n root_map:\n {img.save_path}: {relative_d}\"\"\"\n catalog_path = str(pathlib.Path(directory, \"catalog.yml\"))\n with open(catalog_path, \"w\") as file:\n file.write(CATALOG_FILE)\n\n catalog = intake.open_catalog(catalog_path)\n 
subcatalog = catalog[\"test_relative_root_map\"]()\n # At init time, Broker should resolve the relative path to an absolute one.\n assert subcatalog.root_map[img.save_path] == str(dest)\n\n # But it can only do this if it has a catalog *file* to interpret the path\n # relative to.\n with pytest.raises(ValueError):\n BlueskyJSONLCatalog(f'{directory}/*.jsonl', root_map=root_map)", "title": "" }, { "docid": "fc886b7abe11ea31a8471eb2cbf9e223", "score": "0.46324226", "text": "def update_file_references(self):\n\n self.copies = {copy for copy in self.yield_unchanged_copies()}\n\n self.symlinks = {symlink for symlink in self.yield_unchanged_symlinks()}", "title": "" }, { "docid": "291e8af0d38c2837da2fe2b5bc034fc0", "score": "0.4628297", "text": "def _add_project_root_path(self):\n\n check_path = os.path.abspath(os.getcwd())\n while check_path:\n if check_path == '/':\n break\n\n for marker_file in FILES.values():\n marker_path = os.path.join(check_path, marker_file)\n if os.path.isfile(marker_path):\n self._paths['pwd'] = check_path\n return\n check_path = os.path.dirname(check_path)", "title": "" }, { "docid": "875a7a77f5f0f1cd1ed785daf7f188aa", "score": "0.46257824", "text": "def roots(self, roots):\n\n self._roots = roots", "title": "" }, { "docid": "f8a2f52a575d661dc128652112b56a2a", "score": "0.46132347", "text": "def test_write_io_mount_point_resumed_quorum_restored_x3(self):\n # pylint: disable=too-many-locals,too-many-statements,too-many-branches\n # set cluster.quorum-type to auto\n options = {\"cluster.quorum-type\": \"auto\"}\n g.log.info(\"setting cluster.quorum-type to auto on volume %s\",\n self.volname)\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, (\"Unable to set volume option %s for\"\n \"volume %s\" % (options, self.volname)))\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Creating files on client side\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n\n # Creating files\n cmd = (\"python %s create_files -f 30 %s\"\n % (self.script_upload_path, mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n\n # Validate IO\n self.io_validation_complete = False\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Do IO and check on subvols with nodes to reboot\n subvols_dict = get_subvols(self.mnode, self.volname)\n for subvol in subvols_dict['volume_subvols']:\n # define nodes to reboot\n brick_list = subvol[0:2]\n nodes_to_reboot = []\n for brick in brick_list:\n node, brick_path = brick.split(':')\n nodes_to_reboot.append(node)\n\n # get files to delete/create for nodes to be offline\n node, brick_path = brick_list[0].split(':')\n ret, brick_file_list, _ = g.run(node, 'ls %s' % brick_path)\n self.assertFalse(ret, 'Failed to ls files on %s' % node)\n file_list = brick_file_list.splitlines()\n\n # delete files from mountpoint\n for mount_obj in self.mounts:\n g.log.info(\"Deleting data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n cmd = ('cd %s/ ; rm -rf %s'\n % (mount_obj.mountpoint, ' '.join(file_list)))\n ret, _, _ = g.run(mount_obj.client_system, cmd)\n self.assertFalse(ret, 'Failed to rm file on %s'\n % mount_obj.client_system)\n g.log.info('Files %s are deleted', file_list)\n\n # reboot nodes on subvol and wait while 
rebooting\n g.log.info(\"Rebooting the nodes %s\", nodes_to_reboot)\n ret = reboot_nodes(nodes_to_reboot)\n self.assertTrue(ret, 'Failed to reboot nodes %s '\n % nodes_to_reboot)\n\n # Creating files on nodes while rebooting\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Creating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n\n # Creating files\n cmd = (\"cd %s/ ;\"\n \"touch %s\"\n % (mount_obj.mountpoint, ' '.join(file_list)))\n\n proc = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n\n # Validate IO\n self.io_validation_complete = False\n g.log.info(\"Validating if IO failed with read-only filesystem\")\n ret = is_io_procs_fail_with_rofs(self, self.all_mounts_procs,\n self.mounts)\n self.assertTrue(ret, (\"Unexpected error and IO successful\"\n \" on read-only filesystem\"))\n self.io_validation_complete = True\n g.log.info(\"EXPECTED: \"\n \"Read-only file system in IO while creating file\")\n\n # check if nodes are online\n counter = 0\n timeout = 300\n _rc = False\n while counter < timeout:\n ret, reboot_results = are_nodes_online(nodes_to_reboot)\n if not ret:\n g.log.info(\"Nodes are offline, Retry after 5 seconds ... \")\n time.sleep(5)\n counter = counter + 5\n else:\n _rc = True\n break\n\n if not _rc:\n for node in reboot_results:\n if reboot_results[node]:\n g.log.info(\"Node %s is online\", node)\n else:\n g.log.error(\"Node %s is offline even after \"\n \"%d minutes\", node, timeout / 60.0)\n else:\n g.log.info(\"All nodes %s are up and running\", nodes_to_reboot)\n\n # Wait for volume processes to be online\n g.log.info(\"Wait for volume processes to be online\")\n ret = wait_for_volume_process_to_be_online(self.mnode,\n self.volname)\n self.assertTrue(ret,\n (\"Failed to wait for volume %s processes to \"\n \"be online\", self.volname))\n g.log.info(\"Successful in waiting for volume %s processes to be \"\n \"online\", self.volname)\n\n # Verify volume's all process are online\n g.log.info(\"Verifying volume's all process are online\")\n ret = verify_all_process_of_volume_are_online(self.mnode,\n self.volname)\n self.assertTrue(ret, (\"Volume %s : All process are not online\"\n % self.volname))\n g.log.info(\"Volume %s : All process are online\", self.volname)\n\n # Creating files on nodes after rebooting\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Creating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n\n # Creating files\n cmd = (\"cd %s/ ;\"\n \"touch %s\"\n % (mount_obj.mountpoint, ' '.join(file_list)))\n\n proc = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n\n # Validate IO\n self.io_validation_complete = False\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Do IO and check on subvols without nodes to reboot\n subvols_dict = get_subvols(self.mnode, self.volname)\n for subvol in subvols_dict['volume_subvols']:\n # define nodes to reboot\n brick_list = subvol[0:2]\n nodes_to_reboot = []\n for brick in brick_list:\n node, brick_path = brick.split(':')\n nodes_to_reboot.append(node)\n\n # get files to delete/create for nodes to be online\n new_subvols_dict = get_subvols(self.mnode, self.volname)\n subvol_to_operate = new_subvols_dict['volume_subvols']\n subvol_to_operate.remove(subvol)\n brick_list_subvol_online = subvol_to_operate[0]\n\n node, 
brick_path_vol_online = \\\n brick_list_subvol_online[0].split(':')\n ret, brick_file_list, _ = g.run(node,\n 'ls %s' % brick_path_vol_online)\n self.assertFalse(ret, 'Failed to ls files on %s' % node)\n file_list = brick_file_list.splitlines()\n\n # delete files from mountpoint\n for mount_obj in self.mounts:\n g.log.info(\"Deleting data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n cmd = ('cd %s/ ; rm -rf %s'\n % (mount_obj.mountpoint, ' '.join(file_list)))\n ret, _, _ = g.run(mount_obj.client_system, cmd)\n self.assertFalse(ret, 'Failed to rm file on %s'\n % mount_obj.client_system)\n g.log.info('Files %s are deleted', file_list)\n\n # reboot nodes on subvol and wait while rebooting\n g.log.info(\"Rebooting the nodes %s\", nodes_to_reboot)\n ret = reboot_nodes(nodes_to_reboot)\n self.assertTrue(ret, 'Failed to reboot nodes %s '\n % nodes_to_reboot)\n\n # Creating files on nodes while rebooting\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Creating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n\n # Creating files\n cmd = (\"cd %s/ ;\"\n \"touch %s\"\n % (mount_obj.mountpoint, ' '.join(file_list)))\n\n proc = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n\n # Validate IO\n self.io_validation_complete = False\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # check if nodes are online\n counter = 0\n timeout = 300\n _rc = False\n while counter < timeout:\n ret, reboot_results = are_nodes_online(nodes_to_reboot)\n if not ret:\n g.log.info(\"Nodes are offline, Retry after 5 seconds ... \")\n time.sleep(5)\n counter = counter + 5\n else:\n _rc = True\n break\n\n if not _rc:\n for node in reboot_results:\n if reboot_results[node]:\n g.log.info(\"Node %s is online\", node)\n else:\n g.log.error(\"Node %s is offline even after \"\n \"%d minutes\", node, timeout / 60.0)\n else:\n g.log.info(\"All nodes %s are up and running\", nodes_to_reboot)\n\n # Wait for volume processes to be online\n g.log.info(\"Wait for volume processes to be online\")\n ret = wait_for_volume_process_to_be_online(self.mnode,\n self.volname)\n self.assertTrue(ret,\n (\"Failed to wait for volume %s processes to \"\n \"be online\", self.volname))\n g.log.info(\"Successful in waiting for volume %s processes to be \"\n \"online\", self.volname)\n\n # Verify volume's all process are online\n g.log.info(\"Verifying volume's all process are online\")\n ret = verify_all_process_of_volume_are_online(self.mnode,\n self.volname)\n self.assertTrue(ret, (\"Volume %s : All process are not online\"\n % self.volname))\n g.log.info(\"Volume %s : All process are online\", self.volname)\n\n # Do IO and check and reboot nodes on all subvols\n subvols_dict = get_subvols(self.mnode, self.volname)\n nodes_to_reboot = []\n file_list_for_all_subvols = []\n for subvol in subvols_dict['volume_subvols']:\n # define nodes to reboot\n brick_list = subvol[0:2]\n for brick in brick_list:\n node, brick_path = brick.split(':')\n nodes_to_reboot.append(node)\n\n # get files to delete/create for nodes to be offline\n node, brick_path = brick_list[0].split(':')\n ret, brick_file_list, _ = g.run(node, 'ls %s' % brick_path)\n self.assertFalse(ret, 'Failed to ls files on %s' % node)\n file_list = brick_file_list.splitlines()\n file_list_for_all_subvols.append(file_list)\n\n # delete files from mountpoint\n for mount_obj in 
self.mounts:\n g.log.info(\"Deleting data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n cmd = ('cd %s/ ; rm -rf %s'\n % (mount_obj.mountpoint, ' '.join(file_list)))\n ret, _, _ = g.run(mount_obj.client_system, cmd)\n self.assertFalse(ret, 'Failed to rm file on %s' % node)\n g.log.info('Files %s are deleted', file_list)\n\n # reboot nodes on subvol and wait while rebooting\n g.log.info(\"Rebooting the nodes %s\", nodes_to_reboot)\n ret = reboot_nodes(nodes_to_reboot)\n self.assertTrue(ret, 'Failed to reboot nodes %s '\n % nodes_to_reboot)\n\n # Creating files on nodes while rebooting\n all_mounts_procs, all_mounts_procs_1, all_mounts_procs_2 = [], [], []\n # Create files for 1-st subvol and get all_mounts_procs_1\n for mount_obj in self.mounts:\n g.log.info(\"Creating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n\n # Creating files\n cmd = (\"cd %s/ ;\"\n \"touch %s\"\n % (mount_obj.mountpoint,\n ' '.join(file_list_for_all_subvols[0])))\n\n proc = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n all_mounts_procs_1.append(proc)\n all_mounts_procs.append(all_mounts_procs_1)\n\n # Create files for 2-st subvol and get all_mounts_procs_2\n for mount_obj in self.mounts:\n g.log.info(\"Creating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n\n # Creating files\n cmd = (\"cd %s/ ;\"\n \"touch %s\"\n % (mount_obj.mountpoint,\n ' '.join(file_list_for_all_subvols[1])))\n\n proc2 = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n all_mounts_procs_2.append(proc2)\n all_mounts_procs.append(all_mounts_procs_2)\n\n for mounts_procs in all_mounts_procs:\n # Validate IO\n self.io_validation_complete = False\n g.log.info(\"Validating if IO failed with read-only filesystem\")\n ret = is_io_procs_fail_with_rofs(self, mounts_procs,\n self.mounts)\n self.assertTrue(ret, (\"Unexpected error and IO successful\"\n \" on read-only filesystem\"))\n self.io_validation_complete = True\n g.log.info(\"EXPECTED: \"\n \"Read-only file system in IO while creating file\")\n\n # check if nodes are online\n counter = 0\n timeout = 300\n _rc = False\n while counter < timeout:\n ret, reboot_results = are_nodes_online(nodes_to_reboot)\n if not ret:\n g.log.info(\"Nodes are offline, Retry after 5 seconds ... 
\")\n time.sleep(5)\n counter = counter + 5\n else:\n _rc = True\n break\n\n if not _rc:\n for node in reboot_results:\n if reboot_results[node]:\n g.log.info(\"Node %s is online\", node)\n else:\n g.log.error(\"Node %s is offline even after \"\n \"%d minutes\", node, timeout / 60.0)\n else:\n g.log.info(\"All nodes %s are up and running\", nodes_to_reboot)\n\n # Wait for volume processes to be online\n g.log.info(\"Wait for volume processes to be online\")\n ret = wait_for_volume_process_to_be_online(self.mnode,\n self.volname)\n self.assertTrue(ret,\n (\"Failed to wait for volume %s processes to \"\n \"be online\", self.volname))\n g.log.info(\"Successful in waiting for volume %s processes to be \"\n \"online\", self.volname)\n\n # Verify volume's all process are online\n g.log.info(\"Verifying volume's all process are online\")\n ret = verify_all_process_of_volume_are_online(self.mnode,\n self.volname)\n self.assertTrue(ret, (\"Volume %s : All process are not online\"\n % self.volname))\n g.log.info(\"Volume %s : All process are online\", self.volname)\n\n # Creating files on nodes after rebooting\n all_mounts_procs, all_mounts_procs_1, all_mounts_procs_2 = [], [], []\n # Create files for 1-st subvol and get all_mounts_procs_1\n for mount_obj in self.mounts:\n g.log.info(\"Creating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n\n # Creating files\n cmd = (\"cd %s/ ;\"\n \"touch %s\"\n % (mount_obj.mountpoint,\n ' '.join(file_list_for_all_subvols[0])))\n\n proc = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n all_mounts_procs_1.append(proc)\n all_mounts_procs.append(all_mounts_procs_1)\n\n # Create files for 2-st subvol and get all_mounts_procs_2\n for mount_obj in self.mounts:\n g.log.info(\"Creating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n\n # Creating files\n cmd = (\"cd %s/ ;\"\n \"touch %s\"\n % (mount_obj.mountpoint,\n ' '.join(file_list_for_all_subvols[1])))\n\n proc2 = g.run_async(mount_obj.client_system, cmd,\n user=mount_obj.user)\n all_mounts_procs_2.append(proc2)\n all_mounts_procs.append(all_mounts_procs_2)\n\n for mounts_procs in all_mounts_procs:\n # Validate IO\n self.io_validation_complete = False\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True", "title": "" }, { "docid": "0c0bc85a87fc61879b24444581869f95", "score": "0.46022454", "text": "def _load_file_system(self):\n \n #a = time.time()\n # parse the paths so we get full paths\n root_dir = self.FILE_SYSTEM.open_dir(inode=self.ROOT_INUM)\n self.fs_inode_to_path[self.ROOT_INUM] = \"/\"\n self._parse_paths(root_dir,\"/\")\n \n #print \"Total time to process path structure: %f s\" % (time.time() - a)\n \n # parse all the MFT entries in order\n last_inum = self.FILE_SYSTEM.info.last_inum\n for inode_num in xrange(0, last_inum+1):\n self._load_file_entry(inode_num)", "title": "" }, { "docid": "445349ba3d3f9959f417c2757717d259", "score": "0.46003407", "text": "def _set_current_data(self, file_name: str) -> None:\n if self.has_multiple_files:\n self.file_name = file_name\n\n self.error_prefix = \"File {}\".format(self.file_name)\n self.file_path = Nitpick.current_app().root_dir / self.file_name\n\n # Configuration for this file as a TOML dict, taken from the style file.\n self.file_dict = Nitpick.current_app().config.style_dict.get(TomlFormat.group_name_for(self.file_name), {})\n\n # Nitpick configuration for this file as a TOML dict, taken from the 
style file.\n self.nitpick_file_dict = search_dict(\n 'files.\"{}\"'.format(self.file_name), Nitpick.current_app().config.nitpick_section, {}\n )", "title": "" }, { "docid": "b75f9c1cee3e19028125597cb8151585", "score": "0.45966762", "text": "def substitute_project_root(yaml_file):\n for key in yaml_file.keys():\n current_key = yaml_file[key]\n # If we are dealing with a File object\n if (\n isinstance(current_key, ruamel.yaml.comments.CommentedMap)\n and \"class\" in current_key\n and current_key[\"class\"] == \"File\"\n ):\n new_value = yaml_file[key][\"path\"].replace(\n PIPELINE_ROOT_PLACEHOLDER, ROOT_DIR\n )\n yaml_file[key][\"path\"] = new_value\n\n # If we are dealing with a string\n # Todo: these should be replaced with File types\n if type(yaml_file[key]) == str:\n new_value = yaml_file[key].replace(PIPELINE_ROOT_PLACEHOLDER, ROOT_DIR)\n yaml_file[key] = new_value\n\n return yaml_file", "title": "" }, { "docid": "5567a64ad571fc977f3a2a8fafb8a95a", "score": "0.45780694", "text": "def restore_files(self, file_list: Sequence[str]) -> Mapping[str, Union[str, bytes]]:\n file_index = self.get_file_ids(file_list)\n result = {}\n for path, file_id in file_index.items():\n result[path] = self.restore_single_file(file_id)\n return result", "title": "" }, { "docid": "9f96e615f1a61d7983c3704af2ae3ce7", "score": "0.45726028", "text": "def test_pillar_refresh_pillar_item(salt_cli, salt_minion, key_pillar):\n key = \"issue-54941-item\"\n\n # We do not expect to see the pillar because it does not exist yet\n ret = salt_cli.run(\"pillar.item\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert key in val\n assert val[key] == \"\"\n\n with key_pillar(key) as key_pillar_instance:\n # The pillar exists now but get reads it from in-memory pillars, no\n # refresh happens\n ret = salt_cli.run(\"pillar.item\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert key in val\n assert val[key] == \"\"\n\n # Calling refresh_pillar to update in-memory pillars\n key_pillar_instance.refresh_pillar()\n\n # The pillar can now be read from in-memory pillars\n ret = salt_cli.run(\"pillar.item\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert key in val\n assert val[key] is True", "title": "" }, { "docid": "16845454ebc098491b06351c14b566c5", "score": "0.45724306", "text": "def _flash_rootfs(self, ROOTFS):\n raise Exception(\"Not implemented!\")", "title": "" }, { "docid": "90e1c65360519916db0dd7ab96a8143f", "score": "0.45709443", "text": "def test_pillar_refresh_pillar_ping(salt_cli, salt_minion, key_pillar):\n key = \"issue-54941-ping\"\n\n # We do not expect to see the pillar because it does not exist yet\n ret = salt_cli.run(\"pillar.item\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert key in val\n assert val[key] == \"\"\n\n with key_pillar(key) as key_pillar_instance:\n ret = salt_cli.run(\"test.ping\", minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert val is True\n\n # The pillar exists now but get reads it from in-memory pillars, no\n # refresh happens\n ret = salt_cli.run(\"pillar.item\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert key in val\n assert val[key] == \"\"\n\n # Calling refresh_pillar to update in-memory pillars\n key_pillar_instance.refresh_pillar()\n\n # The pillar can now be read from in-memory pillars\n ret = salt_cli.run(\"pillar.item\", key, 
minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert key in val\n assert val[key] is True", "title": "" }, { "docid": "4a2b3d21dfe9aca17cb50ed043c81d98", "score": "0.45405373", "text": "def __init__(self, repo_root,\n debug, quiet, no_color,\n out=None):\n\n super(MKCTFAPI, self).__init__()\n self.logger = Logger(debug, quiet, no_color, out)\n\n self.repo_root = Path(repo_root)\n self.logger.debug('repo_root: {}'.format(self.repo_root))\n\n self.glob_conf_path = Path.home() / '.config/mkctf.yml'\n self.logger.debug('glob_conf_path: {}'.format(self.glob_conf_path))\n\n self.glob_conf = load_config(self.glob_conf_path)\n self.logger.debug('glob_conf: {}'.format(self.glob_conf))\n\n self.repo_conf_path = self.repo_root / self.glob_conf['files']['config']['repository']\n self.logger.debug('repo_conf_path: {}'.format(self.repo_conf_path))\n\n self.repo = Repository(self.logger, self.repo_conf_path, self.glob_conf)", "title": "" }, { "docid": "bf19c519496235d0f421260715155cee", "score": "0.45368007", "text": "def _push_file_main(self, file_path):\n with open(file_path) as file_pointer:\n hashes = get_hashes(\n file_pointer,\n settings.UPLOAD_HASH_CHUNK_SIZE,\n hash_funcs,\n )\n file_pointer.seek(0)\n primary_hash = hashes[settings.UPLOAD_PRIMARY_HASH.__name__]\n with RetryUpload(self):\n container = storage.storage_container_proxy.get()\n obj, created = container.get_or_upload_file(file_pointer, primary_hash)\n md5 = hashes.get(hashlib.md5.__name__)\n if md5 != obj.md5:\n raise errors.HashMismatchError\n copy_completed_file(file_path, primary_hash)\n cleaned_hashes = clean_hash_names(hashes)\n return serialize_object(obj, **cleaned_hashes), created", "title": "" }, { "docid": "6e415d9244ff9a64a1059885f67f8c4d", "score": "0.45354697", "text": "def __setup_master_specific_conf_files():\n import params\n\n params.File(hawq_constants.hawq_check_file,\n content=params.hawq_check_content)\n\n params.File(hawq_constants.hawq_slaves_file,\n content=InlineTemplate(\"{% for host in hawqsegment_hosts %}{{host}}\\n{% endfor %}\"))", "title": "" }, { "docid": "9687fca4f32fc19691d47711581c1c11", "score": "0.4530055", "text": "def test_filemanager_setup_valid_with_inplace_data(self):\n print '\\n'; print_message('---- Starting Test: {} ----'.format(inspect.stack()[0][3]), 'ok')\n config_path = 'tests/test_configs/e3sm_diags_complete.cfg'\n config = ConfigObj(config_path)\n db = '{}.db'.format(inspect.stack()[0][3])\n\n filemanager = FileManager(\n database=db,\n event_list=EventList(),\n config=config)\n filemanager.populate_file_list()\n filemanager.update_local_status()\n\n self.assertTrue(isinstance(filemanager, FileManager))\n self.assertTrue(os.path.exists(db))\n self.assertTrue(filemanager.all_data_local())\n os.remove(db)", "title": "" }, { "docid": "79441f886b18df34862b1eb48da0e9e7", "score": "0.45227385", "text": "def masterUFOPaths(self):\n return [sourceDescriptor.path for sourceDescriptor in self.sources]", "title": "" }, { "docid": "32477dbc02a4fd8b914aeba391da4241", "score": "0.45136926", "text": "def sync_workdir_from_guest(self):\n pass", "title": "" }, { "docid": "064cf081837913bc427e36ea4cdee7ff", "score": "0.4511163", "text": "def populate_tree(self):\n\n def create_file(root):\n \"\"\"\n Function used in os.walk via create_files\n\n Parameters\n ----------\n root : string\n This is where the files will be written\n \"\"\"\n\n file_path = os.path.join(root, self.random_string())\n while(os.path.exists(file_path)):\n file_path = os.path.join(root, 
self.random_string())\n\n delta_time = self.config[\"end\"] - self.config[\"start\"]\n atime = self.config[\"start\"] + int(random.choice(xrange(delta_time)))\n mtime = self.config[\"start\"] + int(random.choice(xrange(delta_time)))\n\n self.debug(\"Creating file %s\" % file_path, blue)\n file = open(file_path, 'w')\n\n max_size = self.config[\"size\"] * 1024\n chars_to_use = self.legal_chars + \"\\n\"\n content = self.random_string(max_size,legal_chars=chars_to_use)\n file.write(content)\n\n file.close()\n os.utime(file_path, (atime, mtime))\n self.fileCount+=1\n\n def create_files(root):\n \"\"\"\n Function used in os.walk to manage creation of files\n\n Parameters\n ----------\n root : string\n Current root\n \"\"\"\n\n num_files_to_create = random.choice(xrange(self.config[\"files\"]))\n for _ in xrange(0, num_files_to_create):\n create_file(root)\n\n for root, _ , _ in os.walk(self.config[\"target\"]):\n create_files(root)", "title": "" }, { "docid": "79515513d521eba51279976fbd609183", "score": "0.4510647", "text": "def localfiles(self):\n\n for shape in self.loc_fl.keys():\n for ota in info.mel_ota_list:\n local_lyr = paths.ktima(ota, shape, ext=True)\n\n if os.path.exists(local_lyr):\n self.loc_fl[shape].add(str(ota))", "title": "" }, { "docid": "063dc179e0f86e6f9ea2ca2e4a734f28", "score": "0.45057848", "text": "def rsync_done_opener(main_path): \n\n # Creates a list to store \"rsync_done\" files descriptor\n rsync_done_files = list()\n\n for file_path in recursive_walk(main_path, files_only=True):\n if basename(file_path) == RSYNC_PATTERN:\n # Opens each file\n try:\n rsync_done_files.append(open(file_path, \"r\"))\n except IOError:\n print >> stderr, \"Invalid argument - [\" + path + \"] is not an openable file.\"\n continue\n\n return rsync_done_files", "title": "" }, { "docid": "fd45492ebaefa8a4b85c2a49e0a720bf", "score": "0.45017883", "text": "def __refresh(self) -> None:\n try:\n self.__load_root()\n self.__update_root()\n self.__update_timestamp()\n self.__update_snapshot()\n except Exception:\n self.close()\n raise", "title": "" }, { "docid": "cd9a41610f617d77d3c2ee829bc309d9", "score": "0.44998658", "text": "def alignToMakeMaster(self):\n # Define a running variable that contains the refList. 
This\n # will be updated after each loop.\n refList = self.firstRefList\n\n\n for ii in range(0, len(self.images)):\n alignRoot = self.alignDir + 'align_'\n alignRoot += str(ii) + '_' + self.fields[ii]\n\n alignRootBoot = self.alignDir + 'align_boot_'\n alignRootBoot += str(ii) + '_' + self.fields[ii]\n\n starList = self.images[ii] + '_rms_dist.lis'\n\n # ==========\n # Re-order refList to shift common source to the top.\n # ==========\n newRefList = self.lisDir + 'ref_%d_%s' % (ii, refList)\n newStarList = self.lisDir + 'nirc2_%d_%s' % (ii, starList)\n\n # Ref List\n shiftToTopOfList(self.lisDir + refList, newRefList, \n self.refStars[self.fields[ii]])\n\n # Star List\n shiftToTopOfList(self.lisDir + starList, newStarList, \n self.refStars[self.fields[ii]])\n\n # Get the align data type\n fitsFile = self.dataDir + '../%s.fits' % (self.images[ii])\n alignType = dataUtil.get_align_type(fitsFile, errors=True)\n\n # Get the angle\n posAngle = dataUtil.get_pos_angle(fitsFile)\n if posAngle != 0 and self.alignOrder == 0:\n print 'You must allow for rotation with alignOrder > 0'\n\n # ==========\n # Align\n # ==========\n _list = open(alignRoot + '.list', 'w')\n _list.write('%s %s ref\\n' % (newRefList, alignType))\n _list.write('%s %d\\n' % (newStarList, alignType))\n _list.close()\n\n print '\\n*** Aligning %s ***' % starList\n cmd = 'java align -v -p -a %d -r %s %s.list' % (self.alignOrder, alignRoot, alignRoot)\n os.system(cmd)\n\n # Bootstrap\n print '\\n*** Aligning %s (bootstrap) ***' % starList\n ntrials = 100\n cmd = 'java align -v -p -a %d -n %d -r %s %s.list' % \\\n (self.alignOrder, ntrials, alignRootBoot, alignRoot)\n os.system(cmd)\n\n # ==========\n # Make a new reference list out of the previous align results\n # This builds up a list after each alignment\n # ==========\n refList = 'aligned_%d_%s' % (ii, starList)\n starPrefix = 'ep%d' % ii\n\n makeNewRefList(alignRootBoot, self.lisDir + refList, \n starPrefix=starPrefix)\n\n shutil.copyfile(self.lisDir + refList, self.lisDir + 'master.lis')", "title": "" }, { "docid": "62154abd99604418ff3d9c23d83dfa02", "score": "0.44965872", "text": "def _populatedata(self):\n file_basename = os.path.basename(self._filename)\n\n # FIXME - this code is crazy\n path = self._filename.replace(self._root, '')\n path = path.replace(os.path.basename(self._filename), '')\n path = path[:-1]\n\n absolute_path = self._filename.replace(self._datadir, '')\n absolute_path = self._filename.replace(self._datadir, '', 1)\n absolute_path = absolute_path.replace(file_basename, '')\n absolute_path = absolute_path[1:][:-1]\n\n if absolute_path and absolute_path[-1] == \"/\":\n absolute_path = absolute_path[0:-1]\n\n filenamenoext = os.path.splitext(file_basename)[0]\n if absolute_path == '':\n file_path = filenamenoext\n else:\n file_path = '/'.join((absolute_path, filenamenoext))\n\n tb_id = '%s/%s' % (absolute_path, filenamenoext)\n tb_id = re.sub(r'[^A-Za-z0-9]', '_', tb_id)\n\n self._metadata.update({\n 'path': path,\n 'absolute_path': absolute_path,\n 'file_path': file_path,\n 'tb_id': tb_id,\n 'basename': filenamenoext,\n 'filename': self._filename\n })\n\n self.set_time(self._timetuple)\n\n config = self._request.get_configuration()\n\n fileext = os.path.splitext(self._filename)\n if fileext:\n fileext = fileext[1][1:]\n eparser = config['extensions'][fileext]\n entrydict = eparser(self._filename, self._request)\n\n # Update the _metadata directly skipping over this class'\n # dict-like stuff. 
Otherwise we end up in a vicious loop!\n self._metadata.update(entrydict)\n self.__populated = 1", "title": "" }, { "docid": "c055d1bad6d6d88cfda91a11b5bda6b7", "score": "0.4490046", "text": "def referance_duplicate_to_master(master_file, duplicate_file):\n\n duplicate_file['real_path'] = master_file['real_path']\n duplicate_file['version_id'] = master_file['version_id']\n\n return duplicate_file", "title": "" }, { "docid": "81cf05068ea44b13c772597da08201b0", "score": "0.44894654", "text": "def _readPath(self):\n try:\n name = nuke.root().name()\n if name == \"Root\":\n self.path = os.path.join(os.getcwd(), \"untitled.nk\")\n else:\n self.path = name\n except:\n # in the PLE, nuke.root() returns None\n self.path = os.path.join(os.getcwd(), \"untitled.nk\")", "title": "" }, { "docid": "99bbe7cebc6e0f8348ae920f032b7c70", "score": "0.44876128", "text": "def all_codes(self):\n from q import Q\n if not self.settings.WORKDIR:\n raise QError(\"Ticket storage directory WORKDIR is not set.\")\n if not os.path.isdir(self.settings.WORKDIR):\n Q.wr(\"Initialize\", \"Creating ticket directory '%s'.\", self.settings.WORKDIR)\n mkpath(self.settings.WORKDIR)\n ret = []\n for p in os.listdir(self.settings.WORKDIR):\n if os.path.isfile(self.settings.WORKDIR+\"/\"+p+\"/README\"):\n ret.append(p)\n return ret", "title": "" }, { "docid": "1a9e48c2a37df142cf18bdc28ca241ad", "score": "0.4479799", "text": "def _init_1d_correlations_hists_from_root_file(self) -> None:\n self._init_hists_from_root_file(hists = self.correlation_hists_delta_phi)\n self._init_hists_from_root_file(hists = self.correlation_hists_delta_eta)", "title": "" }, { "docid": "1a9e48c2a37df142cf18bdc28ca241ad", "score": "0.4479799", "text": "def _init_1d_correlations_hists_from_root_file(self) -> None:\n self._init_hists_from_root_file(hists = self.correlation_hists_delta_phi)\n self._init_hists_from_root_file(hists = self.correlation_hists_delta_eta)", "title": "" }, { "docid": "50752ee90167f2acf046dec9a7ac0bb0", "score": "0.4477912", "text": "def root(self, import_path):", "title": "" }, { "docid": "cd1ba8147809695c423aad0633b2d7a0", "score": "0.44775957", "text": "def backup_keys(self):\r\n dir_pre = os.path.join('/', self._key_bak_pre, self._folder_pre)\r\n if dir_pre in self.etcd:\r\n self.etcd.delete(key=dir_pre, dir=True, recursive=True)\r\n for x in self._files:\r\n items = self.get_keys(cfg_name=x['name'])\r\n for k, v in items.items():\r\n ret = self.etcd.write(\r\n key=os.path.join(dir_pre, x['name'], k), value=v)\r\n msg = 'Etcd Key Backup: %s.' 
% ret\r\n app.logger.info(logmsg(msg))", "title": "" }, { "docid": "eb8cdf79b0043d6725034bc0563101da", "score": "0.44770062", "text": "def _fetch_root_resources(self):\n status, headers, service_root_resp = (\n self._conn._rest_get(self._root_prefix))\n self._root_resp = service_root_resp", "title": "" }, { "docid": "6765a2a41a9fca6174bdd37b77d57c23", "score": "0.44691765", "text": "def rehash(self):\n\t\tself.load(self.fileName)\n\t\tlog(\"Reloading configuration.\")", "title": "" }, { "docid": "a7d4f70c2752213d243d2864244422a2", "score": "0.44657946", "text": "def test_file_root_hash(self):\n root = ipfs_file(b'definetly not a dir')\n with self.assertRaises(InvalidIPFSPathException):\n with ipfs_mounted(root, ipfs_client):\n pass", "title": "" }, { "docid": "36df502f15ac7431e7d165bf2af92466", "score": "0.44648266", "text": "def test_pull_local_old(self, app, client):\n\n repo_dir = os.path.join(self.testing_repos['simple'], 'subdir')\n if os.path.exists(repo_dir):\n shutil.rmtree(repo_dir)\n\n self.pull_and_wait(client, repo_dir)\n\n with open(repo_dir + '/subfile.txt', 'r') as local_file:\n assert local_file.readline() == 'subfile content\\n'\n\n # Modify in local repo and change its modification time\n with open(repo_dir + '/subfile.txt', 'w') as local_file:\n local_file.write('This was touched locally\\n')\n\n self.set_old_atime(repo_dir + '/subfile.txt', age=3600 * 30000, recursive=False)\n\n # Try to pull a dir already pulled just before\n self.pull_and_wait(client, repo_dir)\n\n assert os.path.exists(repo_dir + '/subfile.txt')\n assert os.path.isdir(repo_dir + '/subsubdir')\n assert os.path.exists(repo_dir + '/subsubdir/subsubfile.txt')\n assert os.path.exists(repo_dir + '/subsubdir/poutrelle.xml')\n\n with open(repo_dir + '/subfile.txt', 'r') as local_file:\n assert local_file.readline() == 'This was touched locally\\n'\n\n if os.path.exists(repo_dir):\n shutil.rmtree(repo_dir)", "title": "" }, { "docid": "0d2035b64f96d041e84717151e7f175e", "score": "0.44598183", "text": "def upload_local_changes():\n\n while not mqueue.modified.empty():\n file = mqueue.modified.get()\n \n if file in mqueue.client_modified:\n continue\n\n print \"Detect %s modified\" % file\n \n url = \"%s/files/%s/hashes\" % (server_url, file)\n try:\n fd = urllib2.urlopen(url)\n except URLError, e:\n if hasattr(e, 'reason'):\n mqueue.postponed_modified.append(file)\n continue\n except HTTPError:\n # The server may return us an error if things have gone south during\n # the file creation process.\n # If so, mark the file as added instead of modified.\n print \"mark as added instead\"\n mqueue.added.put(file)\n continue\n\n hashes = json.load(fd)\n try:\n patchedfile = open(secure_path(cagibi_folder, file), \"rb\")\n deltas = encode_deltas(rsyncdelta(patchedfile, hashes))\n patchedfile.close()\n except IOError:\n print \"IOError - continuing\"\n # The file may be locked by another process\n # Add it to the postponed list. 
\n mqueue.postponed_modified.append(file)\n continue\n\n # Send the deltas to the server.\n post_data = {}\n post_data[\"deltas\"] = json.dumps(deltas)\n post_string = urllib.urlencode(post_data)\n fd = urllib2.urlopen(url, post_string)\n results = json.load(fd)\n \n local_files = load_config(\"files.json\")\n local_files[file][\"rev\"] = results[\"rev\"]\n save_config(local_files, filename=\"files.json\") \n\n opener = urllib2.build_opener(urllib2.HTTPHandler)\n\n while not mqueue.added.empty():\n file = mqueue.added.get()\n print \"Detected %s added\" % file\n\n put_data = {}\n try:\n fd = open(secure_path(cagibi_folder, file), \"r\")\n put_data[\"contents\"] = fd.read()\n put_string = urllib.urlencode(put_data)\n fd.close()\n except IOError, e:\n print e\n continue\n\n url = \"%s/files/%s\" % (server_url, file)\n request = urllib2.Request(url, data=put_string)\n request.add_header('Content-Type', 'application/json')\n request.get_method = lambda: 'PUT'\n try:\n url = opener.open(request)\n local_files = load_config(\"files.json\")\n local_files[file] = {\"rev\": 1}\n save_config(local_files, filename=\"files.json\") \n\n except URLError, e:\n if hasattr(e, 'reason'):\n mqueue.postponed_added.append(file)\n continue\n except HTTPError:\n continue\n\n while not mqueue.removed.empty():\n file = mqueue.removed.get()\n print \"Detect %s removed\" % file\n\n url = \"%s/files/%s\" % (server_url, file)\n request = urllib2.Request(url)\n request.get_method = lambda: 'DELETE'\n try:\n url = opener.open(request)\n except URLError, e:\n if hasattr(e, 'reason'):\n mqueue.postponed_removed.append(file)\n continue\n except HTTPError:\n continue", "title": "" }, { "docid": "01bb4abc520bfa57a0740e6637646ebf", "score": "0.44562474", "text": "def load_from_poscar(self, filename):\n atomic_set.load_from_poscar(self, filename)\n self.forces = zeros(self.atoms.shape)\n self.energy = 0", "title": "" }, { "docid": "7a04651ae9e11ec83c362317966dbc49", "score": "0.44556227", "text": "def form_transfer(xmap, as_subform=0):\n initialize = 0\n info = create_obj_xmap_mode(xmap)\n\n print_text(TXT_TRANSFER_INFO.format(**info))\n\n # not use info[] for 'ignore' and 'files' because in info[] are string\n origin = info['origin']['path'] \n destination = info['destination']['path']\n ignore_test = ignore_rules(xmap.get(\"ignore\"))\n objects = xmap.get('files')\n is_path_transfer = not objects\n if not exists_master_path(info['origin']['path']):\n print_error(TXT_ERR_MASTER_OBJECT_NOT_EXISTS.format(storage=info['origin']['name'],path = info['origin']['path']))\n objects = ()\n\n elif is_path_transfer: \n # get all objects in master path\n objects = ls_master_path(origin)\n else:\n # if not 'is_master_path'\n # check existence of all files specified in xmap['files']\n all_exists = True\n for file in ( \"/\".join([info['origin']['path'], obj]) for obj in objects):\n if not exists_master_file(file):\n print_error(TXT_ERR_MASTER_OBJECT_NOT_EXISTS.format(storage=info['origin']['name'], path=file))\n all_exists = False\n if not all_exists:\n # remove all objects, stop transfer\n objects = iter(())\n\n for obj in objects:\n # Objects for cicle on s3 master return error message and iter(())\n # if bucket name is wrong!\n #\n # Objects is generator, don't give error until first access\n # for this reason\n # '_init_fun' is inside the loop:\n # is useful to NOT delete local slave folder\n # if bucket objects not exists!\n obj_data = None if obj.endswith(\"/\") else get_master_file(\"/\".join([origin, obj]))\n obj_fullpath = 
\"/\".join([destination, obj])\n if not initialize:\n initialize = 1\n _init_fun = _init_path_transfer if is_path_transfer else _init_files_transfer\n if not _init_fun(info):\n break # objects transfer\n if ignore_test(obj):\n print_blue(TXT_TRANSFER_IGNORE.format(storage=info['destination']['name'], object=obj))\n else:\n print_text(TXT_SAVE_OBJECT.format(storage=info['destination']['name'],root=info['destination']['path'], object=obj))\n if not mk_slave_object(obj_fullpath, obj_data):\n break # objects transfer\n else: # and complete cicle of for obj in objects\n if initialize:\n if CONFIG['MAIN'].getboolean('show_transfer_detail'):\n print_success(TXT_TRANSFER_COMPLETE)\n if as_subform:\n return 1\n else:\n return\n\n # here if cicle inclomplete or not 'initializate' == 0\n print_warning(TXT_WARNING_EXIT_TASK_WITH_ERROR)\n if as_subform:\n return 0\n # else return None", "title": "" }, { "docid": "a8a1a102cb1aced09d3232a6b01fe488", "score": "0.4454129", "text": "def set_root(self, root):\n self.root.key = root.key", "title": "" }, { "docid": "45aee2077f7df3697c6c15aa35b3b970", "score": "0.445254", "text": "def _update_stage_paths(self):\n\n # Get the expanded path for the 'MAST_HLSP' directory.\n cwd = os.getcwd()\n self._root = os.path.join(cwd.split(self._root_dir, 1)[0],\n self._root_dir,\n )\n\n # Default filename should be in the root directory named hlsp_name.hlsp\n default_name = self._get_filename()\n self._default_path = os.path.join(self._root, default_name)\n\n # Construct file path for check_file_names.py results.\n cfn_name = self._get_filename(self._check_file_names_out)\n self._cfn_path = os.path.join(self._root,\n self._check_file_names_dir,\n cfn_name,\n )\n\n # Construct file path for precheck_data_format.py reults.\n pcdf_name = self._get_filename(self._precheck_metadata_format_out)\n self._pcdf_path = os.path.join(self._root,\n self._check_metadata_format_dir,\n pcdf_name\n )\n\n # Construct file path for check_metadata_format.py results.\n cmd_name = self._get_filename(self._check_metadata_format_out)\n self._cmd_path = os.path.join(self._root,\n self._check_metadata_format_dir,\n cmd_name,\n )", "title": "" }, { "docid": "4085693ee35d7e1c1551521c9c7adb4c", "score": "0.44417578", "text": "def get_all_pickles(self):\n if self.__pickle_files:\n # self.pickles = {pickle.load(file) for file in self.__pickle_files]\n return self.pickles\n else:\n raise FileNotFoundError", "title": "" }, { "docid": "d78372c659d8249b7cda9feed02dceab", "score": "0.44313928", "text": "def set_master_dir(set_dir_to):\n global MASTER_DIR\n\n MASTER_DIR = set_dir_to\n # TODO - remove this function", "title": "" }, { "docid": "109acfa1d5db3bc630981196865c0d4c", "score": "0.44312644", "text": "def _set_all_owners(self):\n direct_dependency = get_folders(self.changed_files)\n all_folders = set(direct_dependency)\n for folder in direct_dependency:\n folder_name = folder.replace(self.repo_root, \"\")\n dependencies = {\n f\"{self.repo_root}{f}\"\n for f in self.transitive_dependencies.get(folder_name, [])\n }\n all_folders = all_folders.union(dependencies)\n\n for folder in all_folders:\n folder_name = folder.replace(self.repo_root, \"\")\n self.owners[folder_name] = self._get_folder_owners(folder)", "title": "" }, { "docid": "3e434c0c55e656f936cc54185f416a1a", "score": "0.44302025", "text": "def restore_user_files(logged_in, client):\n\n try:\n folder = read_bytes_until(client[0], \"\\n\")\n print_connection_event(client[1], \"Upload args: \", folder, \" \")\n except:\n 
print_connection_event(client[1], \"Error in request for restoration\", \"RBR ERR\", \"<-\")\n client[0].sendall(\"RBR ERR\\n\".encode())\n exit(2)\n\n dirpath = os.path.join(logged_in, folder)\n if not os.path.isdir(dirpath):\n print_connection_event(client[1], \"Directory not found\", \"RBR EOF\", \"<-\")\n client[0].sendall(\"RBR EOF\\n\".encode())\n exit(1)\n\n file_list = [f for f in os.scandir(dirpath) if f.is_file()]\n message = \"RBR {}\".format(len(file_list))\n\n print_connection_event(client[1], \"Start sending back files\", message, \"<-\")\n client[0].sendall(message.encode())\n\n for user_file in file_list:\n\n f_stat = user_file.stat()\n f_time = strftime(\"%d.%m.%Y %H:%M:%S\", gmtime(f_stat.st_mtime))\n mess_part = \" {} {} {} \".format(user_file.name, f_time, f_stat.st_size)\n print_connection_event(client[1], \" Sending {}\".format(user_file.name), \"\", \" \")\n client[0].sendall(mess_part.encode())\n\n filefd = os.open(user_file.path, os.O_RDONLY)\n for data in chunked_read_fd(filefd, f_stat.st_size, 4096):\n client[0].sendall(data)\n print_connection_event(client[1], \" Sent {}\".format(user_file.name), \"\", \" \")\n\n client[0].sendall(\"\\n\".encode())\n print_connection_event(client[1], \"Finished sending back files\", message, \"<-\")", "title": "" }, { "docid": "60f64198dfebe7daddc362c9b12e5a14", "score": "0.44281995", "text": "def test_pillar_refresh_pillar_get(salt_cli, salt_minion, key_pillar):\n key = \"issue-54941-get\"\n\n # We do not expect to see the pillar because it does not exist yet\n ret = salt_cli.run(\"pillar.get\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert val == \"\"\n\n with key_pillar(key) as key_pillar_instance:\n # The pillar exists now but get reads it from in-memory pillars, no\n # refresh happens\n ret = salt_cli.run(\"pillar.get\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert val == \"\"\n\n # Calling refresh_pillar to update in-memory pillars\n key_pillar_instance.refresh_pillar()\n\n # The pillar can now be read from in-memory pillars\n ret = salt_cli.run(\"pillar.get\", key, minion_tgt=salt_minion.id)\n assert ret.returncode == 0\n val = ret.data\n assert val is True, repr(val)", "title": "" }, { "docid": "3c427165f25dffff7f08c043cd4dffda", "score": "0.44203442", "text": "def __load_processed_files(self):\n if self.correctDir.get() is not None and self.display_hocr is not None:\n self.gui['hocr_list'].delete(0, tk.END)\n for k, v in self.display_hocr.get_file_listing().items():\n self.gui['hocr_list'].insert(tk.END, k)\n self.__poll_processed_list()", "title": "" }, { "docid": "a7825aff335266e7e09e79a58467bc8e", "score": "0.44156718", "text": "def __init__(self, root):\n self.root = root\n self.listdir = os.listdir(self.root)\n # self.args = args", "title": "" }, { "docid": "1a8dd66568bc1a407a839e3268e52295", "score": "0.4412801", "text": "def restore_config_file(self):\n if PS == \"Windows\":\n common_config_file = world.config[\"sd_server\"][\"common_config_file_win\"]\n common_config_file_backup = world.config[\"sd_server\"][\"common_config_file_backup_win\"]\n else:\n common_config_file = world.config[\"sd_server\"][\"common_config_file\"]\n common_config_file_backup = world.config[\"sd_server\"][\"common_config_file_backup\"]\n shutil.copy2(common_config_file_backup, common_config_file)\n world.config_file_backed_up = False", "title": "" }, { "docid": "954ab868dacb74549f52e1c73e2aa4de", "score": "0.44106334", "text": "def paths_update(self, 
al_branchNodes):\n for node in al_branchNodes:\n #print \"appending %s\" % node\n l_pwd = self.ml_cwd[:]\n l_pwd.append(node)\n #print \"l_pwd: %s\" % l_pwd\n #print \"ml_cwd: %s\" % self.ml_cwd\n self.ml_allPaths.append(l_pwd)", "title": "" }, { "docid": "7486bd9e563bc290b4dee44c09ae6fe4", "score": "0.44104803", "text": "def GitConfigRebaseMaster(cwd):\n proc.check_call(\n ['git', 'config', 'branch.master.rebase', 'true'], cwd=cwd)", "title": "" }, { "docid": "c1804939a76345fdb971d8e45cef6982", "score": "0.4406382", "text": "def sync_workdir_to_guest(self):\n pass", "title": "" }, { "docid": "abda81b089649217106040067c65cd92", "score": "0.4406118", "text": "def _phon_poscar_setup(self):\n name = self.keywords['name']\n pospath = os.path.join(name, \"POSCAR\")\n prepath = os.path.join(name, \"POSCAR_prePHON\")\n if os.path.isfile(pospath): #Already done. Return.\n return\n my_poscar = Poscar.from_file(prepath) \n my_poscar.selective_dynamics=None #unset SD if it is set\n my_poscar.velocities=None #unset velocities\n dirutil.lock_directory(name)\n my_poscar.write_file(pospath)\n dirutil.unlock_directory(name)\n #pick up a copy and strip out the elements line.\n mypfile = MASTFile(pospath)\n myline6=mypfile.get_line_number(6)\n if myline6.strip().split()[0].isalpha:\n mypfile.modify_file_by_line_number(6,\"D\")\n mypfile.to_file(pospath)\n return", "title": "" }, { "docid": "4f4b534a756edc052c871dbb64fbed1a", "score": "0.44045126", "text": "async def restore(self) -> None:\n if self._core_config_changed:\n await reset_hass_config(self._hass)\n self._core_config_changed = False\n _LOGGER.debug(\"Finished exam: %s\", self.config_file_full)", "title": "" }, { "docid": "63b27fa09dc08c006b0c4df770763c54", "score": "0.44001788", "text": "def init_folders(self):\n filename = pkg_resources.resource_filename('pub2', 'skel')\n pathname = os.path.dirname(os.path.abspath(__file__))\n os.system(\"mrbob -w {0} -O {1}\".format(opj(pathname, filename), self.working_dir))\n os.remove(opj(self.working_dir, \".mrbob.ini\"))\n os.remove(opj(self.working_dir, \"_pubs/_assets/.gitignore\"))\n os.remove(opj(self.working_dir, \"_data/.gitignore\"))", "title": "" }, { "docid": "0555b26754304148dd07b0656cd5e199", "score": "0.43957016", "text": "def _initialize(self):\r\n status, out, err = git.ls_tree('--full-tree', '-r', '-t', '-z',\r\n self.ref)\r\n if status != 0:\r\n Interaction.log_status(status, out, err)\r\n return\r\n\r\n if not out:\r\n return\r\n\r\n for line in out[:-1].split('\\0'):\r\n # .....6 ...4 ......................................40\r\n # 040000 tree c127cde9a0c644a3a8fef449a244f47d5272dfa6\trelative\r\n # 100644 blob 139e42bf4acaa4927ec9be1ec55a252b97d3f1e2\trelative/path\r\n objtype = line[7]\r\n relpath = line[6 + 1 + 4 + 1 + 40 + 1:]\r\n if objtype == 't':\r\n parent = self.dir_entries[utils.dirname(relpath)]\r\n self.add_directory(parent, relpath)\r\n elif objtype == 'b':\r\n self.add_file(relpath)", "title": "" }, { "docid": "f8c6f5f13538dd5c9b59a5241e278869", "score": "0.43914732", "text": "def merge_rootfile(filename=None,RunList='',caltag=None): \n\n if filename == None:\n return None\n\n if caltag == None:\n return None\n\n flags='hadd '+filename+' '\n for run in RunList:\n name=os.path.splitext(os.path.basename(run))[0]\n flags=flags+'root-files/X0-'+name+'-'+caltag+'-reco.root '\n\n if os.path.isfile(filename):\n os.remove(filename)\n subprocess.call(flags, shell=True)\n\n return None", "title": "" }, { "docid": "15cabb85f6b60af409dacfd0041986f2", "score": "0.43858448", "text": "def 
rootfs(self):\n return self._rootfs", "title": "" }, { "docid": "4b572a3eceb1b26ae17521983fc6ff58", "score": "0.43759075", "text": "def refresh_file_dir(self):\n res = cmd_get_free_space(self.hid)\n if res:\n self.filedir.bytesLeft = uint32_toint(res[3:7])\n capacity = uint32_toint(res[7:11])\n self.filedir.bytesUsed = capacity - self.filedir.bytesLeft\n res = cmd_get_num_files(self.hid)\n if res:\n self.filedir.files = []\n self.filedir.numFiles = uint16_toint(res[3:5])\n for i in range(64):\n res = cmd_get_dir_entry(self.hid, i+1)\n d = PFxFile()\n d.from_bytes(res)\n if d.id < 0xFF:\n self.filedir.files.append(d)", "title": "" }, { "docid": "44fdd0352492dcce31a3e876a40f8399", "score": "0.43650916", "text": "def get_pillar(self, ext_pillar_conf):\n with patched_environ(GIT_SSH=self.git_ssh):\n return super().get_pillar(ext_pillar_conf)", "title": "" }, { "docid": "9f0722c90a09d70a2c0e39c3f89c4112", "score": "0.4360991", "text": "def setup(self):\n self.temporary_file_list = False\n # store current pysat directory\n self.saved_data_path = pysat.data_dir\n\n pysat.data_dir = ''\n re_load(pysat._files)", "title": "" }, { "docid": "81af24b700a1eccf7ed66a2d2867b830", "score": "0.43600908", "text": "def repair_master(self):\n\n # change into the right directory\n self.change_path()\n\n branches = self.git_branch()\n # unneeded, if the master branch already exists\n if \"master\" in branches:\n return\n\n # we have to use this\n # (http://git-annex.branchable.com/direct_mode/)\n self.execute_command([\"git\", \"-c\", \"core.bare=false\", \"commit\", \"--allow-empty\", \"-m\", \"empty commit\"])", "title": "" }, { "docid": "d453f63c7cde4624fe7a81fa53aaaa53", "score": "0.43585685", "text": "def files_root(self):\n _s3 = self._s3_connection()\n\n files = []\n bucket = _s3.Bucket(self.bucket_name)\n\n for f in bucket.objects.all():\n files.append(f.key)\n\n return files", "title": "" }, { "docid": "ff6096a1758d0a4183a5ece162fe9b73", "score": "0.43564025", "text": "def _initialize_master_working_set():\n working_set = WorkingSet._build_master()\n _declare_state('object', working_set=working_set)\n\n require = working_set.require\n iter_entry_points = working_set.iter_entry_points\n add_activation_listener = working_set.subscribe\n run_script = working_set.run_script\n # backward compatibility\n run_main = run_script\n # Activate all distributions already on sys.path with replace=False and\n # ensure that all distributions added to the working set in the future\n # (e.g. 
by calling ``require()``) will get activated as well,\n # with higher priority (replace=True).\n tuple(dist.activate(replace=False) for dist in working_set)\n add_activation_listener(\n lambda dist: dist.activate(replace=True),\n existing=False,\n )\n working_set.entries = []\n # match order\n list(map(working_set.add_entry, sys.path))\n globals().update(locals())", "title": "" }, { "docid": "5d747b54b624e20417c7b0438f8f98dc", "score": "0.43535233", "text": "def readfiles(self):\n x = clprocessfiles()", "title": "" }, { "docid": "fad49a4820d857e735ebcfff89fde2c1", "score": "0.43533996", "text": "def set_master(self, master_server, get_cur_log_pos = True):\n\n self.wsrep_cluster_addr = '127.0.0.1:%d' %(master_server.galera_listen_port)", "title": "" }, { "docid": "bcd8b2b67a025446ee35550fc3bc02e4", "score": "0.4343609", "text": "def find_roots(cls, root: typing.Union[str, bytes, PathLike, PurePath]):\n root = Path(root)\n actual_roots = {}\n for sequence_number in range(11):\n try:\n actual_roots[sequence_number] = kitti_loader.find_root(root, sequence_number)\n except FileNotFoundError:\n continue\n return actual_roots", "title": "" } ]
fe8646ab98b360f3a828c884c49582f2
Creates a new instance of a data page and assigns its values.
[ { "docid": "18e8b588f7434cdc93fc9c7d71063882", "score": "0.5780925", "text": "def __init__(self, data: List[Any], token: str = None, total: int = None):\n # The total amount of items in a request.\n self.total: int = total\n # The starting point for the next search.\n self.token: str = token\n # The items of the retrieved page.\n self.data: List[Any] = data", "title": "" } ]
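For illustration of the record above: a minimal, self-contained sketch of how a page container like the one in the positive passage is typically consumed, by following its continuation token until it runs out. The DataPage class restates the fields from the passage (data, token, total); fetch_page and the _ITEMS source are hypothetical stand-ins for a real paged API and are not part of the dataset. Token-based pagination like this lets the client resume exactly where the previous page ended without tracking offsets itself.

from typing import Any, List, Optional

class DataPage:
    # Container for one page of results, mirroring the positive passage.
    def __init__(self, data: List[Any], token: Optional[str] = None,
                 total: Optional[int] = None):
        self.total = total  # the total amount of items in the request
        self.token = token  # the starting point for the next search
        self.data = data    # the items of the retrieved page

# Hypothetical in-memory source standing in for a real paged backend.
_ITEMS = list(range(10))

def fetch_page(token: Optional[str], size: int = 4) -> DataPage:
    # Hypothetical fetch helper: interprets the token as a start offset.
    start = int(token) if token is not None else 0
    chunk = _ITEMS[start:start + size]
    next_token = str(start + size) if start + size < len(_ITEMS) else None
    return DataPage(chunk, token=next_token, total=len(_ITEMS))

# Drain all pages by following the continuation token until it is None.
token: Optional[str] = None
collected: List[Any] = []
while True:
    page = fetch_page(token)
    collected.extend(page.data)
    if page.token is None:
        break
    token = page.token

assert collected == _ITEMS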
[ { "docid": "eb6cc19f2a08202ef11920f75b300e40", "score": "0.6493468", "text": "def data(self):\n if self.options[\"type\"].value == MAP_TYPE:\n pages = self.create_map_page()\n else:\n pages = self.create_island_posters()\n return pages", "title": "" }, { "docid": "31d47b194f480e6d2ceb6ce8a459f8ae", "score": "0.6448023", "text": "def create_page(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "857a9fab0392166b05bb9d02de2075c6", "score": "0.61697656", "text": "def __init__(self, wikidpage, create=False) :\n \n assert(wikidpage)\n self.wikidpage = wikidpage\n \n debugOutput(\"context = %s\" % self.wikidpage.context)\n \n # Get the fields of the wikidpage\n self.fields = []\n wikidpageFields = wikidpage.getFields().getDict()\n \n # For each data field, add a widget to this form.\n for nFieldName in wikidpageFields :\n field = wikidpageFields[nFieldName]\n if wikidbase.core.pagecontent.isDataField(field) :\n self.fields.append(self._selectWidget(field, wikidpage))", "title": "" }, { "docid": "95664c7ee58bc361a05e1ddef33f46b1", "score": "0.60978603", "text": "def __init__(self, id, title, numCols, numericColumns, pgTitle,\n numDataRows, secondTitle, numHeaderRows, caption, data):\n self.id = id\n self.title = title\n self.num_cols = numCols\n self.numeric_columns = numericColumns\n self.page_title = pgTitle\n self.num_data_rows = numDataRows\n self.second_title = secondTitle\n self.num_header_rows = numHeaderRows\n self.caption = caption\n self.data = data", "title": "" }, { "docid": "614d1f1400eaf580d12c3f0b37b24937", "score": "0.60710317", "text": "def data(self, **kw):\n return dict(page='data', params=kw)", "title": "" }, { "docid": "614d1f1400eaf580d12c3f0b37b24937", "score": "0.60710317", "text": "def data(self, **kw):\n return dict(page='data', params=kw)", "title": "" }, { "docid": "a99e80f7e118573b711a4b9a01179a7c", "score": "0.6058671", "text": "def __init__(self, uuid):\n try:\n db = rdb[cdb].split(':')\n results = r.db(db[0]).table(db[1]).get(uuid).run(g.rdb_conn)\n except RqlRuntimeError:\n print(\"PAGESMODEL: InitPage: Critical Failure: Saving Throw Failed! 
while looking up Rethink UUID: {}\".format(uuid))\n\n if results is None:\n raise NoSuchUUIDExists\n\n self.id = results['id']\n self.object_id = results['meta']['object_id']\n self.updated_at = results['meta']['updated_at']\n\n # These fields may potentially be missing.\n try:\n self.title = results['meta']['title']\n except KeyError:\n self.title = \"0.0.0.0\"\n try:\n self.content = results['meta']['content']\n except KeyError:\n self.content = {}\n\n print(\"PAGESMODEL: InitPage: {} Title: {} Object_ID: {}\".format(\n self.id, self.title, self.object_id))", "title": "" }, { "docid": "90553480fff231a4f5d70346b95e0918", "score": "0.6025946", "text": "def __init__(self):\n self.data = dict()", "title": "" }, { "docid": "70f48d0f5ea639cb58ed4126c4bf9552", "score": "0.59712243", "text": "def create(data, next=None):\n return {'data':data, 'next':next}", "title": "" }, { "docid": "a2e27e1809756c4c3cb9a9e2b3da71d0", "score": "0.59512293", "text": "def navigate_to_data_page(self):\n self.driver.find(DATA_TAB).click()\n return DataPage(self.driver)", "title": "" }, { "docid": "89cfbf35991c1e2c6c5efbdfb3e5f411", "score": "0.59201574", "text": "def from_data(cls, data):\n datastructure = cls()\n datastructure.data = data\n return datastructure", "title": "" }, { "docid": "52686ad1d04855c0293d629d3134baa2", "score": "0.58992875", "text": "def __init__(self):\n self.data = {}", "title": "" }, { "docid": "52686ad1d04855c0293d629d3134baa2", "score": "0.58992875", "text": "def __init__(self):\n self.data = {}", "title": "" }, { "docid": "98a356663aeeddb5c324d8acc262fe0b", "score": "0.58959043", "text": "def fromWikibase(cls: Type['WbDataPage'], page_name: str,\n site: Optional['DataSite']) -> 'WbDataPage':\n # TODO: This method signature does not match our parent class (which\n # takes a dictionary argument rather than a string). 
We should either\n # change this method's signature or rename this method.\n\n data_site = cls._get_data_site(site)\n page = pywikibot.Page(data_site, page_name)\n return cls(page, site)", "title": "" }, { "docid": "61dd9da4f727795e66dcded4bf370180", "score": "0.58863235", "text": "def __init__(self, data: list, json: dict, headers: dict, base_url: str):\n super().__init__(data)\n self.json = json\n self.json['variables']['page'] += 1\n self.header = headers\n self.base_url = base_url\n self.isNext = True", "title": "" }, { "docid": "3ca84bb3820fec1bbb040f9e28e3d22c", "score": "0.58843917", "text": "def __init__(self):\n self.data = {}", "title": "" }, { "docid": "509f84050114116d91ed7acf1a86aeda", "score": "0.58831763", "text": "def setup_page(self, page, submenu_header, submenu_page):\n self[self.DATA_IS_SET] = True\n self[self.DATA_PAGE] = page\n self[self.DATA_SUBMENU_HEADER] = submenu_header\n self[self.DATA_SUBMENU_PAGE] = submenu_page", "title": "" }, { "docid": "450437ec675b0d5000e716d1da6bbd81", "score": "0.5879535", "text": "def create_new_data_object(self):\n new_index = self.get_next_index()\n new_data_object = Data_Object(new_index)\n self.add_new_data_object_to_list(new_data_object)", "title": "" }, { "docid": "82147a70c9208c2525a3810e59e6d478", "score": "0.58652455", "text": "def data():\n \n return render_template(\"data.html\",xpage=\"Data\")", "title": "" }, { "docid": "83169fcde01ede6c0ec0006e573b5759", "score": "0.58420104", "text": "def __init__(self, **kwargs):\n\n # Convert show and network attributes to lists\n for attr in [\"show\", \"network\"]:\n key = kwargs.get(attr)\n if key is not None and not isinstance(key, list):\n kwargs[attr] = [key]\n self.__dict__.update(kwargs)\n\n self.date = convert_string(self.date)\n self.date_obj = convert_date(self.date)\n self.weekday = get_day(self.date_obj)\n self.soup = self._get_ratings_page()\n self.next_week = next_week(self.date_obj)\n self.last_week = last_week(self.date_obj)\n\n # After finding the page, grab the results\n if self._verify_page():\n self.entries = self.fetch_entries()\n else:\n raise PageNotFoundError(PAGE_ERROR)", "title": "" }, { "docid": "fd2ed1e8753123241ee30051c8a1a58c", "score": "0.58307207", "text": "def __init__(self):\n self.invoice = []\n self.page_context = PageContext()", "title": "" }, { "docid": "654bcaa708595c714114ef84c00ef767", "score": "0.58263206", "text": "def parse_main(self):\n\n # create item\n item = Home_Page()\n\n item['uuid'] = str(uuid.uuid1())\n item['url'] = self.home_page\n item['sitemap_exists'] = 't' if requests.get(self.home_page + 'sitemap.xml').status_code == 200 else 'f'\n\n self.home_page_id = item['uuid'] # noqa\n\n # write to db Home_Page\n self.cur.execute(\n \"\"\"INSERT INTO Home_Page (id, url, sitemap_exists)\n VALUES (%s, %s, %s)\"\"\", (\n item['uuid'],\n item['url'],\n item['sitemap_exists'],\n )\n )\n self.connection.commit()", "title": "" }, { "docid": "28ba8645c88ec72417f36d1ce7d1040e", "score": "0.58197373", "text": "def __init__(self, data):\n super(Progress, self).__init__(data)\n\n self.combat = data.get(\"Combat\")\n self.trade = data.get(\"Trade\")\n self.explore = data.get(\"Explore\")\n self.empire = data.get(\"Empire\")\n self.federation = data.get(\"Federation\")\n self.cqc = data.get(\"CQC\")", "title": "" }, { "docid": "5924c3dfb7fdbf6a96400d1ddefdd109", "score": "0.58161914", "text": "def createData(self):\n raise NotImplementedError()", "title": "" }, { "docid": "2b09d50c56fdd95e4d42d9990f5868fc", "score": "0.5804045", "text": "def 
__init__(self,\n page: 'pywikibot.Page',\n site: Optional['DataSite'] = None) -> None:\n site = site or page.site.data_repository()\n specifics = type(self)._get_type_specifics(site)\n WbDataPage._validate(page, specifics['data_site'], specifics['ending'],\n specifics['label'])\n self.page = page", "title": "" }, { "docid": "338efd734f79621edf9b4836f51affe9", "score": "0.5757187", "text": "def _get_page(self, *args, **kwargs):\n return Page(*args, **kwargs)", "title": "" }, { "docid": "338efd734f79621edf9b4836f51affe9", "score": "0.5757187", "text": "def _get_page(self, *args, **kwargs):\n return Page(*args, **kwargs)", "title": "" }, { "docid": "a49038cd8dfa3cfa8795570475dffb5b", "score": "0.5750442", "text": "def __init__(self, data):\n self.loads = [Load(l[\"name\"], l[\"capacity\"]) for l in data[\"loads\"]]\n self.generators = [Generator(g[\"name\"], g) for g in data[\"generators\"]]\n self.storages = [Storage(s[\"name\"], s) for s in data[\"storages\"]]\n\n self.base_purchase_price = data[\"base_purchase_price\"]\n self.period_duration = data[\"period_duration\"] # TODO period_duration to a config file?\n self.peak_price = data[\"peak_price\"]\n self.price_margin = data[\"price_margin\"]", "title": "" }, { "docid": "97f59354195dee0b6f7386c2dac55ff9", "score": "0.5734608", "text": "def __init__(self, data: Dict):\n\t\tself.data = data", "title": "" }, { "docid": "195c476d8814c777f2f6a295ada1c839", "score": "0.5713632", "text": "def __init__(self, data):\n self.post_title = data.get('post_title')\n self.post_subtitle = data.get('post_subtitle')\n self.post_content = data.get('post_content')", "title": "" }, { "docid": "a81c9ad546f39f83cb70be454b605c0f", "score": "0.567416", "text": "def build_page(self, title: str, page_results: List[dict]) -> Panel:\n structure = [self.build_analysis(analysis_result) for analysis_result in page_results]\n return Panel(child=column(*structure), title=title)", "title": "" }, { "docid": "0fc2a2c9c737214ab1609435628bffcb", "score": "0.5667301", "text": "def pages(cls):\n return Pages(cls.connect())", "title": "" }, { "docid": "6e209f01c7755210e413b9587a4597c1", "score": "0.5665332", "text": "def __init__(self, *args):\n this = _pcbnew.new_PAGE_INFO(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "1767838f5a5381262f581621c52fce78", "score": "0.56630045", "text": "def __get_xml_page(self, page, data=None, headers={}):\n return XMLParser(self, page, lambda: self.__do_special_page('XML_%s' % page.upper(), data, headers).read())", "title": "" }, { "docid": "72b116e055cd0cf2a7827ade8563f529", "score": "0.56563914", "text": "def __init__(self, data):\n self.data = data", "title": "" }, { "docid": "72b116e055cd0cf2a7827ade8563f529", "score": "0.56563914", "text": "def __init__(self, data):\n self.data = data", "title": "" }, { "docid": "72b116e055cd0cf2a7827ade8563f529", "score": "0.56563914", "text": "def __init__(self, data):\n self.data = data", "title": "" }, { "docid": "72b116e055cd0cf2a7827ade8563f529", "score": "0.56563914", "text": "def __init__(self, data):\n self.data = data", "title": "" }, { "docid": "16ede1610e255bf2c2e3730c80c3f0f9", "score": "0.565343", "text": "def __init__(self, driver, value):\n self.driver, self.value = driver, Template(value)", "title": "" }, { "docid": "74ac264c14a6e57eb5b58542f81cae78", "score": "0.5648001", "text": "def __init__(self):\n self.data: dict = {}", "title": "" }, { "docid": "d9afbb4fed8b1c91044b13cbcb41ce3e", "score": "0.56324303", "text": "def __init__(self, data):\n 
self.data = data\n # TODO: Handle games that are in a TBA state (8 <td> results)\n self.winner = data[3].find_all('div')[-1].string\n if self.winner in ['Tied', 'TBA']:\n data.insert(3, 'Dummy element to normalize results...')\n self.winner = None\n assert len(data) == 9, 'The dotabuff results page has changed, update API.\\n%s' % self.data\n self.league_id = data[0].find_next('img')['title']\n self.start_time = data[2].find_next('time')['datetime']\n self.max_games = int(data[1].find_next('a').string[-1])\n self.status = data[2].find_next('div').string\n self.teams = list(set([team.string for team in data[5].select(\".r-only-mobile\")]))\n self.duration = data[6].find_next('div').string\n self.game_ids = [game['title'].split()[-1] for game in data[7].select('a[title]')]", "title": "" }, { "docid": "53a5e1203e416bd18563bad96d26890b", "score": "0.56313056", "text": "def __init__(self, data=None):\n self._values = {}\n self._previous_values = {}\n if data:\n self.update(data)", "title": "" }, { "docid": "46b5c88bc400c7c6915147b2c6a2b2fa", "score": "0.56308275", "text": "def __init__(self):\r\n self.page: list = [EMPTY for i in range(PAGE_SIZE)]\r\n self.used = 0", "title": "" }, { "docid": "4cd04bfc0989076d6f4a8c07c1139159", "score": "0.5615468", "text": "def __init__(self, data):\n self._data = data", "title": "" }, { "docid": "4cd04bfc0989076d6f4a8c07c1139159", "score": "0.5615468", "text": "def __init__(self, data):\n self._data = data", "title": "" }, { "docid": "054803d4185bf4156e640c55c1af5f3a", "score": "0.5603761", "text": "def __init__(self, data=None):\n self.data = data", "title": "" }, { "docid": "60fa2094b8b325181eb34dec5292710a", "score": "0.56005955", "text": "def __call__(self) -> dict:\n page_info = self.page_info()\n return {\n 'data': self.items,\n 'pagination': page_info,\n 'total': page_info['total']\n }", "title": "" }, { "docid": "e91c8f312329c9b82d53b10932605f70", "score": "0.5576948", "text": "def __init__(self, data, state):\n\n # Instantiate Report Handler Objects here\n self.reports = {\n 'field_report': FieldReport(data['field_report']),\n 'feed_storage_report': FeedStorageReport(data['feed_storage_report']),\n 'mass_balance_report': MassBalanceReport(data['mass_balance_report']),\n 'custom_report': CustomReport(data['custom_report'])\n }\n\n # TODO: move field report to loop in dj_fields\n for pen in state.animal_management.all_pens:\n self.reports['pen_' + str(pen.id)] = PenReport(data['pen_report'], pen.id)", "title": "" }, { "docid": "4909016059e1459207f51bf751cd80f0", "score": "0.5563359", "text": "def create(self, data):\n pass", "title": "" }, { "docid": "4909016059e1459207f51bf751cd80f0", "score": "0.5563359", "text": "def create(self, data):\n pass", "title": "" }, { "docid": "41fc895e01a73ad393b2f7ad538e2928", "score": "0.5561167", "text": "def __init__(self, data):\n self._refresh_data(data)", "title": "" }, { "docid": "41fc895e01a73ad393b2f7ad538e2928", "score": "0.5561167", "text": "def __init__(self, data):\n self._refresh_data(data)", "title": "" }, { "docid": "134806adcd18e7ee563d23983e6ccc8a", "score": "0.55587316", "text": "def __init__(self,\n page: int,\n per_page: int,\n count: int,\n total_count: int) -> None:\n self.page = page\n self.per_page = per_page\n self.count = count\n self.total_count = total_count", "title": "" }, { "docid": "1e878c87f35f2e031d51a7c125f70279", "score": "0.5539544", "text": "def create_pages(self):\n\n from public_project.models import Page\n # create a parser object associated with the file object\n parser = 
PDFParser(self.pdf_file)\n # create a PDFDocument object that stores the document structure\n doc = PDFDocument()\n # connect the parser and document objects\n parser.set_document(doc)\n doc.set_parser(parser)\n # supply the password for initialization\n pdf_pwd = ''\n doc.initialize(pdf_pwd)\n\n if doc.is_extractable:\n # apply the function and return the result\n doc_pages = self._parse_pages(doc)\n\n i = 1\n for doc_page in doc_pages:\n page = Page(\n document=self.document,\n number=i,\n content = smart_unicode(doc_page, encoding='utf-8', strings_only=False, errors='strict'),\n )\n page.save()\n i = i + 1", "title": "" }, { "docid": "cd54b47446a873d3db283e63938bfcb0", "score": "0.55193925", "text": "def __init__(self, data):\n super(DSMetada, self).__init__(data or {})", "title": "" }, { "docid": "c7a7e14eb5daa7f8cd588d30b61557cf", "score": "0.55167127", "text": "def __init__(self, **kwargs):\n self._names = (\"outDir\",\"thumbSize\",\"boy\",\"girl\",\"summaryColumns\",\"nothing\")\n for k in self._names: # Set the defaults\n setattr(self,k,None)\n # Set special default values \n self.outDir = \"~/resource\"\n for k,v in kwargs.items():\n assert k in self._names, \"Unexpected kwarg: \" + k\n setattr(self,k,v)\n\n # Create directory for output page`\n self.outImages = self.outDir+\"/images\"\n dprintf (\"Create output directory => %s and %s\\n\",self.outDir,self.outImages)\n if os.path.exists(self.outDir):\n shutil.rmtree(self.outDir)\n\n os.mkdir(self.outDir)\n os.mkdir(self.outImages)\n\n # Create stream to use for the output string and generate the page header\n self.output = cStringIO.StringIO()\n self.header = \"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n\t\t\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\r\n\t\t<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\r\n\t\t<table border=1 cellspacing=5 cellpadding=5>\n\t\t\"\"\" \n self.appendStr(self.header)\n\n # Initialize item count (for the summary column count\n self.count = 1", "title": "" }, { "docid": "289ec11307fa976ec85a4537b917508f", "score": "0.5516649", "text": "def makePage():\n\n # lstgraphs to be used when constructing the page\n # each entry in this list is a different tab containing several graphs\n lstgraphs = []\n\n for tabNum, tabSet in enumerate(graphSets):\n\n # --------------- now add to the tab\n # \n tabLabel = graphTabs[tabNum]\n\n if useCallbacks:\n lstgraphs.append(\n dcc.Tab(value='Tab ' + str(tabNum), label=tabLabel))\n else:\n lstgraphs.append(\n dcc.Tab(value='Tab ' + str(tabNum), label=tabLabel, children=[\n *tabSet,\n ]))\n\n\n # create the page to be rendered in the browser\n page = html.Div([\n dcc.Tabs(id=\"tabs\", value='Tab 0', children=[\n # following is a list of dcc.Graph(() graphs\n *lstgraphs,\n ]),\n html.Div(id='tabs-content'),\n ])\n return page", "title": "" }, { "docid": "3367a8206f7a71e3b40d0ce1e6aac28d", "score": "0.5508624", "text": "def initialize_data(self):\n pass", "title": "" }, { "docid": "6f8dabda39fafd98e008b15e1b85fee7", "score": "0.5508478", "text": "def __newPage(self):\n rows = self.__gridRows\n cols = self.__gridCols\n self.__image = self.__multi.NewPage()\n self.__grid = oedepict.OEImageGrid(self.__image, rows, cols)\n self.__grid.SetCellGap(self._params[\"cellGap\"])\n self.__grid.SetMargins(self._params[\"cellMargin\"])\n logger.debug(\"Num columns %d\", self.__grid.NumCols())\n logger.debug(\"Num rows %d\", self.__grid.NumRows())\n self._opts = oedepict.OE2DMolDisplayOptions(self.__grid.GetCellWidth(), 
self.__grid.GetCellHeight(), oedepict.OEScale_AutoScale)\n self._assignDisplayOptions()\n self.__citer = self.__grid.GetCells()", "title": "" }, { "docid": "6f8dabda39fafd98e008b15e1b85fee7", "score": "0.5508478", "text": "def __newPage(self):\n rows = self.__gridRows\n cols = self.__gridCols\n self.__image = self.__multi.NewPage()\n self.__grid = oedepict.OEImageGrid(self.__image, rows, cols)\n self.__grid.SetCellGap(self._params[\"cellGap\"])\n self.__grid.SetMargins(self._params[\"cellMargin\"])\n logger.debug(\"Num columns %d\", self.__grid.NumCols())\n logger.debug(\"Num rows %d\", self.__grid.NumRows())\n self._opts = oedepict.OE2DMolDisplayOptions(self.__grid.GetCellWidth(), self.__grid.GetCellHeight(), oedepict.OEScale_AutoScale)\n self._assignDisplayOptions()\n self.__citer = self.__grid.GetCells()", "title": "" }, { "docid": "4872b3d49e38871d58d5ed035a0c9b8b", "score": "0.5487986", "text": "def set_info() -> None:\n\n tdesc, ttaxes, tnum, trent = get_description(), get_property_taxes(), \\\n get_num_units(), get_rent_per_unit()\n\n WebScraper.address = get_address()\n WebScraper.price = get_price()\n WebScraper.year = get_year()\n WebScraper.description = tdesc[0] if tdesc[1] else tdesc[0]\n WebScraper.sqft = get_sqft()\n WebScraper.price_per_sqft = get_price_per_sqft()\n WebScraper.lot_size = get_lot_size()\n WebScraper.parking = get_parking()\n WebScraper.property_taxes = ttaxes[0] if ttaxes[1] \\\n else use_default_property_taxes()\n WebScraper.num_units = tnum[0] if tnum[1] \\\n else use_default_num_units(tnum[0])\n WebScraper.rent_per_unit = trent[0] if trent[1] \\\n else use_default_rent_per_unit()\n\n set_found()", "title": "" }, { "docid": "e581e012c46733603cb86b7cf8e118c7", "score": "0.5481052", "text": "def build(self, data: dict) -> Dataset:\n dataset = Dataset.build(data)\n dataset.project_id = self.project_id\n dataset.session = self.session\n return dataset", "title": "" }, { "docid": "99c3c0c2197f67fb9cc4cc06a5639e1a", "score": "0.5480759", "text": "def get(cls, data):\n return cls(data)", "title": "" }, { "docid": "47f19f3bde5be55df155ce84bba9be15", "score": "0.5479652", "text": "def _setSoup(self, data=None, headers={}):\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n raise Exception('Page content not fetched for th url %s'%self.currenturi)\n self._setCurrentPage()", "title": "" }, { "docid": "2febfac14b1611c8efc2a9f44b010a6d", "score": "0.54790896", "text": "def __init__(self,\n filename: str,\n path: str,\n require_page_data: bool = True\n ) -> None:\n self.filename = filename\n self.path = path\n self.require_page_data = require_page_data", "title": "" }, { "docid": "d35613f70670e1740c1436c39750f368", "score": "0.54743326", "text": "def __init__(self, total_entries=1, entries_per_page=10, current_page=1, pageset_range=5, changeset_range=10):\n self.entries_per_page = abs(entries_per_page)\n self.total_entries = total_entries\n self.last_page = ceil(self.total_entries / self.entries_per_page) # if you are using python lower then version 3.0 use: ceil(float(self.total_entries)/self.entries_per_page)\n self.changeset_range = changeset_range\n self.pageset_range = abs(pageset_range)\n self.current_page = abs(current_page)\n self.first_page = 1", "title": "" }, { "docid": "369b429d6b425a8d361c70f3e304c820", "score": "0.5458607", "text": "def createPages(self):\n cat_dict = self.tree.find('globals.categories').children\n for cat, cat_node in cat_dict.iteritems(): # cat=Function,...\n print >>sys.stderr, 'Pages for 
' + cat\n if self.tree.find(cat) is None: # Skip empty categories.\n print >>sys.stderr\n continue\n for subcat, node in self.tree.find(cat).children.iteritems(): # subcat=length,...\n filename = getPagePath(cat, subcat, self.out_path)\n ## print filename\n with open(filename, 'wb') as f:\n if 'title' in node.children:\n title = node.children['title'].text()\n else:\n title = node.key\n res = self.page_template(title=title,\n cat=cat,\n subcat=subcat,\n tree=self.tree,\n now=datetime.datetime.utcnow(),\n time=time, # for now.strftime()\n core=core,\n html=HtmlHelper(self.error_logger, self.tree, os.path.dirname(filename), self.include_dirs),\n json=json)\n f.write(res.encode('utf-8'))\n print >>sys.stderr, '.',\n print >>sys.stderr", "title": "" }, { "docid": "daa60a3fefe2be88683380002c196627", "score": "0.54489386", "text": "def fromDom(self,domNode):\r\n \r\n # must be PAGE \r\n self._name = domNode.name\r\n self.setNode(domNode)\r\n # get properties\r\n # all?\r\n prop = domNode.properties\r\n while prop:\r\n self.addAttribute(prop.name,prop.getContent())\r\n # add attributes\r\n prop = prop.next\r\n self.computePoints()", "title": "" }, { "docid": "d3a9c3969693d757f17b848fd425ea8b", "score": "0.5447424", "text": "def __init__(self):\n self.data = {}\n self.list = []", "title": "" }, { "docid": "c0d42c0f9f2b4ada613087c624ad1e14", "score": "0.54436886", "text": "def GatherPageData(self, request, req_info):\n\n page_data = helpers.BuildProjectMembers(req_info.project, self.demetrius_persist, self.conn_pool)\n return page_data", "title": "" }, { "docid": "3019a60db9c4b83a3ed0c74a9cdfad1b", "score": "0.544024", "text": "def initData():\n Patient.generate()\n Patient.load()\n VitalSigns.load()\n Lab.load()\n Procedure.load()\n Immunization.load()\n FamilyHistory.load()\n SocialHistory.load()\n Condition.load()\n Med.load()\n Refill.load()\n Document.load()\n Allergy.load()\n ClinicalNote.load()\n Practitioner.load()", "title": "" }, { "docid": "46ebeee989f81b6c4c280b3d332f6b88", "score": "0.5437774", "text": "def __init__(self, data):\n self.data = data\n self.next = None", "title": "" }, { "docid": "46ebeee989f81b6c4c280b3d332f6b88", "score": "0.5437774", "text": "def __init__(self, data):\n self.data = data\n self.next = None", "title": "" }, { "docid": "46ebeee989f81b6c4c280b3d332f6b88", "score": "0.5437774", "text": "def __init__(self, data):\n self.data = data\n self.next = None", "title": "" }, { "docid": "46ebeee989f81b6c4c280b3d332f6b88", "score": "0.5437774", "text": "def __init__(self, data):\n self.data = data\n self.next = None", "title": "" }, { "docid": "46ebeee989f81b6c4c280b3d332f6b88", "score": "0.5437774", "text": "def __init__(self, data):\n self.data = data\n self.next = None", "title": "" }, { "docid": "1113d4878ebfd95abff7466cbfb8459a", "score": "0.5432747", "text": "def load_data(self):\n data = self.get_data_record()\n if not data:\n return\n\n self.set_typeclass(data.typeclass)\n self.set_name(data.name)\n self.set_alias(data.alias)\n self.set_desc(data.desc)\n self.set_lock(data.lock)\n self.set_attributes(data.attributes)\n \n # set common object's info\n self.max_stack = data.max_stack\n self.unique = data.unique\n self.action = data.action", "title": "" }, { "docid": "627178eaffe6f9310a3d93febd5ba9ea", "score": "0.54324466", "text": "def __init__(self, collection, page_params):\n self.collection = collection\n self.page_params = page_params", "title": "" }, { "docid": "2dcbe83012876baeb87e1bd86a714dd7", "score": "0.54305226", "text": "def __init__(self, 
item_count, page_index=1, page_size=15):\n self.item_count = item_count\n self.page_size = page_size\n self.page_count = item_count // page_size + (1 if item_count % page_size > 0 else 0)\n if (item_count == 0) or (page_index < 1) or (page_index > self.page_count):\n self.offset = 0\n self.limit = 0\n self.page_index = 0\n else:\n self.page_index = page_index\n self.offset = self.page_size * (page_index - 1)\n self.limit = self.page_size\n self.has_next = self.page_index < self.page_count\n self.has_previous = self.page_index > 1", "title": "" }, { "docid": "850151e71d63b9673a8c535433003a4c", "score": "0.5426735", "text": "def _get_data(self):\n data = self.document.create_document_data_object()\n data.document.path = '%s%s' % (settings.OPENKM['configuration']['UploadRoot'], '123.pdf')\n\n # add keywords\n data.document.keywords += ['one', 'two', 'three']\n\n # add categories\n path = \"/okm:categories/Industries/Chemicals\"\n category = self.document.create_category_folder_object(path)\n data.document.categories.append(category)\n\n # initialise list nodes\n data.document.notes = []\n data.document.subscriptors = []\n\n # add properties\n sync_properties = sync.SyncProperties()\n properties = self._get_properties_dict()\n data.properties = sync_properties.populate_property_group(properties)\n\n return data", "title": "" }, { "docid": "d68bed2993b8b18dbabfcd532e5c4993", "score": "0.542657", "text": "def create_db_page(self):\n self.__view.create_db_page(self.event_handler)", "title": "" }, { "docid": "c675026b2b7840b6dbf2a217b6380883", "score": "0.542163", "text": "def make_data(ds):\n ds['_metadata'].update(\n {\n 'creator': \"Terry N. Brown, [email protected]\",\n 'description': \"Demo data for DataPlot class\",\n }\n )\n ds['ds']['x'] = list(range(10))\n ds['ds']['y'] = [i % 3 for i in range(10)]\n ds['kv']['r2'] = 0.2\n ds['ax']['x']['title'] = 'X units'\n ds['ax']['title'] = 'The Plot'", "title": "" }, { "docid": "f79c8b344b31081ed9223984013bf761", "score": "0.54199946", "text": "def __init__(self, data, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\n\t\tself.data = data", "title": "" }, { "docid": "03466b1bd16db04f7f25f439bd4bf625", "score": "0.54186666", "text": "def construct(data):", "title": "" }, { "docid": "f680e2efe9f96664184538fd11a547f3", "score": "0.5404798", "text": "def set_pages(self, selenium: fixture) -> None:\n self.reports: Reports = Reports(selenium)", "title": "" }, { "docid": "72a12fdb552af3cadc626e2552bd8430", "score": "0.54031694", "text": "async def build_pages(self):\n pass", "title": "" }, { "docid": "aa87ea8d1bfd444ac4f61a2f39733e15", "score": "0.54025817", "text": "def __init__(self, html_path):\n self.rows = self.get_rows(html_path)\n\n self.document_id = AllPurposeParser(html_path).document_id\n\n self.document_type = self.get_field(0)\n self.instrument_no = self.get_field(1)\n self.multi_seq = self.get_field(2)\n self.min_ = self.get_field(3)\n self.cin = self.get_field(4)\n self.book_type = self.get_field(5)\n self.book = self.get_field(6)\n self.page = self.get_field(7)\n self.document_date = self.get_field(8)\n self.document_recorded = self.get_field(9)\n self.amount = convert_amount(self.get_field(10))\n self.status = self.get_field(11)\n self.prior_mortgage_doc_type = self.get_field(12)\n self.prior_conveyance_doc_type = self.get_field(13)\n self.cancel_status = self.get_field(14)\n self.remarks = self.get_field(15)\n self.no_pages_in_image = self.get_field(16)\n self.image = self.get_field(17)", "title": "" }, { "docid": 
"2d250af32537d6e41362507dd6dda055", "score": "0.5396107", "text": "def _populate_data_object(self, value, metadata):\n\n result = EgadsData(value, metadata)\n\n for key, val in self.output_properties.iteritems():\n result.__setattr__(key, val)\n\n\n return result", "title": "" }, { "docid": "36bcdd44fe702965d5badcb69f1c28da", "score": "0.5395602", "text": "def __init__(self, data=None, next=None):\n self.data = data\n self.next = next", "title": "" }, { "docid": "9db5b4c3394fd47a99945106fe2eda60", "score": "0.5395114", "text": "def getPageObject(page_class, path, unittest, headers, post_variables={}):\n\n def getPostVariables(self, *args, **kwargs):\n \"\"\"Set the post variables to the variables\n passed to the getPageObject method\"\"\"\n self.post_vars = post_variables\n\n # Create fake handler for page request\n fake_handler = FakeHandler(unittest, path)\n\n # Add test headers to headers object\n for header_key in headers:\n fake_handler.headers[header_key] = headers[header_key]\n\n # Obtain the page object\n page_object = page_class(fake_handler)\n\n # Override the getPostVariables method\n method_type = type(page_class.getPostVariables)\n page_object.getPostVariables = method_type(getPostVariables, page_object, page_class)\n\n # Return page object\n return page_object", "title": "" }, { "docid": "9f614e6cc63517c18d91c9264463128e", "score": "0.5393649", "text": "def from_meta(cls, meta, options, engine, renderer=renderers.Plain):\n page = cls(options, engine)\n page.meta = meta\n page.options = options\n page.renderer = renderer\n\n if 'pagination' in meta:\n logging.debug('from_meta: current page %d' %\n meta['pagination']['cur_page'])\n\n # Make a template environment. Hopefully no one expects this to ever\n # change after it is instantiated.\n if Page.tmpl_env is None:\n Page.tmpl_env = jinja2.Environment(loader=GlobFileLoader(\n page.options.get('template_dir', 'templates')))\n\n page.build_meta()\n return page", "title": "" }, { "docid": "47907d0ce18f2b88fce883b162152a04", "score": "0.5389442", "text": "def __init__(self):\n\n self.set_data_service_obj(PostDataService())", "title": "" }, { "docid": "47907d0ce18f2b88fce883b162152a04", "score": "0.5389442", "text": "def __init__(self):\n\n self.set_data_service_obj(PostDataService())", "title": "" }, { "docid": "47907d0ce18f2b88fce883b162152a04", "score": "0.5389442", "text": "def __init__(self):\n\n self.set_data_service_obj(PostDataService())", "title": "" }, { "docid": "47907d0ce18f2b88fce883b162152a04", "score": "0.5389442", "text": "def __init__(self):\n\n self.set_data_service_obj(PostDataService())", "title": "" }, { "docid": "d4d825ef3e9b3fb5ee4ca39a81b4be81", "score": "0.5385631", "text": "def from_data(cls, data):\n return object.__new__(cls)", "title": "" }, { "docid": "27aaf3b4a8f3817fe39085beaecbecbc", "score": "0.53828114", "text": "def dup(self):\n new = PageVersion(page=self.page)\n new.title = self.title\n new.body = self.body\n\n return new", "title": "" }, { "docid": "e7bee562c59c8e06d7adb3c5335adf8e", "score": "0.53799456", "text": "def __init__(self, db_url, data):\n self.engine = create_engine(db_url)\n self.metadata = MetaData()\n self.store = Table('store', self.metadata,\n Column('key', String(255), primary_key=True),\n Column('value', PickleType(protocol=3))\n )\n self.metadata.create_all(self.engine)\n self.conn = self.engine.connect()\n self.conn.execute(self.store.insert(), data)", "title": "" }, { "docid": "15952bb43b18d35962148784bd32f805", "score": "0.53729486", "text": "def __init__(self, data):\r\n 
self.__data = copy.deepcopy(data)", "title": "" }, { "docid": "fa3dd4b2b319534f95f0bda92a1a7d3b", "score": "0.53630465", "text": "def __init__(self, container):\n self.data = container()", "title": "" } ]
3d4382612aa7770d02e39bdc03b6f970
Subtract Array or scalar from Array and return an Array.
[ { "docid": "59464f622b7797b7b2a44aebf4a383d3", "score": "0.5544128", "text": "def __rsub__(self,other):\n return self._data.__rsub__(other)", "title": "" } ]
[ { "docid": "544f638cf0221de411c0dd23bb37b663", "score": "0.67351913", "text": "async def np_subtract(self, a, b):\n stype = type(b) if isinstance(b, self.SecureArray) else type(a)\n a_shape = getattr(a, 'shape', (1,))\n b_shape = getattr(b, 'shape', (1,))\n shape = np.broadcast_shapes(a_shape, b_shape)\n if not stype.frac_length:\n await self.returnType((stype, shape))\n else:\n await self.returnType((stype, a.integral and b.integral, shape))\n a, b = await self.gather(a, b)\n return a - b", "title": "" }, { "docid": "bbaf3134a8fc2df42994bdd0a9be5420", "score": "0.66743433", "text": "def __sub__(self, other):\r\n\r\n if isinstance(other, Array):\r\n result = self._new_like_me(\r\n _get_common_dtype(self, other, self.queue))\r\n self._axpbyz(result,\r\n self.dtype.type(1), self,\r\n other.dtype.type(-1), other)\r\n return result\r\n else:\r\n # subtract a scalar\r\n if other == 0:\r\n return self.copy()\r\n else:\r\n result = self._new_like_me(\r\n _get_common_dtype(self, other, self.queue))\r\n self._axpbz(result, self.dtype.type(1), self, -other)\r\n return result", "title": "" }, { "docid": "2afe11d31d7e0ffd93dfba73dab4cb11", "score": "0.6320853", "text": "def subarrays(*args):\n\tlgt = len(args)\n\tif verifyarrays(args):\n\t\ta = args[0]\n\t\tfor i in range(lgt):\n\t\t\tif i != 0 :\n\t\t\t\ta -= args[i]\n\t\treturn a", "title": "" }, { "docid": "a950aea213d174664635a5721539b029", "score": "0.6301881", "text": "def difference(a: np.ndarray, b: np.ndarray) -> np.ndarray:\n return np.abs(a - b)", "title": "" }, { "docid": "b4a6d95f36fc3a4e42e73865011be6c3", "score": "0.6200017", "text": "def substract(li1,li2):\n result=array(li1)-array(li2)\n if isinstance(li1, list): result=list(result)\n elif isinstance(li1, tuple): result=tuple(result) \n return result", "title": "" }, { "docid": "4ca072ae0f602ac4c2a08db9c6af0dc7", "score": "0.6136242", "text": "async def vector_sub(self, x, y):\n if x == []:\n return []\n\n x, y = x[:], y[:]\n stype = type(x[0]) # all elts assumed of same type\n n = len(x)\n if not stype.frac_length:\n await self.returnType(stype, n)\n else:\n y0_integral = (isinstance(y[0], int) or\n isinstance(y[0], self.SecureObject) and y[0].integral)\n await self.returnType((stype, x[0].integral and y0_integral), n)\n\n x, y = await self.gather(x, y)\n for i in range(n):\n x[i] = x[i] - y[i]\n return x", "title": "" }, { "docid": "d16a8c980327d39cb35b0d56a9b043b4", "score": "0.6124264", "text": "def test_substract_array(self):\n # test data\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\n b = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]).astype(np.float32)\n c = np.array(1).astype(np.float32)\n\n a_gpu = gpuarray.to_gpu(a)\n b_gpu = gpuarray.to_gpu(b)\n c_gpu = gpuarray.to_gpu(c)\n\n result = (a_gpu - b_gpu).get()\n assert (a - b == result).all()\n\n result = (b_gpu - a_gpu).get()\n assert (b - a == result).all()\n\n result = (a_gpu - c_gpu).get()\n assert (a - c == result).all()\n\n result = (c_gpu - a_gpu).get()\n assert (c - a == result).all()", "title": "" }, { "docid": "d58394d45f9e5870572dc7af2d15c794", "score": "0.6121835", "text": "def removeArray(self, *args):\n return _osg.VertexBufferObject_removeArray(self, *args)", "title": "" }, { "docid": "aab8ba34d475cef06d4c723ee09e7048", "score": "0.60220575", "text": "def difference(self, *args):\n return reduce(operator.sub, args, self)", "title": "" }, { "docid": "ce73e1be822b40cc18157ae0b4f9d283", "score": "0.5998649", "text": "def _return(self, ary):\n if isinstance(ary, Array):\n return ary\n 
return Array(ary, copy=False)", "title": "" }, { "docid": "fd8296ba84245b3cc8572bfc5a3857f9", "score": "0.59306556", "text": "def __sub__(self, other: Any) -> 'Vector':\n return self.copy().subtract(other)", "title": "" }, { "docid": "cefecd26567cbe8a5db27fcdd3d71f9b", "score": "0.5896459", "text": "def __sub__(self,other):\n return self._data - other", "title": "" }, { "docid": "fabbe5eee0513282be85644fb68ff71c", "score": "0.5845594", "text": "def __neg__(self):\n r = self.copy()\n a = r._a\n for i in range(len(a)):\n a[i] = -a[i]\n return r", "title": "" }, { "docid": "76b98dfcfa73244b78167f0353676283", "score": "0.58382344", "text": "def resta_vectores(x,y):\n\tresultado = np.subtract(x, y)\n\treturn resultado", "title": "" }, { "docid": "6bf098b82b1d229507cb4a8ea33a92e4", "score": "0.58364385", "text": "async def np_negative(self, a):\n if not a.frac_length:\n await self.returnType((type(a), a.shape))\n else:\n await self.returnType((type(a), a.integral, a.shape))\n a = await self.gather(a)\n return -a", "title": "" }, { "docid": "017324b470bdd6ffd12342ab6a0b7e98", "score": "0.5779355", "text": "def __sub__(self, other):\n if issubclass(other.__class__, tuple):\n return Vector(self[0] - other[0], self[1] - other[1])\n elif isinstance(other, (int, long, float, complex)):\n return Vector(self[0] - other, self[1] - other)\n else:\n raise TypeError(\"Can only subtract vectors or numbers from a vector.\")", "title": "" }, { "docid": "7951f36542f5542f8b1a20c3d1c315f1", "score": "0.57409865", "text": "def _absoluteSubtract(self, array,tmpArray,outArray):\r\n import numpy\r\n #subtract shifted image from imput image\r\n tmpArray = array - tmpArray\r\n #take the absolute value of tmpArray\r\n tmpArray = numpy.fabs(tmpArray)\r\n #save maximum value of outArray or tmpArray and save in outArray\r\n outArray = numpy.maximum(tmpArray,outArray)\r\n #zero out tmpArray before reuse\r\n tmpArray = tmpArray * 0.\r\n\r\n return (tmpArray,outArray)", "title": "" }, { "docid": "b1763c4ab4697cb30ebfb11073febacc", "score": "0.57375365", "text": "async def np_fliplr(self, a):\n await self.returnType((type(a), a.shape))\n a = await self.gather(a)\n return np.fliplr(a)", "title": "" }, { "docid": "e638206e0b9ec8ab6d5c5f3bb456f9c1", "score": "0.5714192", "text": "def subtract(self, other: Any) -> 'Vector':\n return self.add(-other)", "title": "" }, { "docid": "df0198f39256c126f829006d3b2c8af1", "score": "0.56866586", "text": "def REVERSE_ARRAY(expression):\n return {'$reverseArray': expression}", "title": "" }, { "docid": "66dd0ecaa564e5d1bcc3a1cd0584c412", "score": "0.56673133", "text": "def inverse(self, value):\n array_shape = self.derived_shape[:self.derived_ndims]\n if not any(array_shape):\n raise RuntimeError(f\"Invalid array size {self.derived_shape}\")\n\n array_len = np.prod(array_shape)\n if len(value) < array_len:\n raise RuntimeError(f\"cannot reshape array of size {len(value)} \"\n f\"into shape {tuple(array_shape)}. 
Check IOC configuration.\")\n\n return np.array(value[:array_len]).reshape(array_shape)", "title": "" }, { "docid": "ba7c339a6d2847bc575836b4b3be14b1", "score": "0.56662905", "text": "def __rsub__(self, otherMatrixOrScalar):\n return -self.__sub__(otherMatrixOrScalar)", "title": "" }, { "docid": "cdea139c3645f8ded3d1121ca5e3e1e6", "score": "0.56635094", "text": "def array_difference(ar1, ar2, assume_unique=False):\n if any(isinstance(el, np.ndarray) for el in ar2) or any(\n isinstance(el, list) for el in ar2\n ):\n return reduce(\n lambda x, y: np.setdiff1d(x, y, assume_unique=assume_unique), (ar1, *ar2)\n )\n elif isinstance(ar2, np.ndarray) or isinstance(ar2, list):\n return np.setdiff1d(ar1, ar2, assume_unique=assume_unique)\n else:\n raise ValueError(\"ar2 has a wrong type: {}\".format(type(ar2)))", "title": "" }, { "docid": "ffeae7b0511cd9e734613cda71eaf0cd", "score": "0.56366986", "text": "def __sub__(self, rhs: \"Vector\") -> \"Vector\":\n rhs = Vector(rhs)\n assert len(self) == len(rhs)\n return Vector(x - y for x, y in zip(self, rhs))", "title": "" }, { "docid": "86f9d46ac49ecbec439762abb773266a", "score": "0.5622039", "text": "def __neg__(self):\n if isinstance(self.value, np.ndarray):\n return self.__class__(np.ndarray.__neg__(self.value), self.unit)\n return self.__class__(-self.value, self.unit)", "title": "" }, { "docid": "795678bf440d62a787af50777bb508ec", "score": "0.55793345", "text": "def popArray(array: List[int]) -> int:\n if array:\n val = array.pop(0)\n else:\n val = None\n return val", "title": "" }, { "docid": "0239aadbbaab3a161de02801480a834b", "score": "0.5560206", "text": "def pop(self, name):\n array = self._ds[name].values\n self._ds = self._ds.drop_vars(name)\n return array", "title": "" }, { "docid": "209c64b0c908034a472c8c65604d47d6", "score": "0.5530659", "text": "def arrayDifference(a,b): \n\n for i in a:\n if i in b:\n a = list(filter((i).__ne__, a))\n return a", "title": "" }, { "docid": "1590e3327f6f2b99e72907593d645d80", "score": "0.5525656", "text": "def ret(self):\n return self.array.toarray()", "title": "" }, { "docid": "451e6a8f70d6412f996deb02f5b23855", "score": "0.5518046", "text": "def subtract(a, b):\n return Tensor._op(Subtract, a, b)", "title": "" }, { "docid": "3af7cc1fbe9ebce817a33003ecd1632d", "score": "0.55082923", "text": "def __rsub__(self, a):\n return FE(a) - self", "title": "" }, { "docid": "31b4bf78e22bce1e01a62f2307300c5d", "score": "0.549148", "text": "def _sub(self, rhs, left = False):\n\n (rows, cols) = self.size()\n\n return Matrix([[rhs[i][j] - self._container[i][j] if left else self._container[i][j] - rhs[i][j] for j in range(cols)] for i in range(rows)])", "title": "" }, { "docid": "dc8fd8bfcccd5c4709f1d390000a0704", "score": "0.54817444", "text": "def subtract(lar1, lar2, join='inner', cast=True, missone='ignore',\n misstwo='ignore'): \n return binaryop(np.subtract, lar1, lar2, join=join, cast=cast,\n missone=missone, misstwo=misstwo)", "title": "" }, { "docid": "9494c700cfcbc68f26bedc6ba0ffb3ac", "score": "0.5470614", "text": "def subtract (self, a: Number) -> Number:\n self.__evaluate_input(a)\n self.__memory -= a\n return self.__memory", "title": "" }, { "docid": "a3d37eb21d0af0c7d2ea124e0ca95abc", "score": "0.5456691", "text": "def _unsigned_subtract(a, b):\n # coerce to a single type\n signed_to_unsigned = {\n np.byte: np.ubyte,\n np.short: np.ushort,\n np.intc: np.uintc,\n np.int_: np.uint,\n np.longlong: np.ulonglong\n }\n dt = np.result_type(a, b)\n try:\n dt = signed_to_unsigned[dt.type]\n except KeyError:\n return 
np.subtract(a, b, dtype=dt)\n else:\n # we know the inputs are integers, and we are deliberately casting\n # signed to unsigned\n return np.subtract(a, b, casting='unsafe', dtype=dt)", "title": "" }, { "docid": "8d7dd06ea6019ffe5aaba8147ea7ca3b", "score": "0.5441784", "text": "def __sub__(self, other):\n\n assert(self.uvdata.dtype == other.uvdata.dtype)\n assert(len(self.uvdata) == len(other.uvdata))\n\n self_copy = copy.deepcopy(self)\n self_copy.uvdata = self.uvdata - other.uvdata\n\n return self_copy", "title": "" }, { "docid": "9765ac599bc3300f2602f4eccb1892ab", "score": "0.54414874", "text": "def substract(inputs):\n return inputs[0]-inputs[1]", "title": "" }, { "docid": "1dee0ec76e376195070f2dd7b68ff9d7", "score": "0.54414856", "text": "def _array_std(data):\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.array(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data", "title": "" }, { "docid": "cfbd200e9d16148ba4cc0bd6887d2716", "score": "0.54362756", "text": "def normalize_data(array:np.array) -> np.array:\n \n normalizing_vector = array[:,66:72]#66:72 are the columns for lowerback\n for _ in range(21):\n normalizing_vector = np.hstack((normalizing_vector,array[:,66:72])) \n array = np.subtract(array,normalizing_vector) \n return array", "title": "" }, { "docid": "2350243c21ac184b64129a6c676bec34", "score": "0.54273844", "text": "def subtract(self, other):\r\n\r\n\t\treturn self.__sub__(other)", "title": "" }, { "docid": "6808e1a0ba91510764173b11a804ebb5", "score": "0.54219407", "text": "def __sub__(self, otherMatrixOrScalar):\n return self.__add__(-otherMatrixOrScalar)", "title": "" }, { "docid": "6243aec7fc44fd3f1913525f062db6da", "score": "0.54183954", "text": "def inv(self):\n return self.__class__([-t for t in self.data])", "title": "" }, { "docid": "18fbb67ac3212f5c2e838b7aa04cb02e", "score": "0.5395165", "text": "def __sub__(self, other):\n return self.difference(other)", "title": "" }, { "docid": "834b90248af64080bb69e2d4df8d3b21", "score": "0.53947526", "text": "def __sub__(self, other: Vector) -> Optional[Vector]:\n return self.subtract(other)", "title": "" }, { "docid": "e2e115344e33857bffea53d57bc1a3c9", "score": "0.53926975", "text": "def __neg__(self):\n return Matrix([[-e for e in row] for row in self])", "title": "" }, { "docid": "eca1f8106da89bf76007b2fe2d92bed5", "score": "0.5390095", "text": "def __sub__(self, other):\r\n\t\tif self.rows!=other.rows or self.columns!=other.rows:\r\n\t\t\traise ValueError(\"cannot subtract matrices with different sizes\")\r\n\t\telse:\r\n\t\t\tS = Matrix(self.dimensions)\r\n\t\t\tfor i in range(self.rows):\r\n\t\t\t\tfor j in range(self.columns):\r\n\t\t\t\t\tS[i,j] = self[i,j] - other[i,j]\r\n\t\t\treturn S", "title": "" }, { "docid": "cc96e5283409ab62c273cd7eddb993bc", "score": "0.5384097", "text": "async def matrix_sub(self, A, B, tr=False):\n A, B = [r[:] for r in A], [r[:] for r in B]\n n1, n2 = len(A), len(A[0])\n await self.returnType(type(A[0][0]), n1, n2)\n A, B = await self.gather(A, B)\n for i in range(n1):\n for j in range(n2):\n A[i][j] = A[i][j] - (B[j][i] if tr else B[i][j])\n return A", "title": "" }, { "docid": "ff79eff1fe20db53eb65fac8a1369112", "score": "0.5381328", "text": "def vector_subtract(x, y):\r\n return [x_i - y_i for x_i, y_i in zip(x, y)]", "title": "" }, { "docid": "30a3c2f33483390256cc90c97c7a28d7", "score": "0.5369111", "text": "def __sub__(self, other: Union[Matrix, float]) -> Matrix:\n if isinstance(other, Matrix):\n matrix = 
Matrix.reshape(\n [a - b for a, b in zip(self, other)], shape=self.shape\n )\n else:\n value = float(other)\n matrix = Matrix(\n matrix=[[item - value for item in row] for row in self.matrix]\n )\n return matrix", "title": "" }, { "docid": "792fcce176ef195ce30855f3438dfa46", "score": "0.5357667", "text": "def test_substract_array(ctx_factory):\r\n #test data\r\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)\r\n b = np.array([10, 20, 30, 40, 50,\r\n 60, 70, 80, 90, 100]).astype(np.float32)\r\n\r\n context = ctx_factory()\r\n queue = cl.CommandQueue(context)\r\n\r\n a_gpu = cl_array.to_device(queue, a)\r\n b_gpu = cl_array.to_device(queue, b)\r\n\r\n result = (a_gpu - b_gpu).get()\r\n assert (a - b == result).all()\r\n\r\n result = (b_gpu - a_gpu).get()\r\n assert (b - a == result).all()", "title": "" }, { "docid": "59b059c04f2ea5357eb4c0c4d1dca4aa", "score": "0.535476", "text": "def __sub__(self, other):\n if self.h != other.h or self.w != other.w:\n raise(ValueError, \"Matrices can only be subtracted if the dimensions are the same\") \n \n # TODO - your code here\n sub_value = []\n for i in range(self.h):\n #create an empty list row=[]\n row = []\n for j in range(self.w):\n row.append(self[i][j] - other[i][j])\n sub_value.append(row)\n \n return Matrix(sub_value)", "title": "" }, { "docid": "2cf5863f433409ed4d14825372484cbc", "score": "0.53339636", "text": "def subtract(vec_1, vec_2):\n\n # subtract two vectors \n\n return vec_3", "title": "" }, { "docid": "498ab29c60fcc82352858799ae3a19d7", "score": "0.5331507", "text": "def __sub__(self, offset: Union[Cartesian, Number]):\n if isinstance(offset, Number):\n return Cartesian.from_collection([x-offset for x in self])\n else:\n return Cartesian.from_collection([x-o for x, o in zip(self, offset)])", "title": "" }, { "docid": "ee5520d9c0a3317cc55cb270b9746560", "score": "0.53269637", "text": "def __invert__(self):\n v = +self\n for i in range(1, 4): v[i] = -v[i]\n return v", "title": "" }, { "docid": "f2267c11957254fc445289ebdbfaac32", "score": "0.53260595", "text": "def exclusion(a: np.ndarray, b: np.ndarray) -> np.ndarray:\n ab = a + b - 2 * a * b\n return ab", "title": "" }, { "docid": "0b80c4a3302c8df03abf5d0a83c37d01", "score": "0.53234893", "text": "def subtract(list1, list2):\r\n if len(list1) != len(list2):\r\n print('You used the function subtract with 2 vectors of different length')\r\n exit()\r\n else:\r\n return [list2[i] - list1[i] for i in range(len(list1))]", "title": "" }, { "docid": "ef680f81f8110dba7e0cd42d0bd79892", "score": "0.5319724", "text": "def standardise (array, y=None):\n\n # If DataFrame, extract relevant columns and call method again.\n if isinstance(array, pd.DataFrame):\n X = array[[VARX, VARY]].values.astype(np.float)\n return standardise(X)\n\n # If receiving separate arrays\n if y is not None:\n x = array\n assert x.shape == y.shape\n shape = x.shape\n X = np.vstack((x.flatten(), y.flatten())).T\n X = standardise(X)\n x,y = list(X.T)\n x = x.reshape(shape)\n y = y.reshape(shape)\n return x,y\n\n # Check(s)\n assert array.shape[1] == 2\n\n # Standardise\n X = np.array(array, dtype=np.float)\n for dim, var in zip([0,1], [VARX, VARY]):\n X[:,dim] -= float(AXIS[var][1])\n X[:,dim] /= float(AXIS[var][2] - AXIS[var][1])\n pass\n\n return X", "title": "" }, { "docid": "1e69496ae90490db012eef95ebec6398", "score": "0.53175443", "text": "def subtract(vec_1, vec_2):\n\n # subtract two vectors\n vec_3 = [float(vec_2[0]) - float(vec_1[0]), float(vec_2[1]) - float(vec_1[1]), float(vec_2[2]) - 
float(vec_1[2])]\n\n return vec_3", "title": "" }, { "docid": "53904a56d7e73dcfdb2872c299e8a2b7", "score": "0.52973336", "text": "def inverse_transformation(data):\n return -data", "title": "" }, { "docid": "6fffa9b53d08c281af4b42e13868b717", "score": "0.52952945", "text": "def __rsub__(self, lhs):\n\n if type(lhs) is not Matrix:\n raise ValueError(\"Two Matrices are required for subtraction\")\n elif self.size() != lhs.size():\n raise ValueError(\"Both Matrices must have the same size\")\n \n return self._sub(lhs, True)", "title": "" }, { "docid": "a9d45128fc526852537205926520d3fa", "score": "0.528837", "text": "def getArray(self):\n return self.array", "title": "" }, { "docid": "4bd2ccb9596e1bd4ecfba0efc30a3f33", "score": "0.528566", "text": "def __neg__(self):\n return self.__class__(-self[0], -self[1], -self[2], -self[3])", "title": "" }, { "docid": "a3c4dbff7c392ce11669721d43c71065", "score": "0.5285632", "text": "def sub(self, other):\n # Insert your code here\n # Initializing the Resultant Matrix\n Sub=[]\n for i in range(0,len(self.mat)):\n Sub.append([])\n for j in range(0,len(self.mat[0])):\n Sub[i].append(0)\n try: \n if len(self.mat)!=len(other):\n raise TypeError()\n elif len(self.mat[0])!=len(other[0]):\n raise TypeError()\n else:\n for i in range(0,len(self.mat)):\n for j in range(0,len(self.mat[0])):\n Sub[i][j]=self.mat[i][j]-other[i][j]\n print(\"Subtraction\",Sub)\n except:\n print(\"The Matrices are not compatible;enter compatible matrix\") \n return Sub", "title": "" }, { "docid": "04ab82ee28433868958f7790b0531a77", "score": "0.52728796", "text": "def copy(self): # -> Array:\n ...", "title": "" }, { "docid": "3ac134722ef91b2d15c8e4998efcf102", "score": "0.52688235", "text": "def SUB(self):\n a = self.POP()\n b = self.POP()\n c = b - a\n self.PUSH(b)\n self.PUSH(a)\n self.PUSH(c)", "title": "" }, { "docid": "c559a41ee42d55037e1f403f04cf6d7a", "score": "0.5263219", "text": "def resta_matrices(x,y):\n\tresultado = np.subtract(x, y)\n\treturn resultado", "title": "" }, { "docid": "7adea0e3dccd093ef05618ff15743b85", "score": "0.52587366", "text": "def __sub__(self, other):\n newObj = self.clone()\n if isinstance(other, G1DConnections):\n for i in xrange(len(newObj)):\n (newObj[i])[2] -= (other[i])[2]\n else:\n for i in xrange(len(newObj)):\n (newObj[i])[2] -= other\n return newObj", "title": "" }, { "docid": "d496ee9b5489d8df412bd5dc1509dc2e", "score": "0.52437323", "text": "def subtract(self, other: Vector, inplace: bool = False) -> Optional[Vector]:\n assert isinstance(\n other, Vector), 'item to be subtracted must be a Vector'\n assert len(self) == len(other), 'vectors should be the same length'\n\n sub_of_elements = [self_item - other_item for self_item, other_item in\n zip(self.items, other.items)]\n\n if inplace:\n self.items = sub_of_elements\n return None\n\n return Vector(sub_of_elements)", "title": "" }, { "docid": "3ca9791cbc9715c46a82683b4694c077", "score": "0.5238291", "text": "def __neg__(self):\n m = +self\n for i in range(0, 4):\n for j in range(0, 4): m[i, j] = -m[i, j]\n return m", "title": "" }, { "docid": "e233983d9508a95c7b47bf7c7ee66ee6", "score": "0.5238071", "text": "def normalize_array(arr):\n arr = arr - arr.min()\n arr /= arr.max()\n return arr", "title": "" }, { "docid": "99234d5d995a2eb9e0a1d9f756d3e9a6", "score": "0.523402", "text": "def flipped(self) -> np.ndarray:\r\n return np.fliplr(self)", "title": "" }, { "docid": "dbdfee58bad390440eaaf86321b8e0fe", "score": "0.5231598", "text": "def unarray(data, u_data):\n \n if len(data) != 
len(u_data):\n raise IndexError('Two arrays have different leght: {0} and {1}'.format(\n len(data),\n len(u_data),\n ))\n \n # Old Version\n # return numpy.array([unc.ufloat(data[i], u_data[i]) for i in range(0, len(data))])\n\n return unumpy.uarray(data, u_data)", "title": "" }, { "docid": "7a3c75162465d8b3ac0e5835e9d88cd3", "score": "0.5230885", "text": "def __sub__(self, other):\n vector_ = Vector(self.x - other.x, self.y - other.y, self.z - other.z)\n return vector_", "title": "" }, { "docid": "54803783b9f0a8d3c19517445ae4da2a", "score": "0.5230227", "text": "def array(self):\n return self._a", "title": "" }, { "docid": "f23cf59f0b630556a0bf69ee79f82eda", "score": "0.5221321", "text": "def profile_subtracted_image(self) -> aa.Array2D:\r\n return self.image - self.blurred_image", "title": "" }, { "docid": "5764afe3578d9f4be12d74c12cb72be3", "score": "0.52176505", "text": "def _arraylike_copy(arr):\n if type(arr) != numpy.ndarray:\n return numpy.array(arr)\n else:\n return arr.copy()", "title": "" }, { "docid": "5764afe3578d9f4be12d74c12cb72be3", "score": "0.52176505", "text": "def _arraylike_copy(arr):\n if type(arr) != numpy.ndarray:\n return numpy.array(arr)\n else:\n return arr.copy()", "title": "" }, { "docid": "6749e1fd1c04b1e7f855811fb97cb589", "score": "0.5213268", "text": "def __sub__(self, other):\n return Vector(self.x - other.x, self.y - other.y)", "title": "" }, { "docid": "a27f9343c04e65c7dde7052d914f949e", "score": "0.5211775", "text": "def inverse(self) -> Rigid3Array:\n inv_rotation = self.rotation.inverse()\n inv_translation = inv_rotation.apply_to_point(-self.translation)\n return Rigid3Array(inv_rotation, inv_translation)", "title": "" }, { "docid": "cb8458676cba00a16212639e4baaa353", "score": "0.5204715", "text": "def getData(self, copy=True):\n data = self._array if not copy else numpy.array(self._array, copy=True)\n return data", "title": "" }, { "docid": "07117f5d286c66cfdb5c4e4dca1b7006", "score": "0.51855296", "text": "def __array__(self):\n\n return self.data", "title": "" }, { "docid": "6545aed2b2fed8508238fb1c9a43dd82", "score": "0.51841396", "text": "def subtract(self, num1: float, num2: float = None) -> float:\n if num2 is None:\n result = self.memory - num1\n self.memory = result\n else:\n result = num1 - num2\n self.memory = result\n return result", "title": "" }, { "docid": "6d7f40bfe4e65bf9289c25553ed2fefb", "score": "0.51793605", "text": "def __array__(self):\n\n return self._data", "title": "" }, { "docid": "50f03428271369c260af59ca0f469106", "score": "0.5176461", "text": "def __sub__(self, other):\n temp_der = {}\n if isinstance(other, (int, float)):\n # Subtract a scalar from a AutoDiff object\n return AutoDiff(self.val - float(other), self.der.copy(), self.name)\n elif isinstance(other, AutoDiff):\n # Subtract two AutoDiff objects\n var_union = self.get_variables().union(other.get_variables())\n temp_val = self.val - other.val\n for variable in var_union:\n temp_der[variable] = self.der.get(variable, 0) - other.der.get(variable, 0)\n return AutoDiff(temp_val, temp_der, self.name)\n else:\n raise TypeError(\"Invalid input type!\")", "title": "" }, { "docid": "8a37182714daac18752b6ed11301341a", "score": "0.51696414", "text": "def reset(self) -> List[int]:\n self.array = self.original\n self.original = self.original[:]\n return self.array", "title": "" }, { "docid": "7a5209f8368dda8b4d3b8ab2dbad8365", "score": "0.51685274", "text": "def __sub__(self, rhs):\n\n if type(rhs) is not Matrix:\n raise ValueError(\"Two Matrices are required for 
subtraction\")\n elif self.size() != rhs.size():\n raise ValueError(\"Both Matrices must have the same size\")\n \n return self._sub(rhs)", "title": "" }, { "docid": "7500f5d043960a2910cc8cf6459934fd", "score": "0.5166", "text": "def inverse_transform(self, y: np.ndarray) -> np.ndarray:\n y = y.reshape(-1, *self._y_shape[1:])\n return y", "title": "" }, { "docid": "8c7f92e2cf891ff459bc66c367760fc2", "score": "0.51652855", "text": "def subtract(self, vector):\n return self.__sub__(vector)", "title": "" }, { "docid": "2de68e942a80d6b726953895fa4b6e82", "score": "0.5160659", "text": "def __sub__(self,other):\n result = self.duplicate()\n result -= other\n return result", "title": "" }, { "docid": "4443f726d9d6060ee084068f4acc04c3", "score": "0.51458955", "text": "def subtract(self, other):\n if not isinstance(other, PivotCounterBase):\n return NotImplemented\n result = self.unpivot()\n result.subtract(other.unpivot())\n return self.__class__(result)", "title": "" }, { "docid": "13e0cc5e3e0ed2fccb89b927afb83fa3", "score": "0.5145219", "text": "def __sub__(self, other):\n return self._mathematical_operator(self, other, operator.sub)", "title": "" }, { "docid": "13e0cc5e3e0ed2fccb89b927afb83fa3", "score": "0.5145219", "text": "def __sub__(self, other):\n return self._mathematical_operator(self, other, operator.sub)", "title": "" }, { "docid": "13e0cc5e3e0ed2fccb89b927afb83fa3", "score": "0.5145219", "text": "def __sub__(self, other):\n return self._mathematical_operator(self, other, operator.sub)", "title": "" }, { "docid": "13e0cc5e3e0ed2fccb89b927afb83fa3", "score": "0.5145219", "text": "def __sub__(self, other):\n return self._mathematical_operator(self, other, operator.sub)", "title": "" }, { "docid": "e758632a7e8aa725770d47e5bff509bf", "score": "0.5142554", "text": "def get_vector(x_array, y_array, pair):\n x = x_array[:,pair[0]]-x_array[:,pair[1]]\n y = y_array[:,pair[0]]-y_array[:,pair[1]]\n return [x, y]", "title": "" }, { "docid": "30b8c6e9ffbc32be1c6817a07b5d189d", "score": "0.51413035", "text": "def revert(self, y: np.ndarray) -> np.ndarray:\n return np.vectorize(self.revert_index.get)(y)", "title": "" }, { "docid": "60797871286c7f547aed551d6a9bee1b", "score": "0.51412827", "text": "def __isub__(self,other):\n self._data -= other\n return self", "title": "" }, { "docid": "7a9dd64889b05d8855a8c36648e11bd9", "score": "0.51404965", "text": "def sub(v1: Vec, v2: Vec) -> Vec:\n return [ (n1 - n2) for (n1, n2) in _zip(v1, v2) ]", "title": "" }, { "docid": "6b19dc239cb9c38e5f86c8f91e07035f", "score": "0.51388484", "text": "def cross(self, other: Vec3Array) -> Vec3Array:\n new_x = self.y * other.z - self.z * other.y\n new_y = self.z * other.x - self.x * other.z\n new_z = self.x * other.y - self.y * other.x\n return Vec3Array(new_x, new_y, new_z)", "title": "" }, { "docid": "e149a7720aeaf893a29cbbb1e76fe6af", "score": "0.51249355", "text": "def minus(self, d):\n if isinstance(d,Vector):\n v = d\n return (Vector(self.x - v.x, self.y - v.y, self.z - v.z))\n return (Vector(self.x - d, self.y - d, self.z - d))", "title": "" } ]
34735739c4aec822adaeec95f1a32577
Test resend stored flows.
[ { "docid": "85345835261afa6f632b05828456798e", "score": "0.71355134", "text": "def test_resend_stored_flows(self, mock_install_flows):\n dpid = \"00:00:00:00:00:00:00:01\"\n switch = get_switch_mock(dpid, 0x04)\n mock_event = MagicMock()\n flow = {\"command\": \"add\", \"flow\": MagicMock()}\n\n flows = {\"flow_list\": [flow]}\n mock_event.content = {\"switch\": switch}\n self.napp.controller.switches = {dpid: switch}\n self.napp.stored_flows = {dpid: flows}\n self.napp.resend_stored_flows(mock_event)\n mock_install_flows.assert_called()", "title": "" } ]
[ { "docid": "78c89020740b93fa4642991d3e31423f", "score": "0.58568835", "text": "def test_store_changed_flows(self, mock_save_flow, _):\n dpid = \"00:00:00:00:00:00:00:01\"\n switch = get_switch_mock(dpid, 0x04)\n switch.id = dpid\n flow = {\n \"priority\": 17,\n \"cookie\": 84114964,\n \"command\": \"add\",\n \"match\": {\"dl_dst\": \"00:15:af:d5:38:98\"},\n }\n match_fields = {\n \"priority\": 17,\n \"cookie\": 84114964,\n \"command\": \"add\",\n \"dl_dst\": \"00:15:af:d5:38:98\",\n }\n flows = {\"flow\": flow}\n\n command = \"add\"\n flow_list = {\n \"flow_list\": [\n {\"match_fields\": match_fields, \"command\": \"delete\",\n \"flow\": flow}\n ]\n }\n self.napp.stored_flows = {dpid: flow_list}\n self.napp._store_changed_flows(command, flows, switch)\n mock_save_flow.assert_called()\n\n self.napp.stored_flows = {}\n self.napp._store_changed_flows(command, flows, switch)\n mock_save_flow.assert_called()", "title": "" }, { "docid": "9f343c50161d29cf4a6ae70a601d487f", "score": "0.5760159", "text": "def test_outbound_permanent_failure(self):\n\n def _cb(*args, **kwargs):\n \"\"\"\n Callback handler that raises an error when called\n \"\"\"\n return defer.fail(ValueError(402, 'Payment Required'))\n\n # monkey patch so we can mock errors happening remotely\n self.transport.proxy = FakeXMLRPCService(_cb)\n\n # send a message to the transport which'll hit the FakeXMLRPCService\n # and as a result raise an error\n yield self.dispatch(self.mk_msg(),\n rkey='%s.outbound' % self.transport_name)\n\n [failure] = self.get_dispatched_failures()\n self.assertEqual(failure['failure_code'], 'permanent')", "title": "" }, { "docid": "987d96b0836a45dd8fdb88c17694e692", "score": "0.56061494", "text": "def test_delegates(self):\n resp = self.assert_status_code(200, method='POST',\n endpoint=self.endpoint + '?path=/new_path')\n self.assertEqual(resp, '')\n self.sch.reset.assert_called_once_with('/new_path')", "title": "" }, { "docid": "5b07579fcad717a3c68128d7451af420", "score": "0.56039166", "text": "def test_receive_workflow(self):\n data = [(\"send_ref\",\n \"WB_TEST_000001\",\n ),\n (\"purchase_ref\",\n \"PO_TEST_000001\",\n ),\n (\"site_id\",\n \"Same Warehouse\",\n \"autocomplete\",\n ),\n (\"type\",\n \"Other Warehouse\",\n \"option\",\n )\n ]\n result = self.helper_inv_receive(\"normal\", data)\n send_id = self.helper_inv_recv_get_id(result)\n\n data = [(\"item_id\",\n \"Blankets\",\n \"supply_widget\",\n ),\n (\"item_pack_id\",\n \"Piece\",\n \"option\",\n ),\n (\"quantity\",\n \"3\",\n ),\n ]\n result = self.helper_inv_track_item(\"normal\", send_id, data)\n result = self.helper_inv_recv_shipment(\"normal\", send_id, data)", "title": "" }, { "docid": "9dc5c2a77dac5c64e4884ba9d8013907", "score": "0.556723", "text": "def test_apply_retract(self):\n self.project.Status = 3\n self.project.EndDate = datetime.now().date() + timedelta(days=2)\n self.project.Progress = None\n self.project.Apply = 'system'\n self.project.save()\n\n # student\n s = self.users.get('r-s')\n\n # Test apply\n view = \"students:apply\"\n self.client.force_login(s)\n response = self.client.get(reverse(view, kwargs={\"pk\": self.p}))\n self.assertEqual(response.status_code, 200, msg=\"Student cannot apply to project!\")\n self.assertTrue(Application.objects.exists(), msg=\"Application is not made!\")\n\n # Test retract\n view = \"students:retractapplication\"\n app = Application.objects.get(Student=s)\n response = self.client.get(reverse(view, kwargs={\"application_id\": app.id}))\n self.assertEqual(response.status_code, 200, 
msg=\"Student cannot retract application!\")\n self.assertFalse(Application.objects.exists(), msg=\"Application is not retracted!\")\n\n self.client.logout()", "title": "" }, { "docid": "cde4225f64e87493e4c385d0a4545e5b", "score": "0.5499765", "text": "def test_submit_self_service_recovery_flow(self):\n pass", "title": "" }, { "docid": "42c24e7cea0bc1b58403802333c02f14", "score": "0.5469508", "text": "def test_invalid_finalize_demand(self):\n #close demand first\n self.test_valid_finalize_demand()\n #try to close again\n url = reverse('demand-finalize', kwargs={'pk': self.demand1.pk})\n response = client.put(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(response.data, {'status': 'Demand has been already closed!'})", "title": "" }, { "docid": "910c304b0ce882477ea7b83988900881", "score": "0.54488003", "text": "def test_outbound_temporary_failure(self):\n\n def _cb(*args, **kwargs):\n \"\"\"\n Callback handler that raises an error when called\n \"\"\"\n return defer.fail(xmlrpc.Fault(503, 'oh noes!'))\n\n # monkey patch so we can mock errors happening remotely\n self.transport.proxy = FakeXMLRPCService(_cb)\n\n # send a message to the transport which'll hit the FakeXMLRPCService\n # and as a result raise an error\n yield self.dispatch(self.mk_msg(),\n rkey='%s.outbound' % self.transport_name)\n\n self.assertEqual(self.get_dispatched_events(), [])\n self.assertEqual(self.get_dispatched_messages(), [])\n [failure] = self.get_dispatched_failures()\n self.assertEqual(failure['failure_code'], 'temporary')\n original_msg = failure['message']\n self.assertEqual(original_msg['to_addr'], '27761234567')\n self.assertEqual(original_msg['from_addr'], '27761234567')\n self.assertEqual(original_msg['content'], 'hello world')", "title": "" }, { "docid": "cc4872ddead2d9894fb0250bcfb0ca9b", "score": "0.5439106", "text": "def test_handle_rest_confirm_success(\n mock_send_reply, make_handler_params,\n):\n params = make_handler_params(\"reset confirm\")\n handle_reset(params)\n\n params.storage.put.assert_called_with(params.storage.PLANS_ENTRY, {})\n mock_send_reply.assert_not_called()", "title": "" }, { "docid": "8747eb94984f43f5be7b65bb7920e8c4", "score": "0.5420123", "text": "def test_no_strict_delete(self, *args):\n (mock_save_flow, _, _) = args\n dpid = \"00:00:00:00:00:00:00:01\"\n switch = get_switch_mock(dpid, 0x04)\n switch.id = dpid\n stored_flow = {\n \"command\": \"add\",\n \"flow\": {\n \"actions\": [{\"action_type\": \"set_vlan\", \"vlan_id\": 300}],\n \"cookie\": 6191162389751548793,\n \"match\": {\"dl_vlan\": 300, \"in_port\": 1},\n },\n }\n stored_flow2 = {\n \"command\": \"add\",\n \"flow\": {\n \"actions\": [],\n \"cookie\": 4961162389751548787,\n \"match\": {\"in_port\": 2},\n },\n }\n flow_to_install = {\n \"cookie\": 6191162389751548793,\n \"cookie_mask\": 18446744073709551615,\n }\n flow_list = {\"flow_list\": [stored_flow, stored_flow2]}\n command = \"delete\"\n self.napp.stored_flows = {dpid: flow_list}\n\n self.napp._store_changed_flows(command, flow_to_install, switch)\n mock_save_flow.assert_called()\n self.assertEqual(len(self.napp.stored_flows), 1)", "title": "" }, { "docid": "0f8be24ac64e7d8f94d75d10676ef64d", "score": "0.5388894", "text": "def test_rest_add_and_delete_without_dpid(self, mock_install_flows):\n api = get_test_client(self.napp.controller, self.napp)\n\n for method in ['flows', 'delete']:\n url = f'{self.API_URL}/v2/{method}'\n\n response_1 = api.post(url, json={'flows': [{\"priority\": 25}]})\n response_2 = 
api.post(url)\n\n self.assertEqual(response_1.status_code, 202)\n self.assertEqual(response_2.status_code, 400)\n\n self.assertEqual(mock_install_flows.call_count, 2)", "title": "" }, { "docid": "1d537eaeaf3d301492c418a01752ccad", "score": "0.5373466", "text": "def test_requeue_on_failure(self):\n first_test = get_test(self.server, 'runner1')\n assert_equal(first_test['class_path'], 'test.test_runner_server_test DummyTestCase')\n assert_equal(first_test['methods'], ['test'])\n\n self.run_test('runner1', should_pass=False)\n\n second_test = get_test(self.server, 'runner2')\n assert_equal(second_test['class_path'], 'test.test_runner_server_test DummyTestCase')\n assert_equal(second_test['methods'], ['test'])\n\n self.run_test('runner2', should_pass=False)\n\n assert_equal(get_test(self.server, 'runner3'), None)", "title": "" }, { "docid": "9937addce18abb8395442379d2692bed", "score": "0.53324246", "text": "def test_closedPotentialDataLoss(self):\r\n self.protocol.connectionLost(failure.Failure(PotentialDataLoss()))\r\n return self.protocol.deferred", "title": "" }, { "docid": "22f10341795b6170e37381a6a5495ac4", "score": "0.5328665", "text": "def RedoTest(self):\n Debug(\"> RedoTest\")\n try:\n msg = _Msg(self._id, _MsgType.RedoTest)\n msg = pickle.loads(self._agent.redoTest(pickle.dumps(msg)))\n except Exception as e:\n self.Reconnect()\n raise RedoTestException(\"Communication error with Agent %s\" % self._name)\n if msg.type != _MsgType.Ack:\n raise PeachException(\"Lost connection to Agent %s during RedoTest call.\" % self._name)\n Debug(\"< RedoTest\")\n\n return msg.results", "title": "" }, { "docid": "2d36671e871219e3d1d958d2ae8ad2d0", "score": "0.5318892", "text": "def resend(event, context):\n target_url = os.environ.get('TARGET_URL')\n resp = requests.post(target_url, data = event['data'])\n\n print(resp.status_code)", "title": "" }, { "docid": "e74250f7074330c57354feea6fb00168", "score": "0.5314571", "text": "def test_failing_passing(self):\n # cover failing delivery\n self._notification_run()", "title": "" }, { "docid": "ab6b4014a110b3dcd59cbbdb58756b1c", "score": "0.52899975", "text": "def test_get_self_service_recovery_flow(self):\n pass", "title": "" }, { "docid": "2dcf2f5a368531bf1e52f8afb61779eb", "score": "0.52837104", "text": "def test_stream_request_backfill_deleted(self):\n\n response = self.dcp_client.open_producer(\"mystream\")\n assert response['status'] == SUCCESS\n\n resp = self.mcd_client.stats('failovers')\n vb_uuid = long(resp['vb_0:0:id'])\n\n # set 6 items and delete the first 2\n self.mcd_client.set('key1', 0, 0, 'value', 0)\n self.mcd_client.set('key2', 0, 0, 'value', 0)\n self.mcd_client.set('key3', 0, 0, 'value', 0)\n self.mcd_client.set('key4', 0, 0, 'value', 0)\n self.mcd_client.set('key5', 0, 0, 'value', 0)\n self.mcd_client.set('key6', 0, 0, 'value', 0)\n self.wait_for_persistence(self.mcd_client)\n self.mcd_client.delete('key1', 0, 0)\n self.mcd_client.delete('key2', 0, 0)\n\n backfilling = False\n tries = 10\n while not backfilling and tries > 0:\n # stream request until backfilling occurs\n self.dcp_client.stream_req(0, 0, 0, 5,\n vb_uuid)\n stats = self.mcd_client.stats('dcp')\n num_backfilled = \\\n int(stats['eq_dcpq:mystream:stream_0_backfilled'])\n backfilling = num_backfilled > 0\n tries -= 1\n self.sleep(2)\n\n assert backfilling, \"ERROR: backfill task did not start\"\n\n # attempt to stream deleted mutations\n stream = self.dcp_client.stream_req(0, 0, 0, 3, vb_uuid)\n response = stream.next_response()", "title": "" }, { "docid":
"908d28b0de3ae9d81a94d9a21d775101", "score": "0.5272425", "text": "def test_rest_add_and_delete_with_dpid(self, mock_install_flows):\n api = get_test_client(self.napp.controller, self.napp)\n data = {'flows': [{\"priority\": 25}]}\n for method in ['flows', 'delete']:\n url_1 = f'{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:01'\n url_2 = f'{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:02'\n\n response_1 = api.post(url_1, json=data)\n response_2 = api.post(url_2, json=data)\n\n self.assertEqual(response_1.status_code, 202)\n if method == 'delete':\n self.assertEqual(response_2.status_code, 202)\n\n self.assertEqual(mock_install_flows.call_count, 3)", "title": "" }, { "docid": "cf6babd898d88be81dafd6ff7f44dea0", "score": "0.52660155", "text": "def teardown_flow(self):\n self._exec_flow_function('teardown', (self, ))\n self.state = 'destroyed'\n self.save()", "title": "" }, { "docid": "ceff200ea4eb45264b0cb609f402ca68", "score": "0.52543545", "text": "def test_reroute_queue(self):\n self.startQmf()\n session = self.session\n \"Set up test queue\"\n session.exchange_declare(exchange=\"alt.direct1\", type=\"direct\")\n session.queue_declare(queue=\"alt-queue1\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"alt-queue1\", exchange=\"alt.direct1\", binding_key=\"routing_key\")\n session.exchange_declare(exchange=\"alt.direct2\", type=\"direct\")\n session.queue_declare(queue=\"alt-queue2\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"alt-queue2\", exchange=\"alt.direct2\", binding_key=\"routing_key\")\n session.queue_declare(queue=\"reroute-queue\", exclusive=True, auto_delete=True, alternate_exchange=\"alt.direct1\")\n session.exchange_bind(queue=\"reroute-queue\", exchange=\"amq.direct\", binding_key=\"routing_key\")\n\n twenty = range(1,21)\n props = session.delivery_properties(routing_key=\"routing_key\")\n mp = session.message_properties(application_headers={'x-qpid.trace' : 'A,B,C'})\n for count in twenty:\n body = \"Reroute Message %d\" % count\n msg = Message(props, mp, body)\n session.message_transfer(destination=\"amq.direct\", message=msg)\n\n pq = self.qmf.getObjects(_class=\"queue\", name=\"reroute-queue\")[0]\n\n \"Reroute top message from reroute-queue to alternate exchange\"\n result = pq.reroute(1, True, \"\", {})\n self.assertEqual(result.status, 0) \n pq.update()\n aq = self.qmf.getObjects(_class=\"queue\", name=\"alt-queue1\")[0]\n self.assertEqual(pq.msgDepth,19)\n self.assertEqual(aq.msgDepth,1)\n\n \"Verify that the trace was cleared on the rerouted message\"\n url = \"%s://%s:%d\" % (self.broker.scheme or \"amqp\", self.broker.host, self.broker.port or 5672)\n conn = qpid.messaging.Connection(url)\n conn.open()\n sess = conn.session()\n rx = sess.receiver(\"alt-queue1;{mode:browse}\")\n rm = rx.fetch(1)\n self.assertEqual(rm.properties['x-qpid.trace'], '')\n conn.close()\n\n \"Reroute top 9 messages from reroute-queue to alt.direct2\"\n result = pq.reroute(9, False, \"alt.direct2\", {})\n self.assertEqual(result.status, 0) \n pq.update()\n aq = self.qmf.getObjects(_class=\"queue\", name=\"alt-queue2\")[0]\n self.assertEqual(pq.msgDepth,10)\n self.assertEqual(aq.msgDepth,9)\n\n \"Reroute using a non-existent exchange\"\n result = pq.reroute(0, False, \"amq.nosuchexchange\", {})\n self.assertEqual(result.status, 4)\n\n \"Reroute all messages from reroute-queue\"\n result = pq.reroute(0, False, \"alt.direct2\", {})\n self.assertEqual(result.status, 0) \n pq.update()\n aq = self.qmf.getObjects(_class=\"queue\", 
name=\"alt-queue2\")[0]\n self.assertEqual(pq.msgDepth,0)\n self.assertEqual(aq.msgDepth,19)\n\n \"Make more messages\"\n twenty = range(1,21)\n props = session.delivery_properties(routing_key=\"routing_key\")\n for count in twenty:\n body = \"Reroute Message %d\" % count\n msg = Message(props, body)\n session.message_transfer(destination=\"amq.direct\", message=msg)\n\n \"Reroute onto the same queue\"\n result = pq.reroute(0, False, \"amq.direct\", {})\n self.assertEqual(result.status, 0) \n pq.update()\n self.assertEqual(pq.msgDepth,20)", "title": "" }, { "docid": "7f636d98927aee89b758bd2a93c6314b", "score": "0.52429664", "text": "def test_prepare_requeue(self):\n pass", "title": "" }, { "docid": "04810baf72d5ba160102bf707e450e62", "score": "0.52379906", "text": "async def test_webpage_reload(options):\r\n web = WebGear_RTC(source=return_testvideo_path(), logging=True, **options)\r\n try:\r\n # run webgear_rtc\r\n async with TestClient(web()) as client:\r\n response = await client.get(\"/\")\r\n assert response.status_code == 200\r\n\r\n # create offer and receive\r\n (offer_pc, data) = await get_RTCPeer_payload()\r\n response_rtc_answer = await client.post(\r\n \"/offer\",\r\n data=data,\r\n headers={\"Content-Type\": \"application/json\"},\r\n )\r\n params = response_rtc_answer.json()\r\n answer = RTCSessionDescription(sdp=params[\"sdp\"], type=params[\"type\"])\r\n await offer_pc.setRemoteDescription(answer)\r\n response_rtc_offer = await client.get(\r\n \"/offer\",\r\n data=data,\r\n headers={\"Content-Type\": \"application/json\"},\r\n )\r\n assert response_rtc_offer.status_code == 200\r\n # simulate webpage reload\r\n response_rtc_reload = await client.post(\r\n \"/close_connection\",\r\n data=\"0\",\r\n )\r\n # close offer\r\n await offer_pc.close()\r\n offer_pc = None\r\n data = None\r\n # verify response\r\n logger.debug(response_rtc_reload.text)\r\n assert response_rtc_reload.text == \"OK\", \"Test Failed!\"\r\n\r\n # recreate offer and continue receive\r\n (offer_pc, data) = await get_RTCPeer_payload()\r\n response_rtc_answer = await client.post(\r\n \"/offer\",\r\n data=data,\r\n headers={\"Content-Type\": \"application/json\"},\r\n )\r\n params = response_rtc_answer.json()\r\n answer = RTCSessionDescription(sdp=params[\"sdp\"], type=params[\"type\"])\r\n await offer_pc.setRemoteDescription(answer)\r\n response_rtc_offer = await client.get(\r\n \"/offer\",\r\n data=data,\r\n headers={\"Content-Type\": \"application/json\"},\r\n )\r\n assert response_rtc_offer.status_code == 200\r\n # shutdown\r\n await offer_pc.close()\r\n except Exception as e:\r\n if \"enable_live_broadcast\" in options and isinstance(\r\n e, (AssertionError, MediaStreamError)\r\n ):\r\n pytest.xfail(\"Test Passed\")\r\n else:\r\n pytest.fail(str(e))\r\n finally:\r\n web.shutdown()", "title": "" }, { "docid": "ef0212341a2b6b35a7741009e9c56a2b", "score": "0.52290714", "text": "def renegotiate_pending(self):", "title": "" }, { "docid": "5b8e3e776a33fb901fc24b88b4d53743", "score": "0.5227289", "text": "def test_verify_reset(self):\n pass", "title": "" }, { "docid": "e25ca1a6dcd0cbc7c4364d4b1ff19390", "score": "0.5226039", "text": "def test_change_pending_to_rejected(self):\n workflow = Workflow('pending')\n workflow.on_event('reject')\n self.assertEqual(workflow.state, RejectedState())", "title": "" }, { "docid": "502b7fda50404802e5d6a44c881dda62", "score": "0.5225899", "text": "def test_for_RegistryService_finishTask_method():\n\n message = {'a': 'b'}\n\n instance = RegistryService(message=message, 
job_id=uuid4())\n instance.startTask()\n instance.finishTask()\n\n instance.task.refresh_from_db()\n\n assert instance.task\n assert instance.task.status == 'DONE'", "title": "" }, { "docid": "545a6df17b0a8b616a113fab0ed47785", "score": "0.5211458", "text": "def test_retriable_error_occurs_and_retry_failed(self):\n events = [copy.deepcopy(_event_test_transaction)] * 2\n\n first_response = _create_api_response([_RETRIABLE_ERR, _SUCCESS])\n rest_response = _create_api_response([_RETRIABLE_ERR])\n self.mock_service.mutate.side_effect = [\n first_response, rest_response, rest_response, rest_response,\n rest_response\n ]\n blb = blob.Blob(events=events, location='')\n\n blb = self.test_hook.send_events(blb)\n\n self.assertEqual(self.mock_service.mutate.call_count, 5)\n self.assertEqual(len(blb.failed_events), 1)", "title": "" }, { "docid": "53e6c34357f104f4558af46ab7fcf3d9", "score": "0.5199748", "text": "def test_end_to_end():\n\n base_url = os.environ.get('E2E_URL', 'http://localhost:8080')\n\n # Use retry because it will take some indeterminate time for the pub/sub\n # message to be processed.\n @retry(wait_exponential_multiplier=2000, stop_max_attempt_number=3)\n def test_request():\n # Check that the book's information was updated.\n print(\"connecting to {}...\".format(base_url))\n response = requests.get(base_url)\n assert response.status_code == 200\n assert response.text == \"Hello World\"\n\n # Run tests\n try:\n test_request()\n finally:\n # No cleanup necessary\n pass", "title": "" }, { "docid": "0284576a78bee0d3ff3612967f2746a1", "score": "0.51760745", "text": "def test_end_channel(self):\n pass", "title": "" }, { "docid": "55997d034e33bfcc8f547365c207a337", "score": "0.51709574", "text": "async def test_retry_change_of_status(app, session, mocker):\n filing_id, business_id = create_registration_data('SP', tax_id='993775204BC0001')\n json_filing = {\n 'filing': {\n 'header': {\n 'name': 'dissolution'\n },\n 'dissolution': {}\n }\n }\n filing = create_filing(json_filing=json_filing, business_id=business_id)\n filing._filing_type = 'dissolution'\n filing.save()\n filing_id = filing.id\n\n mocker.patch('entity_bn.bn_processors.dissolution_or_put_back_on.request_bn_hub', return_value=(500, ''))\n\n for _ in range(10):\n try:\n await process_event({\n 'type': 'bc.registry.business.dissolution',\n 'data': {\n 'filing': {\n 'header': {'filingId': filing_id}\n }\n }\n }, app)\n\n except BNException:\n continue\n except BNRetryExceededException:\n break\n\n request_trackers = RequestTracker.find_by(business_id,\n RequestTracker.ServiceName.BN_HUB,\n RequestTracker.RequestType.CHANGE_STATUS,\n filing_id=filing_id)\n assert request_trackers\n assert len(request_trackers) == 1\n assert request_trackers[0].is_processed is False\n assert request_trackers[0].retry_number == 9", "title": "" }, { "docid": "2e8d6a913ddaa4b98988dd31caf76987", "score": "0.51601505", "text": "def test_confirm_ring(self):\n pass", "title": "" }, { "docid": "fc6ff4312173dce84e3c02cf2573053a", "score": "0.5159829", "text": "def test_basic_workflow(self, config):\n user_id = str(uuid4())\n case_id = str(uuid4())\n user = dummy_user(user_id)\n\n initial_payload = get_restore_payload(config.restore_url, config.domain, user)\n synclog_id = synclog_id_from_restore_payload(\n initial_payload\n )\n\n # payload should not contain any cases\n check_xml_line_by_line(\n dummy_restore_xml(user, synclog_id, items=3),\n initial_payload,\n )\n\n factory = CaseFactory(\n config.receiver_url,\n domain=config.domain,\n form_extras={\n
'user_id': user_id,\n }\n )\n case_attrs = {\n 'create': True,\n 'user_id': user_id,\n 'owner_id': user_id,\n 'case_type': 'gangster',\n 'case_name': 'Fish',\n 'update': {'last_name': 'Mooney'}\n }\n factory.create_or_update_case(\n CaseStructure(case_id, attrs=case_attrs),\n form_extras={'headers': {'last_sync_token': synclog_id}}\n )\n\n restore_payload = get_restore_payload(config.restore_url, config.domain, user, since=synclog_id)\n new_synclog_id = synclog_id_from_restore_payload(restore_payload)\n # restore still does not contain case\n check_xml_line_by_line(\n dummy_restore_xml(user, new_synclog_id, items=3),\n restore_payload,\n )\n\n # update the case\n case_updates = {'cover_job': 'restaurant owner'}\n date_modified = datetime.utcnow()\n factory.create_or_update_case(\n CaseStructure(case_id, attrs={'update': case_updates, 'date_modified': date_modified}),\n form_extras={\n 'user_id': user_id,\n # 'headers': {\n # 'last_sync_token': new_synclog_id\n }#}\n )\n\n restore_payload = get_restore_payload(config.restore_url, config.domain, user, since=new_synclog_id)\n new_new_synclog_id = synclog_id_from_restore_payload(restore_payload)\n\n case_attrs['create'] = False\n case_attrs['update'].update(case_updates)\n case_block = CaseBlock(case_id, date_modified=date_modified, **case_attrs)\n # restore contain case\n check_xml_line_by_line(\n dummy_restore_xml(user, new_new_synclog_id, case_xml=case_block.as_string(), items=4),\n restore_payload,\n )", "title": "" }, { "docid": "4b563f1055b7f209bfbbb601b09112fd", "score": "0.51565135", "text": "def test_automated_refund_task(session):\n DirectPayAutomatedRefundTask().process_cc_refunds()\n assert True", "title": "" }, { "docid": "b109fedde1c9cf56a8ec58268bce87aa", "score": "0.5152632", "text": "def test_refund_transfer_after_2nd_hop(\n raiden_chain,\n token_addresses,\n deposit,\n network_wait,\n):\n # Topology:\n #\n # 0 -> 1 -> 2 -> 3\n #\n app0, app1, app2, app3 = raiden_chain\n token_address = token_addresses[0]\n payment_network_identifier = app0.raiden.default_registry.address\n token_network_identifier = views.get_token_network_identifier_by_token_address(\n views.state_from_app(app0),\n payment_network_identifier,\n token_address,\n )\n\n # make a transfer to test the path app0 -> app1 -> app2 -> app3\n identifier_path = 1\n amount_path = 1\n mediated_transfer(\n app0,\n app3,\n token_network_identifier,\n amount_path,\n identifier_path,\n timeout=network_wait,\n )\n\n # drain the channel app2 -> app3\n identifier_drain = 2\n amount_drain = deposit * 8 // 10\n direct_transfer(\n app2,\n app3,\n token_network_identifier,\n amount_drain,\n identifier_drain,\n timeout=network_wait,\n )\n\n # wait for the nodes to sync\n gevent.sleep(0.2)\n\n assert_synched_channel_state(\n token_network_identifier,\n app0, deposit - amount_path, [],\n app1, deposit + amount_path, [],\n )\n assert_synched_channel_state(\n token_network_identifier,\n app1, deposit - amount_path, [],\n app2, deposit + amount_path, [],\n )\n assert_synched_channel_state(\n token_network_identifier,\n app2, deposit - amount_path - amount_drain, [],\n app3, deposit + amount_path + amount_drain, [],\n )\n\n # app0 -> app1 -> app2 > app3 is the only available path, but the channel\n # app2 -> app3 doesn't have capacity, so a refund will be sent on\n # app2 -> app1 -> app0\n identifier_refund = 3\n amount_refund = 50\n async_result = app0.raiden.mediated_transfer_async(\n token_network_identifier,\n amount_refund,\n app3.raiden.address,\n identifier_refund,\n )\n assert 
async_result.wait() is False, 'there is no path with capacity, the transfer must fail'\n\n gevent.sleep(0.2)\n\n # Lock structures with the correct amount\n\n send_locked1 = raiden_events_must_contain_entry(\n app0.raiden,\n SendLockedTransfer,\n {'transfer': {'lock': {'amount': amount_refund}}},\n )\n assert send_locked1\n\n send_refund1 = raiden_events_must_contain_entry(app1.raiden, SendRefundTransfer, {})\n assert send_refund1\n\n lock1 = send_locked1.transfer.lock\n refund_lock1 = send_refund1.lock\n assert lock1.amount == refund_lock1.amount\n assert lock1.secrethash == refund_lock1.secrethash\n\n send_locked2 = raiden_events_must_contain_entry(\n app1.raiden,\n SendLockedTransfer,\n {'transfer': {'lock': {'amount': amount_refund}}},\n )\n assert send_locked2\n\n send_refund2 = raiden_events_must_contain_entry(app2.raiden, SendRefundTransfer, {})\n assert send_refund2\n\n lock2 = send_locked2.transfer.lock\n refund_lock2 = send_refund2.lock\n assert lock2.amount == refund_lock2.amount\n assert lock2.secrethash\n assert lock2.expiration\n\n # channels have the amount locked because of the refund message\n assert_synched_channel_state(\n token_network_identifier,\n app0, deposit - amount_path, [lockstate_from_lock(lock1)],\n app1, deposit + amount_path, [lockstate_from_lock(refund_lock1)],\n )\n assert_synched_channel_state(\n token_network_identifier,\n app1, deposit - amount_path, [lockstate_from_lock(lock2)],\n app2, deposit + amount_path, [lockstate_from_lock(refund_lock2)],\n )\n assert_synched_channel_state(\n token_network_identifier,\n app2, deposit - amount_path - amount_drain, [],\n app3, deposit + amount_path + amount_drain, [],\n )", "title": "" }, { "docid": "0917d90e9f54f88e3135b7f70b69dc7c", "score": "0.5147368", "text": "def test_sends_reset():\n listener = MockListener()\n\n packet_log = rdpcap(\"test/inputs/tiniest-session.pcap\")\n listener, conn = create_session(packet_log)\n conn._close()\n\n syn_ack = packet_log[1]\n listener.dispatch(syn_ack)\n\n last_packet = listener.received_packets[-1]\n assert last_packet.sprintf(\"%TCP.flags%\") == \"R\"", "title": "" }, { "docid": "ac48878ae138adec1f715b35e03f03dd", "score": "0.514189", "text": "def test_change_rejected_to_rejected(self):\n workflow = Workflow('rejected')\n workflow.on_event('reject')\n self.assertEqual(workflow.state, RejectedState())", "title": "" }, { "docid": "eaa1b74e94b901dd4b2039a864974a5d", "score": "0.51320595", "text": "def test_send(self):\n pass", "title": "" }, { "docid": "2359c9631d6141cc1e56941801045576", "score": "0.5126732", "text": "async def test_should_not_reprocess_completed_message(tracker_app, tracker_db, session):\n message_id = '16fd2111-8baf-433b-82eb-8c7fada847aa'\n message_payload = {\n 'specversion': '1.x-wip',\n 'type': 'bc.registry.names.request',\n 'source': 'nr_pay',\n 'id': message_id,\n 'time': '',\n 'datacontenttype': 'application/json',\n 'identifier': '781020202',\n 'data': {\n 'header': {'nrNum': '781020202'},\n 'paymentToken': '234234234324asjdkfhjsdhf23949239423',\n 'statusCode': 'PAID'\n }\n }\n mock_msg = create_mock_message(message_payload)\n\n # mock out process_email function to return true to simulate successful email processing\n with patch.object(worker, 'process_email', return_value=True):\n await worker.cb_subscription_handler(mock_msg)\n result_1st_time = MessageProcessing.find_message_by_message_id(message_id=message_id)\n assert result_1st_time.message_seen_count == 1\n\n await worker.cb_subscription_handler(mock_msg)\n result_2nd_time = 
MessageProcessing.find_message_by_message_id(message_id=message_id)\n assert result_2nd_time.message_seen_count == 1\n assert result_2nd_time.last_update == result_1st_time.last_update\n assert result_2nd_time.status == 'COMPLETE'", "title": "" }, { "docid": "780d4284ad60bc843c188ab056790a70", "score": "0.5122653", "text": "def test_changePhase_response(self):\n print '\\nStart - Test: changePhase'\n login = self.client.login(username='admin', password='admin')\n self.assertTrue(login)\n\n phase = Fase.objects.get(nombre='Fase01')\n dato_fase_mod = {'nombre': 'Fase_Prueba_modificado', 'estado': 'PEN', 'descripcion': 'Fase Test'}\n request = self.factory.post('/changephase/', dato_fase_mod)\n request.user = self.user\n response = changePhase(request, phase.id)\n\n if response == None:\n print 'Expected error: The specified phase does not exist\\nPlease verify the entered data'\n else:\n self.assertEqual(response.status_code, 302, 'Error modifying the Phase')\n\n print 'Phase modified successfully'\n print Fase.objects.all()\n print 'End - Test: changePhase\\n'\n\n print 'Start - Test: changePhase(EXPECTED ERROR)'\n response = changePhase(request, 3)\n\n if response == None:\n print 'Expected error: The specified phase does not exist\\nPlease verify the entered data'\n\n print 'End - Test: changePhase(EXPECTED ERROR)\\n'", "title": "" }, { "docid": "27cd8171876c1daa59d4703b7cdb778b", "score": "0.5117649", "text": "def test_can_re_invite_a_past_team_member(self):\n previous_invite = InvitationFactory(\n to_team=self.team_a, made_by=self.team_a_admin, accepted=True\n )\n self.client.force_login(user=self.team_a_admin)\n data = {\n \"email\": previous_invite.email,\n \"message\": \"would you like to join again?\",\n }\n response = self.client.post(\n reverse(self.path_name, kwargs={\"team_id\": self.team_a.pk}), data=data\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(\n Invitation.objects.filter(\n to_team=self.team_a, email=previous_invite.email\n ).count(),\n 2,\n )", "title": "" }, { "docid": "4222cd1797dfc446485f5cc18b702a7f", "score": "0.51123244", "text": "def test_refund_transfer(raiden_chain, token_addresses, deposit, network_wait):\n # Topology:\n #\n # 0 -> 1 -> 2\n #\n app0, app1, app2 = raiden_chain\n token_address = token_addresses[0]\n payment_network_identifier = app0.raiden.default_registry.address\n token_network_identifier = views.get_token_network_identifier_by_token_address(\n views.state_from_app(app0),\n payment_network_identifier,\n token_address,\n )\n\n # make a transfer to test the path app0 -> app1 -> app2\n identifier_path = 1\n amount_path = 1\n mediated_transfer(\n app0,\n app2,\n token_network_identifier,\n amount_path,\n identifier_path,\n timeout=network_wait,\n )\n\n # drain the channel app1 -> app2\n identifier_drain = 2\n amount_drain = deposit * 8 // 10\n direct_transfer(\n app1,\n app2,\n token_network_identifier,\n amount_drain,\n identifier_drain,\n timeout=network_wait,\n )\n\n # wait for the nodes to sync\n gevent.sleep(0.2)\n\n assert_synched_channel_state(\n token_network_identifier,\n app0, deposit - amount_path, [],\n app1, deposit + amount_path, [],\n )\n assert_synched_channel_state(\n token_network_identifier,\n app1, deposit - amount_path - amount_drain, [],\n app2, deposit + amount_path + amount_drain, [],\n )\n\n # app0 -> app1 -> app2 is the only available path, but the channel app1 ->\n # app2 doesn't have capacity, so a refund will be sent on app1 -> app0\n identifier_refund
= 3\n amount_refund = 50\n async_result = app0.raiden.mediated_transfer_async(\n token_network_identifier,\n amount_refund,\n app2.raiden.address,\n identifier_refund,\n )\n assert async_result.wait() is False, 'there is no path with capacity, the transfer must fail'\n\n gevent.sleep(0.2)\n\n # A lock structure with the correct amount\n\n send_locked = raiden_events_must_contain_entry(\n app0.raiden,\n SendLockedTransfer,\n {'transfer': {'lock': {'amount': amount_refund}}},\n )\n assert send_locked\n\n send_refund = raiden_events_must_contain_entry(app1.raiden, SendRefundTransfer, {})\n assert send_refund\n\n lock = send_locked.transfer.lock\n refund_lock = send_refund.lock\n assert lock.amount == refund_lock.amount\n assert lock.secrethash\n assert lock.expiration\n\n # Both channels have the amount locked because of the refund message\n assert_synched_channel_state(\n token_network_identifier,\n app0, deposit - amount_path, [lockstate_from_lock(lock)],\n app1, deposit + amount_path, [lockstate_from_lock(refund_lock)],\n )\n assert_synched_channel_state(\n token_network_identifier,\n app1, deposit - amount_path - amount_drain, [],\n app2, deposit + amount_path + amount_drain, [],\n )", "title": "" }, { "docid": "f1cb052e6e048205e64d336ee60fc02b", "score": "0.5107739", "text": "def test_trial_ending(self, mock_storage_class, mock_send_email):\n now = timezone.now()\n\n owner1 = get(User)\n org1 = get(Organization, owners=[owner1])\n customer1 = get(djstripe.Customer)\n org1.stripe_customer = customer1\n org1.save()\n get(\n djstripe.Subscription,\n status=SubscriptionStatus.trialing,\n trial_start=now,\n trial_end=now + timedelta(days=7),\n created=now - timedelta(days=24),\n customer=customer1,\n )\n\n owner2 = get(User)\n org2 = fixture.get(Organization, owners=[owner2])\n customer2 = get(djstripe.Customer)\n org2.stripe_customer = customer2\n org2.save()\n get(\n djstripe.Subscription,\n status=SubscriptionStatus.trialing,\n trial_start=now,\n trial_end=now + timedelta(days=7),\n created=now - timedelta(days=25),\n )\n\n mock_storage = mock.Mock()\n mock_storage_class.return_value = mock_storage\n\n daily_email()\n\n self.assertEqual(mock_storage.add.call_count, 1)\n mock_storage.add.assert_has_calls(\n [\n mock.call(\n message=mock.ANY,\n extra_tags=\"\",\n level=31,\n user=owner1,\n ),\n ]\n )\n self.assertEqual(mock_send_email.call_count, 1)\n mock_send_email.assert_has_calls(\n [\n mock.call(\n subject=\"Your trial is ending soon\",\n recipient=owner1.email,\n template=mock.ANY,\n template_html=mock.ANY,\n context=mock.ANY,\n ),\n ]\n )", "title": "" }, { "docid": "594001de7015737ac1003b346293cfd5", "score": "0.5100149", "text": "def testAll():\n test_exchange()\n print(\"All tests passed\")", "title": "" }, { "docid": "e2cef2dc4aaf48f9d42885ae5823eaa9", "score": "0.5095431", "text": "def testConfirmarReserva(self):\n afiliado_id = 1\n ee = EspecialistaEspecialidad.objects.get(id=1)\n turno = Turno.objects.create(fecha=timezone.now(),\n ee=ee,\n sobreturno=False,\n estado=Turno.DISPONIBLE)\n b.reservarTurnos(afiliado_id, '12345678', [turno.id])\n reservados = b.get_turnos_reservados(afiliado_id)\n for reservado in reservados:\n b.confirmar_reserva([reservado['id']])\n linea = LineaDeReserva.objects.get(id=reservado['id'])\n self.assertEquals(linea.estado, Turno.PRESENTE)\n self.assertEquals(linea.turno.estado, Turno.PRESENTE)", "title": "" }, { "docid": "520e3a8c258e53d4d3fb68227162bc92", "score": "0.5078625", "text": "def test_post_action(sut: SystemUnderTest):\n # NOTE(bdodd): 
Actions better tested in the Redfish-Usecase-Checkers", "title": "" }, { "docid": "e6d17fda6177fe2ce267ae69d19db552", "score": "0.5071033", "text": "def test_start_requeue(self):\n pass", "title": "" }, { "docid": "cb964b1152404b5e6b49671649b84fd5", "score": "0.5065938", "text": "def test_reload(self):\n pass", "title": "" }, { "docid": "36348ae91f36679cd01dc95180967903", "score": "0.50636816", "text": "def test_event_flows_install_delete(self, mock_install_flows):\n dpid = \"00:00:00:00:00:00:00:01\"\n switch = get_switch_mock(dpid)\n self.napp.controller.switches = {dpid: switch}\n mock_flow_dict = MagicMock()\n event = get_kytos_event_mock(name='kytos.flow_manager.flows.delete',\n content={'dpid': dpid,\n 'flow_dict': mock_flow_dict})\n self.napp.event_flows_install_delete(event)\n mock_install_flows.assert_called_with('delete', mock_flow_dict,\n [switch])", "title": "" }, { "docid": "2d774053b847043e42421b0b5e11e53b", "score": "0.5037358", "text": "def test_post_deferred_execution_golden(self):\n new_spec = example_spec_random_dest_plate()\n new_spec[\"plan\"][\"destinations\"][0][\"details\"][\"id\"] = rnd_bc()\n uri = '/api/v1/rest/transform-specs'\n headers = [(\"Transform-Execution\", \"Deferred\")]\n rv = self.client.post(uri,\n data=json.dumps(new_spec),\n content_type=\"application/json\",\n headers=headers)\n assert rv.status_code == 201\n new_url = rv.headers['location']\n\n headers = [(\"Transform-Execution\", \"Immediate\")]\n rv = self.client.put(new_url,\n data=json.dumps(new_spec),\n content_type=\"application/json\",\n headers=headers)\n assert rv.status_code == 201\n result = json.loads(rv.data)\n data = result[\"data\"]\n\n date_executed = data[\"date_executed\"]\n assert date_executed is not None\n\n self.client.delete(new_url)\n assert self.client.get(new_url).status_code == 404", "title": "" }, { "docid": "790a01d5e4caa0f44899d0ec5cdf06ec", "score": "0.5029931", "text": "def _receiverTest(self, sentences, expectedFired=(), extraTest=None):\n for sentence in sentences:\n self.protocol.lineReceived(sentence)\n\n actuallyFired = self.receiver.called.keys()\n self.assertEqual(set(actuallyFired), set(expectedFired))\n\n if extraTest is not None:\n extraTest()\n\n self.receiver.clear()\n self.adapter.clear()", "title": "" }, { "docid": "87f21473e290a3ba05c4da8778de4b2d", "score": "0.50291747", "text": "def test_3_port_based_flows_test(self):\n self.ovs_obj.manage_flows(manage_type=\"delete\", br_name=config.OVS_BRIDGE, inputs=None)\n flow_input = {OvsConf.flow_inputs['Pri']: OvsConf.priority[0], OvsConf.flow_inputs['iPort']: OvsConf.ports[0]}\n action = {OvsConf.flow_actions[0]: [OvsConf.ports[1]]}\n self.ovs_obj.manage_flows(manage_type=\"add\", br_name=config.OVS_BRIDGE, inputs=flow_input, action=action)\n flow_input = {OvsConf.flow_inputs['Pri']: OvsConf.priority[0], OvsConf.flow_inputs['iPort']: OvsConf.ports[1]}\n action = {OvsConf.flow_actions[0]: [OvsConf.ports[0]]}\n self.ovs_obj.manage_flows(manage_type=\"add\", br_name=config.OVS_BRIDGE, inputs=flow_input, action=action)\n if self.mini:\n return self.mn.ping(\"h3\",\"h4\")\n self.ovs_obj.manage_flows(manage_type=\"delete\", br_name=config.OVS_BRIDGE, inputs=flow_input)\n if self.mini:\n return self.mn.ping(\"h3\",\"h4\")\n \"\"\"\n else:\n status = remote_ping(config.HOST1_IP,config.HOST1_USER,config.HOST1_PASSWORD,config.h2_port1,echo_count=10)\n if status:\n print \"Ping between hosts H1 & H2 through OVS is Successfull\\n\"\n self.ovs_obj.manage_flows(manage_type=\"delete\", br_name=config.OVS_BRIDGE, 
inputs=flow_input)\n status = remote_ping(config.HOST1_IP, config.HOST1_USER, config.HOST1_PASSWORD, config.h2_port1, echo_count=10)\n if not status:\n print \"Ping between hosts H1 & H2 through OVS failed as flows are deleted\\n\"\n self.ovs_obj.manage_flows(manage_type=\"add\", br_name=config.OVS_BRIDGE, inputs=flow_input, action=action)\n action = {OvsConf.flow_actions[2]: None}\n self.ovs_obj.manage_flows(manage_type=\"alter\", br_name=config.OVS_BRIDGE, inputs=flow_input, action=action)\n\n\n #flow_input = {OvsConf.flow_inputs['Pri']:OvsConf.priority[0],OvsConf.flow_inputs['iPort']:OvsConf.ports[0],OvsConf.flow_inputs['Protocol']:\"arp\"}\n #action = {OvsConf.flow_actions[0]:[OvsConf.ports[1]]}\n #self.ovs_obj.manage_flows(manage_type=\"add\",br_name=config.OVS_BRIDGE, inputs = flow_input, action = action)\n #action = {OvsConf.flow_actions[2]: None}\n #self.ovs_obj.manage_flows(manage_type=\"alter\", br_name=config.OVS_BRIDGE, inputs=flow_input,action=action)\n #action = {OvsConf.flow_actions[0]:[OvsConf.ports[1],4]}\n #self.ovs_obj.manage_flows(manage_type=\"alter\", br_name=config.OVS_BRIDGE, inputs=flow_input, action=action)\n #self.ovs_obj.manage_flows(manage_type=\"delete\", br_name=config.OVS_BRIDGE, inputs=flow_input)\n \"\"\"\n #if self.mini:self.mn.disconnect()", "title": "" }, { "docid": "9feab2adb61c38ec553926af0dfd30f3", "score": "0.50176084", "text": "def test_rsvp(self):\n self.client.force_login(self.user)\n response = self.client.get(reverse('event:event_rsvp', args=(self.event.id,)), follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"RSVP Successful!\")\n response = self.client.get(reverse('event:event_rsvp', args=(self.event.id,)), follow=True)\n self.assertContains(response, \"You have already RSVP\")", "title": "" }, { "docid": "626a6f334de8a5708b295b4ecf67f260", "score": "0.5015508", "text": "def test_delete_workflow(self):\n pass", "title": "" }, { "docid": "11ee5ddc6a737c6d21b879ed2cb29013", "score": "0.50116557", "text": "async def test_update(\n hass: HomeAssistant,\n hass_ws_client: WebSocketGenerator,\n schedule_setup: Callable[..., Coroutine[Any, Any, bool]],\n to: str,\n next_event: str,\n saved_to: str,\n) -> None:\n ent_reg = er.async_get(hass)\n\n assert await schedule_setup()\n\n state = hass.states.get(\"schedule.from_storage\")\n assert state\n assert state.state == STATE_OFF\n assert state.attributes[ATTR_FRIENDLY_NAME] == \"from storage\"\n assert state.attributes[ATTR_ICON] == \"mdi:party-popper\"\n assert state.attributes[ATTR_NEXT_EVENT].isoformat() == \"2022-08-12T17:00:00-07:00\"\n assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, \"from_storage\") is not None\n\n client = await hass_ws_client(hass)\n\n await client.send_json(\n {\n \"id\": 1,\n \"type\": f\"{DOMAIN}/update\",\n f\"{DOMAIN}_id\": \"from_storage\",\n CONF_NAME: \"Party pooper\",\n CONF_ICON: \"mdi:party-pooper\",\n CONF_MONDAY: [],\n CONF_TUESDAY: [],\n CONF_WEDNESDAY: [{CONF_FROM: \"17:00:00\", CONF_TO: to}],\n CONF_THURSDAY: [],\n CONF_FRIDAY: [],\n CONF_SATURDAY: [],\n CONF_SUNDAY: [],\n }\n )\n resp = await client.receive_json()\n assert resp[\"success\"]\n\n state = hass.states.get(\"schedule.from_storage\")\n assert state\n assert state.state == STATE_ON\n assert state.attributes[ATTR_FRIENDLY_NAME] == \"Party pooper\"\n assert state.attributes[ATTR_ICON] == \"mdi:party-pooper\"\n assert state.attributes[ATTR_NEXT_EVENT].isoformat() == next_event\n\n await client.send_json({\"id\": 2, \"type\": f\"{DOMAIN}/list\"})\n resp = await 
client.receive_json()\n assert resp[\"success\"]\n\n result = {item[\"id\"]: item for item in resp[\"result\"]}\n\n assert len(result) == 1\n assert result[\"from_storage\"][CONF_WEDNESDAY] == [\n {CONF_FROM: \"17:00:00\", CONF_TO: saved_to}\n ]", "title": "" }, { "docid": "f91ca2b1101fd1a691b8b363e2b015cd", "score": "0.5011518", "text": "def test_remove_observer_from_closed_state(self, mock_mailer_send):\n referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)\n user = referral.users.first()\n other_observer = factories.UserFactory()\n factories.ReferralUserLinkFactory(\n referral=referral,\n user=other_observer,\n role=models.ReferralUserLinkRoles.OBSERVER,\n )\n self.assertEqual(referral.users.count(), 2)\n\n response = self.client.post(\n f\"/api/referrals/{referral.id}/remove_user/\",\n {\"user\": other_observer.id},\n HTTP_AUTHORIZATION=f\"Token {Token.objects.get_or_create(user=user)[0]}\",\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n models.ReferralActivity.objects.count(),\n 1,\n )\n referral.refresh_from_db()\n self.assertEqual(referral.users.count(), 1)\n self.assertEqual(referral.state, models.ReferralState.CLOSED)\n mock_mailer_send.assert_not_called()", "title": "" }, { "docid": "ee6b82fd6e57418bd1a7ee1ec273387c", "score": "0.50062495", "text": "async def test_drop_match_as_referee(mocker):\n cog, mock_bot, _, bracket = init_reschedule_single_mocks(mocker)\n schedules_spreadsheet = await bracket.get_schedules_spreadsheet()\n mock_role = tosurnament_mock.REFEREE_ROLE_MOCK\n mock_user = tosurnament_mock.REFEREE_USER_MOCK\n mock_command = tosurnament_mock.CommandMock(cog.qualified_name, \"drop_match_as_referee\")\n mock_ctx = tosurnament_mock.CtxMock(mock_bot, mock_user, command=mock_command)\n await cog.drop_match_as_referee(cog, mock_ctx, \"T1-1\")\n mock_ctx.send.assert_called_once_with(\n \"As a __\" + mock_role.name + \"__, \" + mock_user.display_name + \" **succesfully dropped** the matches: T1-1\\n\"\n )\n assert schedules_spreadsheet.spreadsheet.get_updated_values_with_ranges() == ([\"Tier 1!H2:H2\"], [[[\"\"]]])", "title": "" }, { "docid": "1fae1cb6ab7952a09c1a1002e637d1d0", "score": "0.5002308", "text": "def test_gets_notification_on_invite_reject(self):\n \n \n invite = Invite.objects.get(inviter=self.inviter)\n self.client.logout()\n self.client.login(username=self.invitee.username, password=PASSWORD)\n\n response = self.client.post(f\"/event_invite_reject/{invite.id}/\")\n notifications = Notification.objects.filter(recipient=self.inviter)\n # invitee should have 1 notification\n self.assertEqual(1, len(notifications))\n\n notification = notifications[0]\n self.assertEqual(notification.actor, self.invitee)\n self.assertEqual(notification.verb, \"event invite rejected\")\n self.assertEqual(notification.data[\"title\"], \"hiking\")\n self.assertEqual(notification.data[\"url\"], f\"/event/{self.event_id}/\")\n self.assertIs(notification.target, None)", "title": "" }, { "docid": "473884dbb7773a26e4f350a409dcac89", "score": "0.49958736", "text": "def test_in_soft_shutdown_mode_concent_should_accept_messages_that_would_cause_transition_to_passive_state(self):\n\n compute_task_def = self._get_deserialized_compute_task_def(kwargs={'deadline': \"2017-12-01 11:00:00\"})\n task_to_compute = self._get_deserialized_task_to_compute(\n timestamp=\"2017-12-01 10:00:00\",\n compute_task_def=compute_task_def,\n )\n report_computed_task = self._get_deserialized_report_computed_task(\n timestamp=\"2017-12-01 10:59:00\",\n 
task_to_compute=task_to_compute,\n )\n\n with freeze_time(\"2017-12-01 11:00:00\"):\n config.SOFT_SHUTDOWN_MODE = False\n store_subtask(\n task_id=compute_task_def['task_id'],\n subtask_id=compute_task_def['subtask_id'],\n provider_public_key=self.PROVIDER_PUBLIC_KEY,\n requestor_public_key=self.REQUESTOR_PUBLIC_KEY,\n state=Subtask.SubtaskState.FORCING_REPORT,\n next_deadline=get_current_utc_timestamp() + settings.CONCENT_MESSAGING_TIME,\n task_to_compute=task_to_compute,\n report_computed_task=report_computed_task,\n )\n config.SOFT_SHUTDOWN_MODE = True\n\n self.stored_message_counter = 3\n\n serialized_ack_report_computed_task = self._get_serialized_ack_report_computed_task(\n timestamp=\"2017-12-01 11:00:05\",\n ack_report_computed_task=self._get_deserialized_ack_report_computed_task(\n timestamp=\"2017-12-01 11:00:05\",\n subtask_id=compute_task_def['subtask_id'],\n report_computed_task=report_computed_task,\n task_to_compute=task_to_compute\n ),\n requestor_private_key=self.REQUESTOR_PRIVATE_KEY\n )\n\n with freeze_time(\"2017-12-01 11:00:05\"):\n response = self.send_request(\n url='core:send',\n data=serialized_ack_report_computed_task,\n HTTP_CONCENT_CLIENT_PUBLIC_KEY=self._get_encoded_requestor_public_key(),\n )\n\n self.assertEqual(response.status_code, 202)\n self.assertEqual(len(response.content), 0)\n self._assert_stored_message_counter_increased(increased_by=1)\n self._test_subtask_state(\n task_id=compute_task_def['task_id'],\n subtask_id=compute_task_def['subtask_id'],\n subtask_state=Subtask.SubtaskState.REPORTED,\n provider_key=self._get_encoded_provider_public_key(),\n requestor_key=self._get_encoded_requestor_public_key(),\n expected_nested_messages={'task_to_compute', 'want_to_compute_task', 'report_computed_task', 'ack_report_computed_task'},\n )\n self._test_last_stored_messages(\n expected_messages=[\n message.tasks.AckReportComputedTask,\n ],\n task_id=compute_task_def['task_id'],\n subtask_id=compute_task_def['subtask_id'],\n )\n self._test_undelivered_pending_responses(\n subtask_id=compute_task_def['subtask_id'],\n client_public_key=self._get_encoded_provider_public_key(),\n expected_pending_responses_receive=[\n PendingResponse.ResponseType.ForceReportComputedTaskResponse,\n ]\n )\n self.assertEqual(len(mail.outbox), len(settings.ADMINS))", "title": "" }, { "docid": "daa004deaa16e431d0ae381998301912", "score": "0.49957865", "text": "def test_handle_reset_success(\n mock_send_reply, make_handler_params,\n):\n params = make_handler_params(\"reset\")\n handle_reset(params)\n\n params.storage.put.assert_not_called()\n mock_send_reply.assert_called_with(\n params.client,\n params.message,\n \"This will wipe all current lunches from my records. 
If you wish to continue, please type 'reset confirm'.\",\n )", "title": "" }, { "docid": "9d06ae838e47a514c150e5c25c80db4d", "score": "0.49892065", "text": "def test_rest_add_and_delete_with_dpid_fail(self, mock_install_flows):\n api = get_test_client(self.napp.controller, self.napp)\n data = {'flows': [{\"priority\": 25}]}\n for method in ['flows', 'delete']:\n url_1 = f'{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:01'\n url_2 = f'{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:02'\n url_3 = f'{self.API_URL}/v2/{method}/00:00:00:00:00:00:00:03'\n\n response_1 = api.post(url_1)\n response_2 = api.post(url_2, data=data)\n response_3 = api.post(url_2, json={})\n response_4 = api.post(url_3, json=data)\n\n self.assertEqual(response_1.status_code, 400)\n self.assertEqual(response_2.status_code, 415)\n self.assertEqual(response_3.status_code, 400)\n self.assertEqual(response_4.status_code, 404)\n\n self.assertEqual(mock_install_flows.call_count, 0)", "title": "" }, { "docid": "91cf10c41708682808b351c7f021ebc4", "score": "0.49889967", "text": "def test_handle_lost_db_update_task(self):\n\n # Get database update task\n when = now()\n self.assertFalse(self.system_task_mgr._is_db_update_completed)\n task = self.system_task_mgr.get_tasks_to_schedule(when)[0]\n self.assertTrue(task.id.startswith(DB_UPDATE_TASK_ID_PREFIX))\n task_1_id = task.id\n\n # Lose task after scheduling and get different task next time\n task.agent_id = self.agent_id\n self.task_mgr.launch_tasks([task], now())\n update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())\n self.task_mgr.handle_task_update(update)\n self.system_task_mgr.handle_task_update(update)\n task = self.system_task_mgr.get_tasks_to_schedule(when)[0]\n task_2_id = task.id\n self.assertTrue(task.id.startswith(DB_UPDATE_TASK_ID_PREFIX))\n self.assertNotEqual(task.id, task_1_id)\n self.assertFalse(self.system_task_mgr._is_db_update_completed)\n\n # Lose task after running and get different task next time\n task.agent_id = self.agent_id\n self.task_mgr.launch_tasks([task], now())\n update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())\n self.task_mgr.handle_task_update(update)\n self.system_task_mgr.handle_task_update(update)\n update = job_test_utils.create_task_status_update(task.id, task.agent_id, TaskStatusUpdate.LOST, now())\n self.task_mgr.handle_task_update(update)\n self.system_task_mgr.handle_task_update(update)\n task = self.system_task_mgr.get_tasks_to_schedule(when)[0]\n self.assertTrue(task.id.startswith(DB_UPDATE_TASK_ID_PREFIX))\n self.assertNotEqual(task.id, task_1_id)\n self.assertNotEqual(task.id, task_2_id)\n self.assertFalse(self.system_task_mgr._is_db_update_completed)", "title": "" }, { "docid": "bca8ab2ce681889c9b45c823785f9366", "score": "0.498717", "text": "def test_call_reroute(self):\n pass", "title": "" }, { "docid": "d5bf992f81639f2f43ef370bb34636b4", "score": "0.4986484", "text": "def test_reroute_alternate_exchange(self):\n self.startQmf()\n session = self.session\n # 1. Create 2 exchanges A and B (fanout) where B is the\n # alternate exchange for A\n session.exchange_declare(exchange=\"B\", type=\"fanout\")\n session.exchange_declare(exchange=\"A\", type=\"fanout\", alternate_exchange=\"B\")\n\n # 2. Bind queue X to B\n session.queue_declare(queue=\"X\", exclusive=True, auto_delete=True)\n session.exchange_bind(queue=\"X\", exchange=\"B\")\n\n # 3.
Send 1 message to queue Y\n session.queue_declare(queue=\"Y\", exclusive=True, auto_delete=True)\n props = session.delivery_properties(routing_key=\"Y\")\n session.message_transfer(message=Message(props, \"reroute me!\"))\n\n # 4. Call reroute on queue Y and specify that messages should\n # be sent to exchange A\n y = self.qmf.getObjects(_class=\"queue\", name=\"Y\")[0]\n result = y.reroute(1, False, \"A\", {})\n self.assertEqual(result.status, 0)\n\n # 5. verify that the message is rerouted through B (as A has\n # no matching bindings) to X\n self.subscribe(destination=\"x\", queue=\"X\")\n self.assertEqual(\"reroute me!\", session.incoming(\"x\").get(timeout=1).body)\n\n # Cleanup\n for e in [\"A\", \"B\"]: session.exchange_delete(exchange=e)", "title": "" }, { "docid": "25bad68bbc8aeeac7e848cecc2bc5526", "score": "0.4982645", "text": "def test_long_alert_skip(self):\n\n self.assertEqual(len(mail.outbox), 0)\n\n now = timezone.now()\n draft = models.DraftGrantApplication.objects.get(pk=2)\n draft.created = now\n draft.save()\n cycle = models.GrantCycle.objects.get(pk=2)\n cycle.close = now + timedelta(days=7, hours=12)\n cycle.save()\n\n self.client.get('/mail/drafts/')\n self.assertEqual(len(mail.outbox), 0)", "title": "" }, { "docid": "daf79c10cf1d9bda7d0a0ab9c9447b33", "score": "0.4982328", "text": "def test_no_refund_is_created_while_not_accepted(self):\n with self.admin_access.repo_cnx() as cnx:\n count = self.refund_lines_count(cnx, self.account1)\n expense = cnx.create_entity('Expense', title=u'expense 1')\n self.add_expense_line(cnx, expense, self.account1)\n cnx.commit() # to fire corresponding operations\n newcount = self.refund_lines_count(cnx, self.account1)\n self.assertEqual(newcount, count)\n self.accept(cnx, expense)\n newcount = self.refund_lines_count(cnx, self.account1)\n self.assertEqual(newcount, count + 1)", "title": "" }, { "docid": "2a3f7207b39c51d4c1e0172e11000a5a", "score": "0.4978804", "text": "def test_republish_item_expired(self):\n signed_item = get_signed_item(self.name, self.value, PUBLIC_KEY,\n PRIVATE_KEY, 0)\n # Signed item with out of date expires argument.\n signed_item['expires'] = 123.456\n signed_item['uuid'] = self.uuid\n signed_item['sender'] = self.sender\n signed_item['recipient'] = self.recipient\n signed_item['reply_port'] = self.reply_port\n signed_item['version'] = self.version\n seal = get_seal(signed_item, PRIVATE_KEY)\n signed_item['seal'] = seal\n signed_item['message'] = 'store'\n message = from_dict(signed_item)\n node = Node(PUBLIC_KEY, PRIVATE_KEY, self.event_loop, self.connector,\n self.reply_port)\n node.data_store[message.key] = message\n patcher = patch('drogulus.dht.node.log.info')\n mock_log = patcher.start()\n node.republish(message.key)\n self.assertEqual(2, mock_log.call_count)\n expected = 'Republish check for key: %s' % message.key\n self.assertEqual(expected, mock_log.call_args_list[0][0][0])\n msg = '%s expired. Deleted from local data store.' 
% message.key\n self.assertEqual(msg, mock_log.call_args_list[1][0][0])\n patcher.stop()", "title": "" }, { "docid": "98520c79cf1688b304bf2b92968a9483", "score": "0.4973491", "text": "def test_custom_reset_action(self):\n config = self.env.config['ticket-workflow']\n config.set('_reset', '-> review')\n config.set('_reset.operations', 'reset_workflow')\n config.set('_reset.permissions', 'TICKET_BATCH_MODIFY')\n config.set('_reset.default', 2)\n self.perm_sys.grant_permission('user2', 'TICKET_BATCH_MODIFY')\n self._reload_workflow()\n\n actions1 = self.ctlr.get_ticket_actions(self.req1, self.ticket)\n actions2 = self.ctlr.get_ticket_actions(self.req2, self.ticket)\n chgs2 = self.ctlr.get_ticket_changes(self.req2, self.ticket, '_reset')\n\n self.assertEqual(1, len(actions1))\n self.assertNotIn((2, '_reset'), actions1)\n self.assertEqual(2, len(actions2))\n self.assertIn((2, '_reset'), actions2)\n self.assertEqual('review', chgs2['status'])", "title": "" }, { "docid": "0ec2cbb07bf725431ecf0b4e27ad7f35", "score": "0.4971786", "text": "def test_that_concent_should_change_subtask_state_if_verification_is_after_deadline(self):\n\n with freeze_time(\"2018-04-01 10:30:00\"):\n subtask = store_or_update_subtask(\n task_id=self.task_id,\n subtask_id=self.subtask_id,\n provider_public_key=self.PROVIDER_PUBLIC_KEY,\n requestor_public_key=self.REQUESTOR_PUBLIC_KEY,\n state=Subtask.SubtaskState.ADDITIONAL_VERIFICATION,\n next_deadline=get_current_utc_timestamp() + settings.ADDITIONAL_VERIFICATION_CALL_TIME,\n task_to_compute=self.report_computed_task.task_to_compute,\n report_computed_task=self.report_computed_task,\n )\n self._assert_stored_message_counter_increased(2)\n\n with freeze_time(parse_timestamp_to_utc_datetime(subtask.next_deadline.timestamp() + 1)):\n serialized_subtask_results_verify = self._get_serialized_subtask_results_verify(\n subtask_results_verify=self._get_deserialized_subtask_results_verify(\n subtask_results_rejected=self._get_deserialized_subtask_results_rejected(\n reason=message.tasks.SubtaskResultsRejected.REASON.VerificationNegative,\n report_computed_task=self.report_computed_task,\n )\n )\n )\n\n response = self.client.post(\n reverse('core:send'),\n data=serialized_subtask_results_verify,\n content_type='application/octet-stream',\n )\n\n assert response.status_code == 200\n\n subtask.refresh_from_db()\n self.assertEqual(subtask.state_enum, Subtask.SubtaskState.ACCEPTED)\n self.assertEqual(subtask.next_deadline, None)\n self._test_undelivered_pending_responses(\n subtask_id=subtask.subtask_id,\n client_public_key=self._get_encoded_provider_public_key(),\n client_public_key_out_of_band=self._get_encoded_provider_public_key(),\n expected_pending_responses_receive_out_of_band=[\n PendingResponse.ResponseType.SubtaskResultsSettled,\n ]\n )\n self._test_undelivered_pending_responses(\n subtask_id=subtask.subtask_id,\n client_public_key=self._get_encoded_requestor_public_key(),\n client_public_key_out_of_band=self._get_encoded_requestor_public_key(),\n expected_pending_responses_receive_out_of_band=[\n PendingResponse.ResponseType.SubtaskResultsSettled,\n ]\n )\n\n response_2 = self.client.post(\n reverse('core:receive_out_of_band'),\n data = self._create_requestor_auth_message(),\n content_type = 'application/octet-stream',\n )\n\n self._test_response(\n response_2,\n status = 200,\n key = self.REQUESTOR_PRIVATE_KEY,\n message_type = message.concents.SubtaskResultsSettled,\n fields = {\n 'task_to_compute': self.report_computed_task.task_to_compute,\n }\n )\n\n response_3 = 
self.client.post(\n reverse('core:receive_out_of_band'),\n data = self._create_provider_auth_message(),\n content_type = 'application/octet-stream',\n )\n\n self._test_response(\n response_3,\n status = 200,\n key = self.PROVIDER_PRIVATE_KEY,\n message_type = message.concents.SubtaskResultsSettled,\n fields = {\n 'task_to_compute': self.report_computed_task.task_to_compute,\n }\n )", "title": "" }, { "docid": "6b572087ffd2b1a15bb36a8e1750344b", "score": "0.49714562", "text": "def test_send_reject(self, result, source, reasons):\n acse = ACSE(self.assoc)\n for reason in reasons:\n acse.send_reject(result, source, reason)\n\n primitive = self.assoc.dul.queue.get()\n with pytest.raises(queue.Empty):\n self.assoc.dul.queue.get(block=False)\n\n assert isinstance(primitive, A_ASSOCIATE)\n assert primitive.result == result\n assert primitive.result_source == source\n assert primitive.diagnostic == reason", "title": "" }, { "docid": "aefea937e014f820576275886dba2998", "score": "0.49710697", "text": "def end(self, test_outcome, details=None):\r\n self.data.update_result(test_outcome, details)", "title": "" }, { "docid": "2827bb2ffce3cc1bac39760167c5be02", "score": "0.49709073", "text": "def test_submission(self):\r\n\r\n self.assertFalse(self.sub.prepared)\r\n self.assertFalse(self.sub.sent)\r\n self.assertFalse(self.sub.sending)", "title": "" }, { "docid": "be58c6671fa4cfb67de8550ad28cae27", "score": "0.49706295", "text": "def test_restore():\n instance = Action()\n status = instance.restore('not a backup')\n assert isinstance(status, tuple)\n assert status[0] == False", "title": "" }, { "docid": "1077afbfc553fd9289279e7c53761e2e", "score": "0.4967384", "text": "def test_workflow_reject_quality(\n self, instance_obj, web_request, session, roles, role_name, origin_state\n ):\n obj, wf, request = self.prepare_obj_wf(\n instance_obj,\n web_request,\n roles[role_name],\n origin_state\n )\n\n wf.reject()\n session.flush()\n assert obj.state == 'rejected'\n assert obj.state_history[-1]['transition'] == 'reject'", "title": "" }, { "docid": "9a2bc05a0c67d009c7c41d9598555b49", "score": "0.49669638", "text": "def test_auto_rollback(self):\n session = self.session\n self.declare_queues([\"tx-autorollback-a\", \"tx-autorollback-b\", \"tx-autorollback-c\"])\n session.message_subscribe(queue=\"tx-autorollback-a\", destination=\"qa\")\n session.message_subscribe(queue=\"tx-autorollback-b\", destination=\"qb\")\n session.message_subscribe(queue=\"tx-autorollback-c\", destination=\"qc\")\n\n session2 = self.conn.session(\"worker\", 2)\n queue_a, queue_b, queue_c, ignore = self.perform_txn_work(session2, \"tx-autorollback-a\", \"tx-autorollback-b\", \"tx-autorollback-c\")\n\n for q in [queue_a, queue_b, queue_c]:\n try:\n extra = q.get(timeout=1)\n self.fail(\"Got unexpected message: \" + extra.body)\n except Empty: None\n\n session2.close()\n\n session.tx_select()\n\n self.enable_flow(\"qa\")\n queue_a = session.incoming(\"qa\")\n\n self.enable_flow(\"qb\")\n queue_b = session.incoming(\"qb\")\n\n self.enable_flow(\"qc\")\n queue_c = session.incoming(\"qc\")\n\n #check results\n for i in range(1, 5):\n msg = queue_a.get(timeout=1)\n self.assertEqual(\"Message %d\" % i, msg.body)\n session.message_accept(RangedSet(msg.id))\n\n msg = queue_b.get(timeout=1)\n self.assertEqual(\"Message 6\", msg.body)\n session.message_accept(RangedSet(msg.id))\n\n msg = queue_c.get(timeout=1)\n self.assertEqual(\"Message 7\", msg.body)\n session.message_accept(RangedSet(msg.id))\n\n for q in [queue_a, queue_b, queue_c]:\n try:\n 
extra = q.get(timeout=1)\n self.fail(\"Got unexpected message: \" + extra.body)\n except Empty: None\n\n #cleanup\n session.tx_commit()", "title": "" }, { "docid": "0f734f34de6740cf121d228858e9b63f", "score": "0.49658418", "text": "def test_finalcheck_data_changed(self):\n\n objs = [\n self.event,\n self.user,\n self.address,\n self.medical_details,\n *self.emergency_contacts,\n self.reg,\n self.reg.options.first(),\n self.reg.options.first().option,\n ]\n\n for obj in objs:\n with self.subTest(data=repr(obj)):\n response = self.client.get(self.final_check_url)\n obj.save()\n self.assertCache(response, changed=True)", "title": "" }, { "docid": "b6450560b0e47a5f993b6f406432234b", "score": "0.49641934", "text": "def test_recreateActivities(self):\n object_to_test = self.portal.portal_simulation\n createZODBPythonScript(self.getPortal().portal_skins.custom,\n 'ERP5Site_testRecreateActivityScript', \"\",\n \"context.manage_addProperty('custom_property_without_meaning', 'I was there', 'string')\")\n\n self.commit()\n object_to_test.activate().ERP5Site_testRecreateActivityScript()\n\n self.commit()\n # Verify if the final activity is created.\n self.assertTrue(object_to_test.hasActivity(method_id=\"ERP5Site_testRecreateActivityScript\"))\n self.portal.portal_activities.activate().ERP5Site_clearActivities()\n self.commit()\n self.assertTrue(object_to_test.hasActivity(method_id=\"ERP5Site_testRecreateActivityScript\"))\n self.assertTrue(self.portal.portal_activities.hasActivity(method_id='ERP5Site_clearActivities'))\n self.tic()\n self.assertFalse(object_to_test.hasActivity(method_id=\"ERP5Site_testRecreateActivityScript\"))\n self.assertFalse(self.portal.portal_activities.hasActivity(method_id='ERP5Site_clearActivities'))\n self.assertEqual(object_to_test.getProperty('custom_property_without_meaning'),\n 'I was there')", "title": "" }, { "docid": "027fafcf644ab757e5fc2d0390e131c0", "score": "0.49525243", "text": "def test_dbrules(self):\n\n testdata = u\"\"\"\n INSERT INTO attachmentrules(scope,checktype,action,regex,description,prio) VALUES\n ('[email protected]','contenttype','allow','application/x-executable','this user likes exe',1)\n \"\"\"\n self.session.execute(testdata)\n # copy file rules\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='virus', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(TESTDATADIR + '/binaryattachment.eml', tmpfile.name)\n suspect = Suspect(\n '[email protected]', '[email protected]', tmpfile.name)\n\n result = self.candidate.examine(suspect)\n resstr = actioncode_to_string(result)\n self.assertEqual(resstr, \"DUNNO\")\n\n # another recipient should still get the block\n suspect = Suspect(\n '[email protected]', '[email protected]', tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n resstr = actioncode_to_string(result)\n self.assertEqual(resstr, \"DELETE\")\n tmpfile.close()", "title": "" }, { "docid": "fbb5d5d2ae42f5831fed3cb5b53f644f", "score": "0.49482492", "text": "def test_republish_replication_lack_of_activity(self):\n signed_item = get_signed_item(self.name, self.value, PUBLIC_KEY,\n PRIVATE_KEY, 0)\n signed_item['uuid'] = self.uuid\n signed_item['sender'] = self.sender\n signed_item['recipient'] = self.recipient\n signed_item['reply_port'] = self.reply_port\n signed_item['version'] = self.version\n seal = get_seal(signed_item, PRIVATE_KEY)\n signed_item['seal'] = seal\n signed_item['message'] = 'store'\n message = from_dict(signed_item)\n node = Node(PUBLIC_KEY, PRIVATE_KEY, 
self.event_loop, self.connector,\n self.reply_port)\n node.replicate = MagicMock()\n node.data_store._set_item(message.key, (message, 123.45, 123.45))\n patcher = patch('drogulus.dht.node.log.info')\n mock_log = patcher.start()\n mock_handler = MagicMock()\n mock_handler.cancel = MagicMock()\n with patch.object(self.event_loop, 'call_later',\n return_value=mock_handler) as mock_call:\n node.republish(message.key)\n mock_call.assert_called_once_with(REPLICATE_INTERVAL,\n node.republish, message.key)\n self.assertEqual(3, mock_log.call_count)\n expected = 'Republish check for key: %s' % message.key\n self.assertEqual(expected, mock_log.call_args_list[0][0][0])\n msg = 'Republishing item %s.' % message.key\n self.assertEqual(msg, mock_log.call_args_list[1][0][0])\n msg = 'Removing %s due to lack of activity.' % message.key\n self.assertEqual(msg, mock_log.call_args_list[2][0][0])\n self.assertEqual(1, mock_handler.cancel.call_count)\n patcher.stop()", "title": "" }, { "docid": "72d3a4ab3241501b3565d7384ac1524a", "score": "0.4945555", "text": "def test_rollback(self):\n session = self.session\n queue_a, queue_b, queue_c, consumed = self.perform_txn_work(session, \"tx-rollback-a\", \"tx-rollback-b\", \"tx-rollback-c\")\n\n for q in [queue_a, queue_b, queue_c]:\n try:\n extra = q.get(timeout=1)\n self.fail(\"Got unexpected message: \" + extra.body)\n except Empty: None\n\n session.tx_rollback()\n\n #need to release messages to get them redelivered now:\n session.message_release(consumed)\n\n #check results\n for i in range(1, 5):\n msg = queue_a.get(timeout=1)\n self.assertEqual(\"Message %d\" % i, msg.body)\n session.message_accept(RangedSet(msg.id))\n\n msg = queue_b.get(timeout=1)\n self.assertEqual(\"Message 6\", msg.body)\n session.message_accept(RangedSet(msg.id))\n\n msg = queue_c.get(timeout=1)\n self.assertEqual(\"Message 7\", msg.body)\n session.message_accept(RangedSet(msg.id))\n\n for q in [queue_a, queue_b, queue_c]:\n try:\n extra = q.get(timeout=1)\n self.fail(\"Got unexpected message: \" + extra.body)\n except Empty: None\n\n #cleanup\n session.tx_commit()", "title": "" }, { "docid": "a497dfd7bda5ee0b36903bd3e40b2edc", "score": "0.49451008", "text": "def testCaches(self):\n\n client_id = \"C.\" + \"b\" * 16\n\n approval_id = self.RequestAndGrantClientApproval(\n client_id, requestor=self.token.username)\n\n f = self.api.Client(client_id).CreateFlow(\n name=flow_test_lib.SendingFlow.__name__)\n\n # Remove the approval from the data store, but it should still exist in the\n # security manager cache.\n self.RevokeClientApproval(\n client_id, approval_id, self.token, remove_from_cache=False)\n\n # If this doesn't raise now, all answers were cached.\n self.api.Client(client_id).Flow(f.flow_id).Get()\n\n self.ClearCache()\n\n # This must raise now.\n self.assertRaises(grr_api_errors.AccessForbiddenError,\n self.api.Client(client_id).Flow(f.flow_id).Get)", "title": "" }, { "docid": "f449b851985f682ecca45abcf9a3045a", "score": "0.4943535", "text": "def test_non_retriable_error_occurs(self):\n events = [copy.deepcopy(_event_test_transaction)] * 2\n\n response = _create_api_response([_NON_RETRIABLE_ERR, _SUCCESS])\n self.mock_service.mutate.return_value = response\n blb = blob.Blob(events=events, location='')\n\n blb = self.test_hook.send_events(blb)\n\n self.assertEqual(self.mock_service.mutate.call_count, 1)\n self.assertEqual(len(blb.failed_events), 1)", "title": "" }, { "docid": "89f5ec259466f934f624539a1d0b1ca0", "score": "0.4942996", "text": "def 
test_check_switch_consistency_delete(self, *args):\n (mock_flow_factory, mock_install_flows) = args\n dpid = \"00:00:00:00:00:00:00:01\"\n switch = get_switch_mock(dpid, 0x04)\n\n flow_1 = MagicMock()\n flow_1.as_dict.return_value = {'flow_1': 'data'}\n\n flow_list = [{\"command\": \"delete\",\n \"flow\": {'flow_1': 'data'}\n }]\n serializer = MagicMock()\n serializer.from_dict.return_value = flow_1\n\n switch.flows = [flow_1]\n\n mock_flow_factory.return_value = serializer\n self.napp.stored_flows = {dpid: {\"flow_list\": flow_list}}\n self.napp.check_switch_consistency(switch)\n mock_install_flows.assert_called()", "title": "" }, { "docid": "ed9c9a4e4b3d9bee577b92a5646537a1", "score": "0.4942011", "text": "def test_sync():\n x = ftp.retrlines('site sync %s' % start, callback)\n for r in received:\n print r\n if len(received) > 0:\n assert True\n else:\n assert False", "title": "" }, { "docid": "2ba06788c6f97c66a32a15851e8cb43c", "score": "0.49390537", "text": "def test_no_strict_delete_of10(self, *args):\n (mock_save_flow, _, _) = args\n dpid = \"00:00:00:00:00:00:00:01\"\n switch = get_switch_mock(dpid, 0x01)\n switch.id = dpid\n stored_flow = {\n \"command\": \"add\",\n \"flow\": {\n \"actions\": [{\"max_len\": 65535, \"port\": 6}],\n \"cookie\": 4961162389751548787,\n \"match\": {\n \"in_port\": 80,\n \"dl_src\": \"00:00:00:00:00:00\",\n \"dl_dst\": \"f2:0b:a4:7d:f8:ea\",\n \"dl_vlan\": 0,\n \"dl_vlan_pcp\": 0,\n \"dl_type\": 0,\n \"nw_tos\": 0,\n \"nw_proto\": 0,\n \"nw_src\": \"192.168.0.1\",\n \"nw_dst\": \"0.0.0.0\",\n \"tp_src\": 0,\n \"tp_dst\": 0,\n },\n \"out_port\": 65532,\n \"priority\": 123,\n },\n }\n stored_flow2 = {\n \"command\": \"add\",\n \"flow\": {\n \"actions\": [],\n \"cookie\": 4961162389751654,\n \"match\": {\n \"in_port\": 2,\n \"dl_src\": \"00:00:00:00:00:00\",\n \"dl_dst\": \"f2:0b:a4:7d:f8:ea\",\n \"dl_vlan\": 0,\n \"dl_vlan_pcp\": 0,\n \"dl_type\": 0,\n \"nw_tos\": 0,\n \"nw_proto\": 0,\n \"nw_src\": \"192.168.0.1\",\n \"nw_dst\": \"0.0.0.0\",\n \"tp_src\": 0,\n \"tp_dst\": 0,\n },\n \"out_port\": 655,\n \"priority\": 1,\n },\n }\n flow_to_install = {\"match\": {\"in_port\": 80, \"wildcards\": 4194303}}\n flow_list = {\"flow_list\": [stored_flow, stored_flow2]}\n command = \"delete\"\n self.napp.stored_flows = {dpid: flow_list}\n\n self.napp._store_changed_flows(command, flow_to_install, switch)\n mock_save_flow.assert_called()\n self.assertEqual(len(self.napp.stored_flows[dpid]['flow_list']), 1)", "title": "" }, { "docid": "90c0e9c5f1b4cd5be7815181ef4c22e4", "score": "0.49352738", "text": "def test_remove_url():\n alert = create_alert()\n addr = \"alerts-%[email protected]\" % alert.id\n #get into an 'ALERTING' state\n msg = MailRequest('fakepeer', sender, addr, open(home(\"tests/data/emails/confirmation.msg\")).read())\n msg['to'] = addr\n Router.deliver(msg)\n q = queue.Queue(email('run/alerts'))\n assert q.count() == 0\n\n #send a regular alerts email\n msg = MailRequest('fakepeer', sender, addr, open(home(\"tests/data/emails/alerts.msg\")).read())\n msg['to'] = addr\n Router.deliver(msg)\n assert len(Blurb.objects.all()) == 26, \"There are %s blurbs, expected 15.\" % len(Blurb.objects.all())\n alert = Alert.objects.all()[0]\n assert alert.removeurl == u\"/alerts/remove?s=AB2Xq4jsDy4ienBZYuYgWbzBWQ5i6LiD5L4y8JY&hl=en&gl=us&source=alertsmail&cd=4Ya67t6E3e4&cad=:s7:f1:v1:\"", "title": "" }, { "docid": "db6c1930b62cc0509b30cf70671369e4", "score": "0.49333984", "text": "def test_post_immediate_execution_golden(self):\n new_spec = 
example_spec_random_dest_plate()\n new_spec[\"plan\"][\"destinations\"][0][\"details\"][\"id\"] = rnd_bc()\n uri = '/api/v1/rest/transform-specs'\n headers = [(\"Transform-Execution\", \"Immediate\")]\n rv = self.client.post(uri,\n data=json.dumps(new_spec),\n content_type=\"application/json\",\n headers=headers)\n assert rv.status_code == 201\n new_url = rv.headers['location']\n result = json.loads(rv.data)\n data = result[\"data\"]\n\n date_executed = data[\"date_executed\"]\n assert date_executed is not None\n\n self.client.delete(new_url)\n assert self.client.get(new_url).status_code == 404", "title": "" }, { "docid": "6928761e079457a6515bbfaec9893c76", "score": "0.49285072", "text": "def test_update_workflow(self):\n pass", "title": "" }, { "docid": "ebb76f81c2614d9c1a43e979fd5b2c97", "score": "0.49271944", "text": "def test_admin_reject_order(self):\n # Test unregistered id\n # Correct format but not there\n response = self.client.put(\n 'api/v2/parcels/35420/reject', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 404)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n # Test invalid format id\n response = self.client.put(\n 'api/v2/parcels/35uh420/reject', headers=self.admin_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data, {'message': 'Wrong id format'})\n # test with delivered parcel\n self.client.post('api/v2/parcels', data=json.dumps(self.order),\n headers=self.user_token_dict, content_type=\"application/json\")\n last_rec = self.db_conn.get_last_record_id()\n self.client.put(\n 'api/v2/parcels/{}/deliver'.format(last_rec), headers=self.admin_token_dict)\n response = self.client.put(\n 'api/v2/parcels/{}/reject'.format(last_rec), headers=self.admin_token_dict)\n self.assertEqual(json.loads(response.data)[\n 'message'], 'Unsuccesful, order already delivered')\n self.assertEqual(response.status_code, 400)", "title": "" }, { "docid": "ebd3891eb3dad031ca2a1f1dc9e7e18f", "score": "0.49228826", "text": "def test_incomplete_other(self):\n self.do_test(RPM_INCOMPLETE_OTHER_FEED_URL)", "title": "" }, { "docid": "cec2738a51a68df73bffeb32dddd68ea", "score": "0.49180228", "text": "def test_flow_control_stream_closed(self):\n\n response = self.dcp_client.open_producer(\"flowctl\")\n assert response['status'] == SUCCESS\n\n buffsize = 128\n response = self.dcp_client.flow_control(buffsize)\n assert response['status'] == SUCCESS\n\n end_seqno = 5\n for i in range(end_seqno):\n self.mcd_client.set('key' + str(i), 0, 0, 'value', 0)\n\n resp = self.mcd_client.stats('failovers')\n vb_uuid = long(resp['vb_0:0:id'])\n\n stream = self.dcp_client.stream_req(0, 0, 0, end_seqno, vb_uuid)\n max_timeouts = 10\n required_ack = False\n last_seqno = 0\n while stream.has_response() and max_timeouts > 0:\n resp = stream.next_response()\n\n if resp is None:\n\n # close\n self.dcp_client.close_stream(0)\n\n # ack\n ack = self.dcp_client.ack(buffsize)\n assert ack is None, ack['error']\n required_ack = True\n\n # new stream\n stream = self.dcp_client.stream_req(0, 0, last_seqno,\n end_seqno, vb_uuid)\n assert stream.status == SUCCESS, \\\n \"Re-open Stream failed\"\n\n max_timeouts -= 1\n\n elif resp['opcode'] == CMD_MUTATION:\n last_seqno += 1\n\n # verify stream closed\n assert last_seqno == end_seqno, \"Got %s\" % last_seqno\n assert required_ack, \"received non flow-controlled stream\"\n\n self.verification_seqno = end_seqno", 
"title": "" }, { "docid": "6613beda65e89e2f8e2f3dca3b7c1cc3", "score": "0.49138215", "text": "def test_closes_on_reset():\n listener = MockListener()\n\n packet_log = rdpcap(\"test/inputs/tiniest-session.pcap\")\n listener, conn = create_session(packet_log)\n syn_ack = packet_log[1]\n\n listener.dispatch(syn_ack)\n\n reset = syn_ack.copy()\n reset.payload.payload.flags = \"R\"\n reset.seq += 1\n listener.dispatch(reset)\n\n assert conn.state == \"CLOSED\"", "title": "" }, { "docid": "77084445feb01c00a262c84ce0949ab4", "score": "0.49106318", "text": "def test_delete_fsa_result(self):\n pass", "title": "" }, { "docid": "4c7b436523939471da50b63bb238a3f7", "score": "0.49075186", "text": "def test_valid_finalize_demand(self):\n url = reverse('demand-finalize', kwargs={'pk': self.demand1.pk})\n response = client.put(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Demand.objects.get().status, Demand.CLOSED)", "title": "" }, { "docid": "ca4a40a532be9e59f30c2e2366888390", "score": "0.4905259", "text": "def test_reset(self):\n pass", "title": "" }, { "docid": "ca564f5d46d142f53494e0fe42238447", "score": "0.48987204", "text": "def test_task_fail(self):\n from kombu import Exchange\n e = Exchange('', type='direct')\n # declare queue\n consumer_queue = Queue('test.task.fail',\n e,\n channel=self._connection,\n routing_key='test.task.fail')\n client_queue = Queue('',\n e,\n durable=False,\n channel=self._connection)\n consumer_queue.declare()\n client_queue.declare()\n self.queues.append(consumer_queue)\n self.queues.append(client_queue)\n\n q = Qurator(task_exchange=e)\n\n @q.task(queue_name='test.task.fail')\n def fail(data):\n raise Exception('YOU FAIL!')\n\n client = RpcClient(exchange=e)\n client.task('fail', {'x': 1}, server_routing_key='test.task.fail')\n\n curr_queues = q.queues['fail']\n curr_callbacks = q.callbacks['fail']\n\n def still_around(body, message):\n self.assertFalse(message.acknowledged)\n message.ack()\n\n curr_callbacks.append(still_around)\n\n with Consumer(self._connection, curr_queues, callbacks=curr_callbacks):\n self._connection.drain_events(timeout=1)", "title": "" }, { "docid": "54cff7c68e0233a7af775d0bc622a951", "score": "0.48978707", "text": "def test_change_approved_to_rejected(self):\n workflow = Workflow('approved')\n workflow.on_event('reject')\n self.assertEqual(workflow.state, RejectedState())", "title": "" }, { "docid": "c2be99d77c117b948a5d3b18752bf379", "score": "0.48972225", "text": "def test_send_exception_demand_scan(self):\n self.test_master(\"e\")", "title": "" }, { "docid": "4a9962cd3ce9f795ac9af9fdb162139d", "score": "0.48969984", "text": "def test_connectionLostDone(self):\r\n self.setUpState('connected')\r\n\r\n # Connection closed by other party.\r\n self.api.protocol.connectionLost(failure.Failure(ResponseDone()))\r\n self.clock.advance(0)\r\n\r\n # A reconnect is attempted, but not before the back off delay.\r\n self.assertEqual(1, len(self.api.filterCalls))\r\n self.clock.advance(1)\r\n self.assertEqual(1, len(self.api.filterCalls))\r\n self.clock.advance(DELAY_INITIAL - 1)\r\n self.assertEqual(2, len(self.api.filterCalls))", "title": "" }, { "docid": "9cdae54af5fec5644ba0b0a71681f94b", "score": "0.48949128", "text": "def test_outbound_ok(self):\n\n def _cb(method_called, xmlrpc_payload):\n self.assertEqual(method_called, 'EAPIGateway.SendSMS')\n self.assertEqual(xmlrpc_payload['Priority'], 'standard')\n self.assertEqual(xmlrpc_payload['SMSText'], 'hello world')\n 
self.assertEqual(xmlrpc_payload['Service'], 'service')\n self.assertEqual(xmlrpc_payload['Receipt'], 'Y')\n self.assertEqual(xmlrpc_payload['Numbers'], '27761234567')\n self.assertEqual(xmlrpc_payload['Password'], 'password')\n self.assertEqual(xmlrpc_payload['Channel'], 'channel')\n now = datetime.utcnow()\n tomorrow = now + timedelta(days=1)\n self.assertEqual(xmlrpc_payload['Expiry'].hour, tomorrow.hour)\n self.assertEqual(xmlrpc_payload['Expiry'].minute, tomorrow.minute)\n self.assertEqual(xmlrpc_payload['Expiry'].date(), tomorrow.date())\n\n self.assertEqual(xmlrpc_payload['Delivery'].hour, now.hour)\n self.assertEqual(xmlrpc_payload['Delivery'].minute, now.minute)\n self.assertEqual(xmlrpc_payload['Delivery'].date(), now.date())\n\n return {\n 'Identifier': 'abc123'\n }\n\n self.transport.proxy = FakeXMLRPCService(_cb)\n\n msg = self.mk_msg()\n yield self.dispatch(msg,\n rkey='%s.outbound' % self.transport_name)\n\n self.assertEqual(self.get_dispatched_failures(), [])\n self.assertEqual(self.get_dispatched_messages(), [])\n [event_msg] = self.get_dispatched_events()\n self.assertEqual(event_msg['message_type'], 'event')\n self.assertEqual(event_msg['event_type'], 'ack')\n self.assertEqual(event_msg['sent_message_id'], 'abc123')\n # test that we've properly linked the identifier to our\n # internal id of the given message\n self.assertEqual(\n self.transport.get_message_id_for_identifier('abc123'),\n msg['message_id'])", "title": "" } ]
b809052736a786be5ebbddb33b1bc9ce
Creates a Path object representing the full path of an output feature class in the KML/KMZ format.
[ { "docid": "0a4454bcc59bea43f01939fe42e7cff8", "score": "0.6005049", "text": "def _feature_class_default_name(self, desc, output_workspace, **kwargs):\n return output_workspace.joinpath(desc.name + \".kmz\")", "title": "" } ]
[ { "docid": "0f5b0e3516d0e736cb18463ee0e55493", "score": "0.6318198", "text": "def export_kml():\n folder = 'cad'\n name = get_dataset_filename()\n \n # Create a cad folder in the temp directory if it does not exist\n working_folder = catalog_publish_folder + name + \"//\" + temp_folder + \"//\" + folder\n create_folder(working_folder, True)\n \n # Export the shapefile to the folder\n source = database_connection + \"\\\\\" + args.feature_class\n destination = working_folder + \"\\\\\" + name + \".kmz\"\n \n # Make a feature layer (in memory)\n debug(' - Generating KML file in memory from \"' + source + '\"')\n gp.MakeFeatureLayer_management(source, name, \"\", \"\")\n \n # Convert the layer to KML\n debug(' - Exporting KML file (KMZ) to \"' + destination + '\"')\n gp.LayerToKML_conversion(name, destination, \"20000\", \"false\", \"DEFAULT\", \"1024\", \"96\")\n\n # Publish the zipfile to the download folder\n publish_file(working_folder, name + \".kmz\",\"kml\")", "title": "" }, { "docid": "0ddd20f320a99ece84fd00a11ed1e6a5", "score": "0.6048646", "text": "def output_path(self):", "title": "" }, { "docid": "70de662a1197bbfb3ab348f44769df01", "score": "0.59567255", "text": "def output_path(self) -> Path:\n return self.path / OUTPUT_FILE_NAME", "title": "" }, { "docid": "e46576c2df4472a9368be645c677392d", "score": "0.59310794", "text": "def get_feature_object_name_and_path(cls, input_path, feature_dir):\n new_filename = os.path.basename(input_path)\n new_filename = scrub_underscore_suffix(new_filename)\n\n # Append model_name along with \"features\" to demarcate\n # different models when saving the feature vectors.\n new_filename = add_suffix_to_filename(\n new_filename, \"_features\" + \"_\" + cls.class_feature_name\n )\n if not os.path.isdir(os.path.join(feature_dir, cls.class_feature_name)):\n os.makedirs(os.path.join(feature_dir, cls.class_feature_name))\n feature_path = os.path.join(feature_dir, cls.class_feature_name, new_filename)\n feature_path = os.path.abspath(feature_path)\n return feature_path", "title": "" }, { "docid": "557bd66077bbaaa2a03ff10ecfe7067d", "score": "0.5896151", "text": "def get_kpath(self):\n if self._kpath is None:\n from matdb.kpoints import parsed_kpath\n self._kpath = parsed_kpath(self.atoms)\n\n return self._kpath", "title": "" }, { "docid": "853f345c4cec8a31b2c85b65168eff5f", "score": "0.58727056", "text": "def output_path(self) -> pathlib.Path:\n return self.path / self.file", "title": "" }, { "docid": "a6bbf47045cb4a28f187f624e1247c01", "score": "0.58236724", "text": "def CreatePath(self):", "title": "" }, { "docid": "a6bbf47045cb4a28f187f624e1247c01", "score": "0.5823312", "text": "def CreatePath(self):", "title": "" }, { "docid": "a2d391eba31ac5431ead5cc999b785ec", "score": "0.5614006", "text": "def outputPath(self):\n if self.path == []:\n print('Nothing in path. 
Skipping output...')\n return\n\n makedirs('output', exist_ok=True)\n # Scaling factor to gradually change colour\n ptImg = self.img.copy()\n ptPix = ptImg.load()\n l = 120.0 / len(self.path)\n for i, coords in enumerate(self.path):\n ptPix[coords] = (0, 255-int(i*l), 0)\n ptImg.save('output/Path-'+self.fileName)\n ptImg.close()", "title": "" }, { "docid": "087c13c591abd19f1dd1f0b4a1024b46", "score": "0.5428924", "text": "def output_path(self):\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + '/../../outputs'),\n self.identifier)", "title": "" }, { "docid": "d79015d50969186b6744b4afe3406c70", "score": "0.53893644", "text": "def make_output_structure(output_path):\n\n Path(f\"{output_path}/mosaics\").mkdir(parents=True, exist_ok=True)\n Path(f\"{output_path}/chips/pre\").mkdir(parents=True, exist_ok=True)\n Path(f\"{output_path}/chips/post\").mkdir(parents=True, exist_ok=True)\n Path(f\"{output_path}/loc\").mkdir(parents=True, exist_ok=True)\n Path(f\"{output_path}/dmg\").mkdir(parents=True, exist_ok=True)\n Path(f\"{output_path}/over\").mkdir(parents=True, exist_ok=True)\n Path(f\"{output_path}/shapes\").mkdir(parents=True, exist_ok=True)\n\n return True", "title": "" }, { "docid": "349676cc477dc4d38ea8fa2c5bab68be", "score": "0.5386216", "text": "def as_path(self):\n return os.path.join(self.package.as_path(), self.jclass.get())", "title": "" }, { "docid": "61f1cf81b68c6f3e369b89a2f09b4d45", "score": "0.5385024", "text": "def output_path(self):\n return self.files_dir() + '/output/'", "title": "" }, { "docid": "5f3630ae005cc7a2499f79d0fe8b3503", "score": "0.5379766", "text": "def output(filename):\n return os.path.join(*[os.sep, 'opt', 'ml', 'model', filename])", "title": "" }, { "docid": "5fbc05403b439b5cf469ccff2e30f569", "score": "0.5374211", "text": "def generate_outpath(in_path: PathType,\n out_path: Optional[PathType],\n suffix: str = None) -> PathType:\n in_path = pathlib.Path(in_path)\n if suffix is None:\n default_fname = '{}'.format(in_path.stem)\n else:\n default_fname = '{}.{}'.format(in_path.stem, suffix)\n\n if out_path is None:\n out_path = pathlib.Path(default_fname)\n else:\n out_path = pathlib.Path(out_path)\n if not out_path.suffix:\n out_path = out_path.joinpath(default_fname)\n if not out_path.parent.is_dir():\n msg = f'Error. 
Path \"{out_path.parent!s}\" does not exist.'\n raise ValueError(msg)\n return out_path", "title": "" }, { "docid": "60381abcc58f993ea380acfe99027bb6", "score": "0.5370048", "text": "def mk_output_path(odir, k):\n relpath = educe.pdtb.id_to_path(k)\n ofile_dirname = os.path.join(odir, os.path.dirname(relpath))\n ofile_basename = os.path.basename(relpath)\n if not os.path.exists(ofile_dirname):\n os.makedirs(ofile_dirname)\n return os.path.join(ofile_dirname, ofile_basename)", "title": "" }, { "docid": "b45ce2c6d56f8661fd6cc54ce39d04d5", "score": "0.53249043", "text": "def build_output_path(self):\n return self.data_file_path().join('..', '%s_output' % self.slug)", "title": "" }, { "docid": "0243007d45751b0b65c9f5e94d469d8a", "score": "0.5276601", "text": "def get_model_path(self):\n return self.root_path + \"model.ckpt\"", "title": "" }, { "docid": "987a81d72f5b4a474ced3e6665beb305", "score": "0.52739507", "text": "def create_model_output_folder(outputName):\n\tif \"\\\\\" in outputName:\n\t\toutputName=outputName.replace('\\\\','/')\n\tfolder= outputName.replace(outputName.split('/')[-1],'')\n\t\n\tif \"/\" in folder:\n\t\tif not os.path.exists(folder):\n\t\t\tos.makedirs(folder)", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52730644", "text": "def GetPath(self):", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52726805", "text": "def GetPath(self):", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52726805", "text": "def GetPath(self):", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52726805", "text": "def GetPath(self):", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52726805", "text": "def GetPath(self):", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52726805", "text": "def GetPath(self):", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52726805", "text": "def GetPath(self):", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52726805", "text": "def GetPath(self):", "title": "" }, { "docid": "7b6ea84420a883c7f02f0fda697a508f", "score": "0.52726805", "text": "def GetPath(self):", "title": "" }, { "docid": "1fdd53478ef295ff08d5abf97d9b2abe", "score": "0.52659833", "text": "def output_dir(self) -> Path:\n return paths.OUT_DIR / self.name", "title": "" }, { "docid": "3ccf3742670772eec594a27f10aabe23", "score": "0.5261726", "text": "def create_output_footprints(self):\n self.messages.addMessage(\"creating output footprint file\")\n out_path = os.path.dirname(self.outfootprints)\n out_name = os.path.basename(self.outfootprints)\n out_sref = arcpy.SpatialReference('Geographic Coordinate Systems/World/WGS 1984')\n arcpy.CreateFeatureclass_management(out_path, out_name, 'POLYGON', spatial_reference=out_sref)\n arcpy.AddField_management(self.outfootprints, \"rasterid\", \"SHORT\")\n arcpy.AddField_management(self.outfootprints, \"name\", \"TEXT\", 255)\n arcpy.AddField_management(self.outfootprints, \"date\", \"DATE\")\n arcpy.DeleteField_management (self.outfootprints, \"id\")", "title": "" }, { "docid": "ec8bbc62dab33cdd7487c737f9f5b4fd", "score": "0.5254749", "text": "def _m0_path(self):", "title": "" }, { "docid": "e2a9a76d19aa5939db539171694d634e", "score": "0.52338636", "text": "def get_output_path(self):\n return os.path.join(self.outdir, self.outfile)", "title": "" }, { "docid": "039a4d6efd3194afd2e85e0b4f5caf2a", "score": "0.5231837", "text": "def getOutPath(self, key, 
courselab):\n labPath = self.getDirPath(key, courselab)\n return \"%s/%s\" % (labPath, self.OUTPUT_FOLDER)", "title": "" }, { "docid": "fb416ec06ed3382f9f035648eaa76c54", "score": "0.5231334", "text": "def get_output_paths(self):\n pass", "title": "" }, { "docid": "24ad3b7dc63687db2b08455b2fa48a79", "score": "0.52274966", "text": "def to_readable_class_path(self, path, as_class_path=False):\n if not as_class_path:\n path = self.to_package(path).as_class_path()\n if not path:\n if StateProperty().is_project():\n return \"(Default Package)\"\n else:\n return \"(Unknown Package)\"\n return path", "title": "" }, { "docid": "433f2ca7ed09d99052416994d6231cf0", "score": "0.5224498", "text": "def construct_output_path(\n name: str,\n relative_path: Path,\n output_directory: Path,\n input_path: Path = None,\n output_format: str = TextgridFormats.SHORT_TEXTGRID,\n) -> Path:\n if isinstance(output_directory, str):\n output_directory = Path(output_directory)\n if output_format.upper() == \"LAB\":\n extension = \".lab\"\n elif output_format.upper() == \"JSON\":\n extension = \".json\"\n elif output_format.upper() == \"CSV\":\n extension = \".csv\"\n else:\n extension = \".TextGrid\"\n if relative_path:\n relative = output_directory.joinpath(relative_path)\n else:\n relative = output_directory\n output_path = relative.joinpath(name + extension)\n if output_path == input_path:\n output_path = relative.joinpath(name + \"_aligned\" + extension)\n os.makedirs(relative, exist_ok=True)\n relative.mkdir(parents=True, exist_ok=True)\n return output_path", "title": "" }, { "docid": "8990eb93c4fcc59860631d057ea9051a", "score": "0.5177936", "text": "def __init__(self, out_path):\n self.out_path = out_path", "title": "" }, { "docid": "a83db3ea7e3c55a4572f289025d9c551", "score": "0.51744777", "text": "def get_output(self, path):\n assert self._as_parameter_\n return Output._new_from_handle(zzub_archive_get_output(self, path))", "title": "" }, { "docid": "a1957a1f7b656a9d6ab71c1d31cfca50", "score": "0.51505685", "text": "def test_make_output_path():\n\n step = Step()\n output_path = step.make_output_path('junk_uncal.fits')\n assert output_path == 'junk_jwststep.fits'\n\n output_path = step.make_output_path('junk_uncal.fits', idx=1)\n assert output_path == 'junk_1_jwststep.fits'\n\n step.output_ext = '.asdf'\n output_path = step.make_output_path('junk_uncal')\n assert output_path == 'junk_jwststep.asdf'\n\n output_path = step.make_output_path('junk_uncal.fits', ext='asdf')\n assert output_path == 'junk_jwststep.asdf'\n\n step.output_dir = '/junk'\n step.output_ext = None\n output_path = step.make_output_path('junk_uncal.fits')\n assert output_path == path.join(step.output_dir, 'junk_jwststep.fits')", "title": "" }, { "docid": "8204f66e82d90cd419557a9b86250d8d", "score": "0.5133523", "text": "def create_output_paths(set_id):\n\n out_dir_path = OUT_DIRECTORY_PATTERN.format(set_id)\n\n # Create needed paths.\n train_data_path = out_dir_path + \"train_data\"\n train_labels_path = out_dir_path + \"train_labels\"\n validation_data_path = out_dir_path + \"validation_data\"\n validation_labels_path = out_dir_path + \"validation_labels\"\n test_data_path = out_dir_path + \"test_data\"\n test_labels_path = out_dir_path + \"test_labels\"\n\n return train_data_path, train_labels_path, \\\n validation_data_path, validation_labels_path, \\\n test_data_path, test_labels_path", "title": "" }, { "docid": "a508f48cb278bc7e872738092145e9ad", "score": "0.51319027", "text": "def path(self):\n\n return Path(self.full_name)", "title": "" }, 
{ "docid": "6862e875e49ec148996804bb3ecb60f9", "score": "0.51157045", "text": "def generate_path_tree(self):\n pass", "title": "" }, { "docid": "c9e5215a8f5902913983d098729c644c", "score": "0.5114615", "text": "def write_labels(output_path):", "title": "" }, { "docid": "3723615668ac4d631e495caa002ad3d7", "score": "0.50953186", "text": "def path(self):\n raise NotImplementedError", "title": "" }, { "docid": "d159ef8f7741c6a8feee60565e48a486", "score": "0.50796866", "text": "def get_path(self):", "title": "" }, { "docid": "7b0a16c75f08c7255e27c9c373c4ac85", "score": "0.5074151", "text": "def modelpath(self, which, code2=None):\n path = '{0}.{1}.txt.gz'.format(self.sjexbase, which)\n if code2 is None:\n return path\n path2 = self.fname2('{0}.txt.gz'.format(which),code2, category='read')\n if os.path.exists(path2):\n return path2\n return path", "title": "" }, { "docid": "74a46eff778ec10f144f3c71328aa431", "score": "0.5065377", "text": "def get_img_output_path(self):\n return '/'.join([self.base_folder, self.img_output])", "title": "" }, { "docid": "fc2ced558bcfa420a109f7dc8a3343b9", "score": "0.5055187", "text": "def serving_model_path(output_uri: Text) -> Text:\n model_dir = serving_model_dir(output_uri)\n export_dir = os.path.join(model_dir, 'export')\n if tf.io.gfile.exists(export_dir):\n # TODO(b/160795287): Deprecate estimator based executor.\n absl.logging.warning(\n 'Support for estimator-based executor and model export'\n ' will be deprecated soon. Please use export structure '\n '<ModelExportPath>/serving_model_dir/saved_model.pb\"')\n model_dir = io_utils.get_only_uri_in_dir(export_dir)\n return io_utils.get_only_uri_in_dir(model_dir)\n else:\n # If dir doesn't match estimator structure, use serving model root directly.\n return model_dir", "title": "" }, { "docid": "e2c44ec449cf45f56259d097c40b538f", "score": "0.5040054", "text": "def ExportMIFileName(self) -> str:", "title": "" }, { "docid": "403d970e2d2a0a8011dfa31422d75814", "score": "0.5039584", "text": "def save_fit_paths(self, suffix=\"_fpaths.pckle\", save_mode = 1):\n try:\n if( save_mode):\n omode = \"wb\"\n else:\n omode = \"w\"\n fo = open(self.outputFile+self.vsuffix+suffix,omode)\n cPickle.dump([self.fpaths,self.paths,self.ftrees],fo,save_mode)\n fo.close()\n except Exception, error:\n print \"failed in save_fit_paths()\",error\n sys.exit()", "title": "" }, { "docid": "5336769d06cb9c30c1d51e9633474f83", "score": "0.50380933", "text": "def Path(self) -> str:", "title": "" }, { "docid": "5336769d06cb9c30c1d51e9633474f83", "score": "0.50380933", "text": "def Path(self) -> str:", "title": "" }, { "docid": "5336769d06cb9c30c1d51e9633474f83", "score": "0.50380933", "text": "def Path(self) -> str:", "title": "" }, { "docid": "74d53f68ffcd90eca739317e8dd9d3c3", "score": "0.5013982", "text": "def __fspath__(self):\n return osp.join(self.base_dir, self.env_name, self.algo_name, self.exp_id)", "title": "" }, { "docid": "c20bc355a09bc1cf18a6d92a1f39f901", "score": "0.5011293", "text": "def get_path(self):\n raise NotImplementedError()", "title": "" }, { "docid": "81e4c5962e3140e13a48dd8a27e3435d", "score": "0.5006212", "text": "def to_path(self):\n return self.directory + \"/\" + self.name", "title": "" }, { "docid": "846872a30505478f0564a35cc0ee6334", "score": "0.49624977", "text": "def get_output_path(node=None):\n if node==None: node = get_system()\n\n path = os.path.join(get_base_path(),'data','processed')\n \n return path", "title": "" }, { "docid": "08d37034bec83a0e8e9491db66dfd17d", "score": "0.49517703", "text": "def 
as_class_path(self):\n return \".\".join(self.package_paths)", "title": "" }, { "docid": "94acc0cc41fb6aa367ca5a1ee85123cf", "score": "0.49513608", "text": "def image_output_path(self, output_path, sequence_number=None):\n split = path.splitext(output_path)[0]\n base = path.basename(output_path)\n if sequence_number:\n sequence_number_left_padded = str(sequence_number).zfill(6)\n newname = f\"{base}.{sequence_number_left_padded}\"\n else:\n newname = base\n output_path = path.join(split, newname)\n return Path(f\"{output_path}.png\")", "title": "" }, { "docid": "d1ebfc70f9a7686dff41ea16945694d7", "score": "0.49452424", "text": "def OutputRootPath(self):\r\n\t\treturn self._get_attribute('outputRootPath')", "title": "" }, { "docid": "fcd6b984f0aa7662d43333e503f95600", "score": "0.4940185", "text": "def shp_to_kml():\t\n\tfolders = os.listdir('data/' + my.DATA_FOLDER + '/shp/')\n\tif '.DS_Store' in folders:\n\t\tfolders.remove('.DS_Store')\n\t#print folders\n\tfor folder in folders:\n\t\tprint '\\n' + folder + '\\n'\n\t\tpath = 'data/' + my.DATA_FOLDER + 'shp/' + folder + '/'\n\t\tkml_path = 'data/' + my.DATA_FOLDER + 'kml/' + \\\n\t\t\t\t\tfolder.replace(' Division Gangs', '') + '/'\n\t\tif not os.path.exists(kml_path):\n\t\t\tos.makedirs(kml_path)\n\n\t\tfiles = os.listdir(path)\n\t\tif '.DS_Store' in files:\n\t\t\tfiles.remove('.DS_Store')\n\t\tfiles_ = []\n\t\tfor file in files:\n\t\t\tfile = file.split('.')\n\t\t\tfiles_.append(file[0])\n\t\tfiles_ = list(set(files_))\n\t\t#pprint(files_)\n\n\t\tfor file in files_:\n\t\t\tshp = path + file + '.shp'\n\t\t\tkml = kml_path + file.replace('_', ' ') + '.kml'\n\t\t\togr2ogr.main([\"\",\"-f\", \"KML\", kml, shp])", "title": "" }, { "docid": "466e299648c97f0729e276c8e135172a", "score": "0.49392816", "text": "def PathProfile(self) -> Profile3d:", "title": "" }, { "docid": "9a2e9d354cc003d7deeb2f95ae3b23cb", "score": "0.49385214", "text": "def establish_new_path(self, output_folder_path: Path):\n self.new_path = output_folder_path.joinpath(self.title+\".txt\")", "title": "" }, { "docid": "85f5438877420e745ffb7939dfe3322d", "score": "0.49374145", "text": "def as_class_path(self):\n if self.package.is_empty():\n return self.jclass.get()\n elif self.jclass.is_empty():\n return self.package.as_class_path()\n return \".\".join([\n x for x in\n [self.package.as_class_path(), self.jclass.get()]\n if x\n ])", "title": "" }, { "docid": "d22cd7a430b03c81fc621f0a69dd784c", "score": "0.49321896", "text": "def make_output_file(self):\n sc = self\n if not sc.output_fn:\n if sc.verbose:\n g.note('no output file')\n return\n cmd = (\n sc.inkscape_bin,\n \"--without-gui\",\n \"--export-png=\" + sc.output_fn,\n \"--export-area-drawing\",\n \"--export-area-snap\",\n sc.working_fn)\n proc = subprocess.Popen(cmd, stderr=subprocess.PIPE)\n proc.communicate() # Wait for Inkscape to terminate.\n if sc.verbose:\n g.note('wrote: %s' % g.shortFileName(sc.output_fn))\n if Image: # trim transparent border\n try:\n img = Image.open(sc.output_fn)\n img = sc.trim(img, (255, 255, 255, 0))\n img.save(sc.output_fn)\n except IOError:\n g.trace('can not open %s' % sc.output_fn)\n sc.make_at_url_node_for_output_file()", "title": "" }, { "docid": "72512f1a120d02094e2a99a195d58fa1", "score": "0.4922218", "text": "def GetNewPath(self):", "title": "" }, { "docid": "f9143b1c28de6b9b3145d9e6cc833a5c", "score": "0.4919029", "text": "def getExportPath():\n return '/final/path/'", "title": "" }, { "docid": "d0aebe235c9df1d36f51d59fd6b5aeb7", "score": "0.49185914", "text": "def 
write_output(self, filename, output_format='csv'):\n try:\n filename = join(self.folder_absolute_path, filename) + '.' + output_format\n\n if output_format == 'json':\n self.features.to_json(path_or_buf=filename, index=False)\n else:\n if output_format == 'sql':\n self.features.to_sql(path_or_buf=filename, index=False)\n else:\n\n self.features.columns = self.features.columns.str.replace(\"-\", \"_\")\n self.features['id'] = self.features['id'].str.replace('-','_')\n self.features.to_csv(path_or_buf=filename, index=False)\n except:\n logging.error(\"Unexpected error on writing output\")", "title": "" }, { "docid": "c32f98ef37ffa3bd8cf54451c653d954", "score": "0.49160412", "text": "def toPath(prefix, metric):\n\n m = metric.replace(\".\", \"/\") + \".wsp\"\n return os.path.join(prefix, m)", "title": "" }, { "docid": "b76db82d225f3edee226fa8c532b299b", "score": "0.49143124", "text": "def path(self):\n return self.pad_path", "title": "" }, { "docid": "02914f93612bbb9cc51305afcbf16628", "score": "0.4908661", "text": "def export(self, path):\r\n raise NotImplementedError", "title": "" }, { "docid": "c5251d1499b9b588b42096f6a2fcf935", "score": "0.49081084", "text": "def export_area_as_SHP(self, filename_out,area_id, epsgOUT):\n\t\n\t\t\n self.cur.execute(\"SELECT ST_AsText(geom) FROM areas WHERE id = %s;\" % (area_id))\n areaST = self.cur.fetchall() \n geom = areaST[0][0]\n\t\t\n area = []\n if (geom.find('POLYGON') >= 0):\n i = geom.find('POLYGON((')\n j = geom.find('))')\n pts = geom[i+9:j]\n pts = pts.replace(',',' ').split()\n i = 0\n while i < len(pts):\n area.append([float(pts[i]), float(pts[i+1])])\n i += 2\n\n\t\t\n\t\tsrs_in = osr.SpatialReference()\n\t\tsrs_in.ImportFromEPSG(self.epsg)\n\t\tsrs_out = osr.SpatialReference()\t\t\t\n\t\tsrs_out.ImportFromEPSG(epsgOUT)\n\t\t\n\t\t#write the polygon data to KML\n driver = ogr.GetDriverByName('ESRI Shapefile')\n outfile = driver.CreateDataSource(filename_out)\n layerPolygons = outfile.CreateLayer(\"Polygons\", srs_out, ogr.wkbPolygon)\t\n featurePolygons = ogr.Feature(layerPolygons.GetLayerDefn())\n '''\n layerLinestring = outfile.CreateLayer(\"Linestring\", srs_out, ogr.wkbLineString) \n featureLinestring = ogr.Feature(layerLinestring.GetLayerDefn())\n line = ogr.Geometry(type=ogr.wkbLineString)\n '''\n #print outfile.GetLayerCount()\n \n #NOTE: To create a polygon you first need to create a ring\n #\t\tthen add it to the polygon\n ptCount = len(area)\n firstPt = area[0]\n lastPt = area[ptCount-1]\n polygon = ogr.Geometry(type=ogr.wkbPolygon)\t\t\t#create polygon\n ring = ogr.Geometry(type=ogr.wkbLinearRing)\t\t\t#create ring\n\t\t\n #Only convert the feature to polygon if is a closed linestring\n if firstPt[0] == lastPt[0] and firstPt[1] == lastPt[1]:\n print \"Valid LINESTRING\"\t \n i = 0\n while i < ptCount-1:\n p = area[i]\n ring.AddPoint(p[0],p[1])\n i += 1\n ring.CloseRings()\n polygon.AddGeometry(ring)\t\t\t\t\t\t#add ring to polygon\t\n polygon.AssignSpatialReference(srs_in)\n polygon.TransformTo(srs_out)\n featurePolygons.SetGeometry(polygon)\n layerPolygons.CreateFeature(featurePolygons) \n \n '''\n for p in area:\n line.AddPoint(p[0],p[1])\n\n line.AssignSpatialReference(srs_in)\n line.TransformTo(srs_out)\n featureLinestring.SetGeometry(line)\n layerLinestring.CreateFeature(featureLinestring) \n \n featureLinestring.Destroy()\n \n '''\n \n outfile = None\n featurePolygons.Destroy()", "title": "" }, { "docid": "dd21a47ababa283ecef422fa18dc4015", "score": "0.49046147", "text": "def PathName(self) -> str:", "title": "" }, { 
"docid": "6f3267569a449afab63c3ac971b33923", "score": "0.4900609", "text": "def export_shapefile():\n folder = 'shape'\n name = get_dataset_filename()\n \n # Create a shape folder in the temp directory if it does not exist\n working_folder = catalog_publish_folder + name + \"//\" + temp_folder + \"//\" + folder\n create_folder(working_folder, True)\n\n # Create a folder for the shapefile (since it is a folder)\n create_folder(working_folder + \"\\\\\" + name)\n \n # Export the shapefile to the folder\n source = database_connection + \"\\\\\" + args.feature_class\n destination = working_folder + \"\\\\\" + name + \"\\\\\" + name + \".shp\"\n \n # Export the shapefile\n debug(' - Exporting to shapefile from \"' + source + '\" to \"' + destination + '\"')\n gp.CopyFeatures_management(source, destination, \"\", \"0\", \"0\", \"0\")\n \n # Zip up the files\n debug(' - Zipping the shapefile')\n zip = zipfile.ZipFile(working_folder + \"\\\\\" + name + \".zip\", \"w\")\n \n for filename in glob.glob(working_folder + \"/\" + name + \"/*\"):\n zip.write(filename, os.path.basename(filename), zipfile.ZIP_DEFLATED)\n \n zip.close()\n \n # Publish the zipfile to the download folder\n publish_file(working_folder, name + \".zip\",\"shape\")", "title": "" }, { "docid": "4e7c5643bded7b28ec00165bf6182ce5", "score": "0.4892126", "text": "def set_paths(self, output_dir: Optional[Union[str, Path]] = None) -> None:\n if output_dir is None:\n output_dir = os.path.join(os.getcwd(), f\"amici-{self.model_name}\")\n\n self.model_path = os.path.abspath(output_dir)\n self.model_swig_path = os.path.join(self.model_path, \"swig\")", "title": "" }, { "docid": "dba4900497a173fbef677ac8f92ee80d", "score": "0.48878017", "text": "def save_to_feature_class(self, output_feature_class):\n # dictionary to look up geometry types\n geometry_type = {\n 'esriGeometryPoint': 'POINT',\n 'esriGeometryMultipoint': 'MULTIPOINT',\n 'esriGeometryPolyline': 'POLYLINE',\n 'esriGeometryPolygon': 'POLYGON'\n }\n\n # create feature class\n fc = arcpy.CreateFeatureclass_management(\n out_path=os.path.dirname(output_feature_class),\n out_name=os.path.basename(output_feature_class),\n geometry_type=geometry_type[self.properties['geometryType']],\n spatial_reference=arcpy.SpatialReference(self.properties['extent']['spatialReference']['latestWkid'])\n )[0]\n\n # dictionary to look up field types\n field_type = {\n 'esriFieldTypeString': 'TEXT',\n 'esriFieldTypeFloat': 'FLOAT',\n 'esriFieldTypeDouble': 'DOUBLE',\n 'esriFieldTypeSmallInteger': 'SHORT',\n 'esriFieldTypeInteger': 'LONG',\n 'esriFieldTypeDate': 'DATE',\n 'esriFieldTypeGlobalID': 'GUID'\n }\n\n # fields variable for insert cursor later\n insert_field_list = []\n\n # add fields\n for field in self.properties['fields']:\n\n # if the field is not the object id or geometry field\n if self._validate_field(field):\n\n # if this is a text field, look up the length and use it, otherwise just use a blank placeholder\n if field_type[field['type']] == 'TEXT':\n length = field['length']\n else:\n length = \"\"\n\n # add this field name to the fields name list\n insert_field_list.append(field['name'])\n\n # add field with properties\n arcpy.AddField_management(\n in_table=fc,\n field_name=field['name'],\n field_type=field_type[field['type']],\n field_alias=field['alias'],\n field_length=length,\n )\n\n # if the max batch query is less than 100, set this to the batch size, otherwise use 100 since retrieving\n # greater than 100 records from feature services backed by enterprise SDE's using ObjectIDs is 
documented\n # to suffer from performance issues\n if self.properties['maxRecordCount'] > 100:\n batch_size = 100\n else:\n batch_size = self.properties['maxRecordCount']\n\n # get the list of all feature id's\n fid_list = self.get_fid_list()\n\n # create a list of lists (redundant?) to use for making query requests to the REST endpoint\n fid_batch_list = [fid_list[i: i + int(batch_size)] for i in range(0, len(fid_list)-1, int(batch_size))]\n\n # add geometry to the fields list\n insert_field_list.append('SHAPE@')\n\n # use an insert cursor to insert features\n with arcpy.da.InsertCursor(fc, insert_field_list) as insert_cursor:\n\n # iterate the batch lists\n for fid_batch in fid_batch_list:\n\n # make the rest call to get the features\n feature_list = self._get_feature_list(fid_batch)\n\n # for every feature in the feature list\n for feature in feature_list:\n\n # get a list of attribute values using iterator to match up dictionary values matching the fields\n # being used\n attribute_list = []\n\n # iterate the validated field names\n for attribute_name in insert_field_list:\n\n # do not get geometry\n if attribute_name != 'SHAPE@':\n\n # retrieve the value from the dictionary for the attribute name and add it to the list\n attribute_list.append(feature['attributes'][attribute_name])\n\n # add geometry to the list\n attribute_list.append(feature['geometry'])\n\n # use the insert cursor to insert a record\n insert_cursor.insertRow(attribute_list)\n\n # return the path to the feature class\n return fc", "title": "" }, { "docid": "f32359035aa47f4c48fe0e1a0a3ac710", "score": "0.48847365", "text": "def export(self, path):\n pass", "title": "" }, { "docid": "58d8fe7087757dda87cf56fc51e9680a", "score": "0.48847118", "text": "def get_path(geojson_path: str, action: str):\n file_and_extension = os.path.basename(geojson_path).split(\".\")\n new_file = f\"{'.'.join(file_and_extension[:-1])}_{action}.{file_and_extension[-1]}\"\n return os.path.join(os.path.dirname(geojson_path), new_file)", "title": "" }, { "docid": "3cb22b6f5ced4797e2560bdacacbe49b", "score": "0.48618275", "text": "def route2kml(route, fname):\n\n kml = simplekml.Kml()\n\n for city in route:\n kml.newpoint(\n name=city.name,\n coords=[\n math.degrees(city.lat),\n math.degrees(city.lat)\n ]\n )\n\n kml.save(fname)", "title": "" }, { "docid": "a3e174027ca4392bd3dc47da4b3c030f", "score": "0.4858324", "text": "def add_h5paths_out(outpaths, info):\n\n (h5path_wsmask, MAdilation,\n h5path_dist, h5path_lmm, sigmoidweighting) = info\n\n root, ds_main = outpaths['out'].split('.h5')\n for dsname, outpath in outpaths.items():\n grpname = ds_main + \"_steps\"\n outpaths[dsname] = os.path.join(root + '.h5' + grpname, dsname)\n\n if not h5path_wsmask:\n outpaths['wsmask'] = ''\n if MAdilation:\n outpaths['madil{:02d}'.format(MAdilation)] = ''\n if not h5path_dist:\n if sigmoidweighting:\n if not h5path_lmm:\n outpaths['distance_simple'] = ''\n outpaths['sheaths_simple'] = ''\n outpaths['distance_sigmod'] = ''\n else:\n outpaths['distance_simple'] = ''\n\n return outpaths", "title": "" }, { "docid": "b8b6a084c934ab3531c5fd8e0154a902", "score": "0.48450223", "text": "def kpath(self):\n from matdb.database import Database\n if isinstance(self.parent, Database):\n return self.get_kpath()\n elif isinstance(self.parent, Hessian):\n return self.parent.get_kpath()", "title": "" }, { "docid": "002df1c36df5fa7a7524292c9646b9dc", "score": "0.4844066", "text": "def getPath(self) -> unicode:\n ...", "title": "" }, { "docid": 
"4bf91423d00910301290f696ebbcf9f2", "score": "0.48383176", "text": "def input_path(self) -> Path:\n return self.path / INPUT_DIR_NAME", "title": "" }, { "docid": "2929e84d8f8f9de3a6f56bec6ae2db3b", "score": "0.4837608", "text": "def outdir(self):\n funcname = '.'.join(self.id().split('.')[-3:])\n return path.join(self.outputdir, funcname)", "title": "" }, { "docid": "ede72a5196f287db29f4077fc25d9baf", "score": "0.48357865", "text": "def __init__(self, folder_input_path: str, folder_output_path: str = None, output_type: str = \"png\"):\n\n self.folder_input_path, self.folder_output_path = self.check_paths(folder_input_path=folder_input_path,\n folder_output_path=folder_output_path)\n\n self.wmf_folder = self.folder_input_path / \"wmf_files\"\n\n self.wmf_folder.mkdir(parents=True, exist_ok=True)\n\n self.output_type = output_type", "title": "" }, { "docid": "4e700925be47835b8bdad3b9607fc252", "score": "0.48347443", "text": "def get_output_path(self, input_path=None, output_ext=None):\n found_input = bool(input_path)\n\n if found_input and input_path in self._filepath_map:\n return self._filepath_map[input_path]\n\n self._idx += 1\n\n if not found_input:\n input_path = self._default_filename_patt % self._idx\n\n filename = os.path.basename(input_path)\n name, ext = os.path.splitext(filename)\n\n # URL handling\n # @todo improve this, while still maintaining Unix/Windows path support\n name = name.replace(\"%\", \"-\")\n ext = ext.split(\"?\")[0]\n\n if output_ext is not None:\n ext = output_ext\n\n filename = name + ext\n\n key = name if self.ignore_exts else filename\n self._filename_counts[key] += 1\n\n count = self._filename_counts[key]\n if count > 1:\n filename = name + (\"-%d\" % count) + ext\n\n output_path = os.path.join(self.output_dir, filename)\n\n if found_input:\n self._filepath_map[input_path] = output_path\n\n return output_path", "title": "" }, { "docid": "9d6896f1e39a2302816d425e1767a345", "score": "0.4832463", "text": "def get_path(model_name: str, path_type=None) -> str:\n\n if model_name == 'dlib-lmk68':\n return ModelAPI._dlib_lmk68_router(path_type)\n\n elif model_name == 'openface-embed':\n return ModelAPI._openface_embed_router(path_type)", "title": "" }, { "docid": "e0c89aced230990fa09556a92a2ad143", "score": "0.48307207", "text": "def build_single_output(\n feature_config: BaseOutputFeatureConfig, output_features: Optional[Dict[str, OutputFeature]]\n ) -> OutputFeature:\n logger.debug(f\"Output {feature_config.type} feature {feature_config.name}\")\n output_feature_class = get_from_registry(feature_config.type, get_output_type_registry())\n output_feature_obj = output_feature_class(feature_config, output_features=output_features)\n return output_feature_obj", "title": "" }, { "docid": "3d97ffe88c714b354567a1f315a45848", "score": "0.48275766", "text": "def path(cls):\n return cls.__name__.lower()", "title": "" }, { "docid": "e9e9566a95bbf004de7e33416be68ab7", "score": "0.4825473", "text": "def generate_temp_filename(self, output, suffix=None):\n if not suffix:\n suffix = self._temp_suffix\n return Path(\"temp_{t}_{o}_{n:03d}\".format(\n t=self._TYPE, o=Path(output).stem,\n n=self.segment_number)).with_suffix(suffix)", "title": "" }, { "docid": "a9e78a69c99b3a77cce4233ed2020ac2", "score": "0.48145795", "text": "def serialize(self, outf):\n\n # create output dir if not exist\n os.makedirs(os.path.dirname(outf), exist_ok=True)\n # serialize CE CORE into RDF format (turtle)\n print('Serialize CE CORE KB to {} with turtle format...'.format(outf))\n 
self.graph.serialize(destination=outf, format='turtle')\n print('CE CORE KB serialized to {} with turtle format!'.format(outf))\n return True", "title": "" }, { "docid": "afcb1e5e33e88561988f45dca545577b", "score": "0.47968724", "text": "def get_path(self):\n # The path is generated in display coordinates, then converted back to\n # data coordinates.\n _path, fillable = self._get_path_in_displaycoord()\n if np.iterable(fillable):\n _path = Path.make_compound_path(*_path)\n return self.get_transform().inverted().transform_path(_path)", "title": "" }, { "docid": "dd37a5e576b80b69d5228f8a48481835", "score": "0.47918037", "text": "def generate_filepath(self, instance):\n\n # create filename based on instance and field name\n # we do not want to end up with StravaRoute or SwitzerlandMobilityRoute\n if apps.get_model(\"routes\", \"Route\") in getmro(instance.__class__):\n class_name = \"Route\"\n else:\n class_name = instance.__class__.__name__\n\n # generate unique id from unique fields:\n unique_id_values = []\n for field in self.unique_fields:\n unique_field_value = getattr(instance, field)\n\n # get field value or id if the field value is a related model instance\n unique_id_values.append(\n str(getattr(unique_field_value, \"id\", unique_field_value))\n )\n\n # filename, for example: route_data_<uuid>.h5\n filename = \"{class_name}_{field_name}_{unique_id}.h5\".format(\n class_name=class_name.lower(),\n field_name=self.name,\n unique_id=\"\".join(unique_id_values),\n )\n\n # generate filepath\n if callable(self.upload_to):\n filepath = Path(self.upload_to(instance, filename))\n else:\n dirname = self.upload_to\n filepath = Path(dirname, filename)\n return self.storage.generate_filename(filepath)", "title": "" }, { "docid": "fb542de0c12c01721e0b5bc225bf5a66", "score": "0.4787515", "text": "def path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"path\")", "title": "" }, { "docid": "5bf48c5bd017aaa7efee656acbfcdcc4", "score": "0.47866398", "text": "def out_filename(self):\n file = path.join(self.working_directory, str(self.label))\n if self.add_dim:\n file += '_{:0.0f}'.format(self.height)\n if self.width:\n file += 'x{:0.0f}'.format(self.width)\n\n if self.add_dn:\n file += '_DN{:0.0f}'.format(self.add_dn)\n return file", "title": "" }, { "docid": "8c62ec5a1ef88b92c4c5621b520c101c", "score": "0.47860977", "text": "def creat_xs_fc(outpath, outname, spatial_ref, is_shp=True):\r\n shp = None\r\n if not is_shp:\r\n shp = os.path.join (outpath , '{0}.shp'.format (outname))\r\n if arcpy.Exists(shp):\r\n arcpy.Delete_management(shp)\r\n arcpy.CreateFeatureclass_management (outpath , '{0}.shp'.format (outname) , geometry_type=\"POLYLINE\" ,\r\n has_m=\"DISABLED\" ,\r\n has_z=\"DISABLED\" , spatial_reference=spatial_ref)\r\n else:\r\n shp = os.path.join (outpath , '{0}'.format (outname))\r\n if arcpy.Exists(shp):\r\n arcpy.Delete_management(shp)\r\n arcpy.CreateFeatureclass_management (outpath , '{0}'.format (outname) , geometry_type=\"POLYLINE\" ,\r\n has_m=\"DISABLED\" ,\r\n has_z=\"DISABLED\" , spatial_reference=spatial_ref)\r\n\r\n arcpy.AddField_management (shp , field_name=\"Tributary\" , field_alias=\"Tributary\" , field_type=\"TEXT\" ,\r\n field_length=25 , )\r\n\r\n arcpy.AddField_management (shp , field_name=\"Station\" , field_alias=\"Station\" , field_type=\"TEXT\" ,\r\n field_length=25 , )\r\n\r\n return shp", "title": "" }, { "docid": "810d7db5cc4e2adb524281a624f3c9db", "score": "0.47825044", "text": "def PathProfile(self) -> LoftProfile:", "title": "" }, { "docid": 
"2e4bf0c28ea299f9fd5f0ea05ee76a74", "score": "0.47806406", "text": "def test_pathy_export_spacy_model(temp_folder: Path) -> None:\n import spacy\n\n use_fs(temp_folder)\n bucket = Pathy(\"gs://my-bucket/\")\n bucket.mkdir(exist_ok=True)\n model = spacy.blank(\"en\")\n output_path = Pathy(\"gs://my-bucket/models/my_model\")\n model.to_disk(output_path)\n sorted_entries = sorted([str(p) for p in output_path.glob(\"*\")])\n expected_entries = [\n \"gs://my-bucket/models/my_model/config.cfg\",\n \"gs://my-bucket/models/my_model/meta.json\",\n \"gs://my-bucket/models/my_model/tokenizer\",\n \"gs://my-bucket/models/my_model/vocab\",\n ]\n assert sorted_entries == expected_entries", "title": "" }, { "docid": "71a7bc5c4c6f9cedf82ff0d18f123e9e", "score": "0.47784424", "text": "def GetPath(self, *args, **kw):", "title": "" }, { "docid": "40ef1f7dc0fbde3fd4ed7e35d313018d", "score": "0.47706366", "text": "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "title": "" } ]
d15c16502af6ce62d6e0a6aa48a02c38
First implementation of the two preceding algorithms
[ { "docid": "b1f85f94a850720f0d292914e99787b4", "score": "0.0", "text": "def master_algorithme(set_point, heta):\n #find the perfect hash table\n hash_table = epsilon_variation_algo(set_point, len(set_point))\n \n #define the minimal number of rectangle\n min_nb_rectangle = sqrt(len(set_point))\n \n #convert the hash table in a set of rectangles\n set_rectangle = [minimum_rect(hash_table[key]) for key in hash_table.keys()]\n #apply the NN algorithm while the condition is not False\n i = 0 \n while True:\n #find the NN\n afficher_plsr_pts_rect_1(set_rectangle, None, i)\n nearest_neighboor = naive_nearest_neighboor(set_rectangle)\n i+=1\n \n if len(set_rectangle) > 2:\n #merge the NN\n set_rectangle = merge_rectangle(nearest_neighboor, set_rectangle)\n #stop the algorithm\n else:\n return set_rectangle", "title": "" } ]
[ { "docid": "a8dcf31285714d06d3927c1343a0f07a", "score": "0.70782053", "text": "def algorithm(self):", "title": "" }, { "docid": "75f9f33adb7fc5ee9bca5067ef34a714", "score": "0.58826745", "text": "def next_trick(self):", "title": "" }, { "docid": "f5cd32a98def5a9aeaf4aa06e0d6235f", "score": "0.5818549", "text": "def precompute(self,x1,x2):\n pass", "title": "" }, { "docid": "d315e80eb7eefd4f0fdc1bc80f0f84de", "score": "0.57278717", "text": "def session_00018_line_318():", "title": "" }, { "docid": "135fa61e8ea998d21ad0c7e88d306398", "score": "0.56644154", "text": "def __mul__(self, other):\n \"\"\" WARNING: do not try to impliment this function on alarm... \"\"\"\n\n new_scope = []\n for scope in self.scope:\n new_scope.append(scope)\n for scope in other.scope:\n if scope not in self.scope:\n new_scope.append(scope)\n # print \"new_scope\", new_scope\n\n\n new_ranges = {}\n for i in new_scope:\n if (i in self.scope):\n # print \"self.ranges: \", self.ranges # testing\n new_ranges[i] = self.ranges[i]\n elif (i in other.scope):\n new_ranges[i] = other.ranges[i]\n\n\n # print \"new_range: \", new_ranges # testing\n\n\n\n\n x1Ux2_scope = len(new_scope)\n # print \"x1Ux2_scope\", x1Ux2_scope # testing\n\n x1Ux2_cardinality_values = 1\n for key in new_ranges:\n x1Ux2_cardinality_values *= new_ranges[key]\n # print \"x1Ux2_cardinality_values\", x1Ux2_cardinality_values # testing\n\n\n \"\"\" This is the start the implimentation of Alogithm 10.A.1 on pg. 359 \"\"\"\n\n j, k = 0, 0 # Line 1\n assignment = []\n psi_values = []\n\n for l in range(x1Ux2_scope): # Line 2\n assignment.append(0) # Line 3\n\n for i in range(x1Ux2_cardinality_values - 1): # Line 4\n psi_values.append(self.vals[j] * other.vals[k]) # Line 5\n\n for l in new_scope: # Line 6 (modified from the actual algoithem)\n\n assignment[new_scope.index(l)] += 1 # Line 7\n\n if assignment[new_scope.index(l)] == new_ranges[l]: # Line 8\n assignment[new_scope.index(l)] = 0 # Line 9\n\n j = j - (new_ranges[l] - 1) * Factor.stride(self, l) # Line 10\n k = k - (new_ranges[l] - 1) * Factor.stride(other, l) # Line 11\n\n else: # Line 12\n j = j + Factor.stride(self, l) # Line 13\n k = k + Factor.stride(other, l) # Lin3 14\n break # Line 15\n\n\n # print psi_values # testing\n\n psi_values.append(self.vals[j] * other.vals[k])\n new_scope.reverse()\n # END PLACEHOLDER CODE\n return Factor(new_scope, psi_values, new_ranges) # Line 16", "title": "" }, { "docid": "2859810b10b652b5c43604cab0be7747", "score": "0.5631979", "text": "def solution(self):\n pass", "title": "" }, { "docid": "1ce6b293babf58c50644b5eb546b5241", "score": "0.5536008", "text": "def nextP(A, B):\n global a2, b2\n V1 = B - A\n\n t = Vecteur([1, -1 * ((b2) / (a2)) * B.coord[0] / B.coord[1]])\n n = Vecteur([((b2) / (a2)) * B.coord[0] / B.coord[1], 1])\n\n V2 = ((V1 * t) * t) - ((V1 * n) * n)\n\n q1 = V2.coord[0]\n q2 = V2.coord[1]\n k = (-2 * q1 * B.coord[0] / (a2) - 2 * q2 *\n B.coord[1] / (b2)) / (q1 * q1 / a2 + q2 * q2 / b2)\n # print(type(k))\n\n return B + k * V2", "title": "" }, { "docid": "bf462020843ba6f0b46f9c2eac0fe753", "score": "0.5491115", "text": "def compute(self):\r\n\r\n a,b = len(self.A),len(self.B)\r\n\r\n #On crée la matrice chemin qui donne pour chaque case M la(les) case(s) précédente(s) à partir de laquelle elle a été calculée\r\n\r\n chemin = [[[] for i in range(a+2)] for i in range (b+2)]\r\n\r\n for j in range(2, a+2):\r\n chemin[1][j] = [2]\r\n\r\n for i in range(2,b+2):\r\n chemin[i][1] = [1]\r\n\r\n #On remplit la matrice de similarité M\r\n\r\n M = [[0 
for i in range(a+2)] for i in range(b+2)]\r\n\r\n for j in range(2,a+2):\r\n M[0][j] = self.A[j-2]\r\n\r\n for i in range(2,b+2):\r\n M[i][0] = self.B[i-2]\r\n\r\n for j in range(1,a+2):\r\n M[1][j] = (j-1)*self.g\r\n\r\n for i in range(1,b+2):\r\n M[i][1] = (i-1)*self.g\r\n\r\n for j in range(2,a+2):\r\n for i in range(2,b+2):\r\n diag = self.m* (self.A[j-2] != self.B[i-2]) + M[i-1][j-1]\r\n haut = M[i-1][j] + self.g\r\n bas = M[i][j-1] + self.g\r\n\r\n l = [diag,haut,bas]\r\n M[i][j] = int(min(l))\r\n\r\n for k in range(3):\r\n if l[k] == min(l):\r\n chemin[i][j].append(k)\r\n\r\n i,j = b+1,a+1\r\n distance = M[i][j]\r\n\r\n #On crée deux tableaux qui A2 et B2 qui contiendront les chaines alignées par l'algorithme.\r\n A2,B2 = [],[]\r\n\r\n #On crée le tableau parcours qui remonte la matrice M\r\n parcours = []\r\n\r\n while ((i,j) != (2,2) and (i,j) != (2,1) and (i,j) != (1,2)):\r\n if chemin[i][j][0] == 0: #Remarque : parfois, plusieurs chemins sont possibles mais on n'en calcule qu'un seul\r\n parcours.append(0)\r\n i-=1\r\n j-=1\r\n elif chemin[i][j][0] == 1:\r\n parcours.append(1)\r\n i-=1\r\n elif chemin[i][j][0] == 2:\r\n parcours.append(2)\r\n j-=1\r\n else:\r\n break\r\n\r\n parcours = parcours[::-1]\r\n\r\n #Selon la dernière case atteinte on ajoute le premier caractère de A2 et B2\r\n\r\n if (i,j) == (2,2):\r\n A2.append(self.A[0])\r\n B2.append(self.B[0])\r\n\r\n elif (i,j) == (2,1):\r\n A2.append('=')\r\n B2.append(self.B[0])\r\n\r\n elif (i,j) == (1,2):\r\n A2.append(self.A[0])\r\n B2.append('=')\r\n\r\n #On suit ensuite le parcours que l'on a trouvé pour remplir A2 et B2\r\n\r\n for a in parcours:\r\n if a == 0:\r\n i+=1\r\n j+=1\r\n A2.append(self.A[j-2])\r\n B2.append(self.B[i-2])\r\n\r\n elif a == 1:\r\n i+=1\r\n A2.append('=')\r\n B2.append(self.B[i-2])\r\n\r\n elif a == 2:\r\n j+=1\r\n A2.append(self.A[j-2])\r\n B2.append('=')\r\n\r\n A2=\"\".join(A2)\r\n B2=\"\".join(B2)\r\n\r\n #On crée les attributs\r\n\r\n self.A2 = A2\r\n self.B2 = B2\r\n self.distance = distance", "title": "" }, { "docid": "c053d62355c05e23f4e999b4d150af0d", "score": "0.54583997", "text": "def dmp1(self):", "title": "" }, { "docid": "08a3fd3ddfab086f29c9b91351101572", "score": "0.54527193", "text": "def problem71():", "title": "" }, { "docid": "6e28626beacb18d4efe422b3002499ea", "score": "0.5451952", "text": "def __reduce__():", "title": "" }, { "docid": "d9f2602cbdeb4a767539061d1eef6197", "score": "0.5442272", "text": "def _pre_of_new(n1, n2):\n n1_class_counts = n1.class_counts()\n n2_class_counts = n2.class_counts()\n tp, fp = 0.0, 0.0\n n1_total = sum([count for c, count in n1_class_counts.items()\n if c != 'None'])\n n2_total = sum([count for c, count in n2_class_counts.items()\n if c != 'None'])\n\n for c1, count1 in n1_class_counts.items():\n if c1 != 'None':\n tp += (count1 * (count1 - 1.0)) / 2.0\n fp += count1 * (n1_total - count1) / 2.0\n for c2, count2 in n2_class_counts.items():\n if c2 != 'None':\n tp += (count2 * (count2 - 1.0)) / 2.0\n fp += count2 * (n2_total - count2) / 2.0\n\n for c1, count1 in n1_class_counts.items():\n for c2, count2 in n2_class_counts.items():\n if c1 != 'None' and c1 == c2:\n tp += count1 * count2\n elif c1 != 'None' and c2 != 'None' and c1 != c2:\n fp += count1 * count2\n\n # TODO(AK): probably want more assertions.\n assert tp + fp > 0.0\n assert tp / (tp + fp) <= 1.0\n assert tp / (tp + fp) >= 0.0\n\n return tp / (tp + fp)", "title": "" }, { "docid": "11725e0d4aa23fdfc0173c1231d04274", "score": "0.5415706", "text": "def _compute(self):", "title": "" }, 
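The `compute` passage just above implements global alignment with substitution cost `m` and gap cost `g`, then backtracks through the cost matrix to build the two aligned strings, writing `=` for a gap. A compact reconstruction of the same dynamic programme, assuming unit default costs — this is an independent sketch of the technique, not the passage's exact code:

```python
# Minimal global-alignment sketch: substitution cost m, gap cost g,
# '=' used as the gap symbol as in the passage above.
def align(A, B, m=1, g=1):
    n, p = len(A), len(B)
    D = [[0] * (p + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        D[i][0] = i * g
    for j in range(1, p + 1):
        D[0][j] = j * g
    for i in range(1, n + 1):
        for j in range(1, p + 1):
            D[i][j] = min(D[i - 1][j - 1] + m * (A[i - 1] != B[j - 1]),
                          D[i - 1][j] + g,   # gap in B
                          D[i][j - 1] + g)   # gap in A
    # Backtrack from the bottom-right corner to recover one alignment.
    A2, B2, i, j = [], [], n, p
    while i > 0 or j > 0:
        if (i > 0 and j > 0
                and D[i][j] == D[i - 1][j - 1] + m * (A[i - 1] != B[j - 1])):
            A2.append(A[i - 1]); B2.append(B[j - 1]); i -= 1; j -= 1
        elif i > 0 and D[i][j] == D[i - 1][j] + g:
            A2.append(A[i - 1]); B2.append('='); i -= 1
        else:
            A2.append('='); B2.append(B[j - 1]); j -= 1
    return ''.join(reversed(A2)), ''.join(reversed(B2)), D[n][p]

print(align("chat", "cat"))  # -> ('chat', 'c=at', 1)
```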
{ "docid": "f45a6995f008fe558e0b1a713dee2ae9", "score": "0.540002", "text": "def _algorithm(head, rel, tail):\n\t\thead = layers.l2_normalize(head, axis=-1)\n\t\trel = layers.l2_normalize(rel, axis=-1)\n\t\ttail = layers.l2_normalize(tail, axis=-1)\n\t\tscore = head + rel - tail\n\t\treturn score", "title": "" }, { "docid": "b4f08e760333598e2f7fc5ae9c2501cf", "score": "0.5388665", "text": "def algorithm():\n list_how_much_0_owes_1_with_amount_owed, list_friend = generate_data()\n\n print(\"#\" * 100)\n print(\"Algorithm\")\n print(\"#\" * 100)\n print()\n\n dict_k_pair_v_owed = defaultdict(int)\n\n # Algorithm part 1\n for i in list_how_much_0_owes_1_with_amount_owed:\n dict_k_pair_v_owed[i[0]] += i[1]\n\n # Print to show work\n print(\"Dict of Who owes Who (total amount owed)\")\n for pair, amount_owed in dict_k_pair_v_owed.items():\n print(pair, amount_owed)\n print()\n\n dict_k_pair_v_owed_total = dict()\n\n set_frozenset_used = set()\n\n # Algorithm part 2\n for pair, amount_owed in dict_k_pair_v_owed.items():\n\n frozenset_pair = frozenset(pair)\n\n if frozenset_pair not in set_frozenset_used:\n set_frozenset_used.add(frozenset_pair)\n dict_k_pair_v_owed_total[pair] = amount_owed\n continue\n\n else:\n\n pair_reversed = tuple(reversed(pair))\n\n value_of_pair_reversed = dict_k_pair_v_owed_total.get(pair_reversed)\n\n if amount_owed > value_of_pair_reversed:\n dict_k_pair_v_owed_total.pop(pair_reversed)\n dict_k_pair_v_owed_total[pair] = amount_owed - value_of_pair_reversed\n\n else:\n dict_k_pair_v_owed_total[pair_reversed] = dict_k_pair_v_owed_total[pair_reversed] - amount_owed\n\n # Print to show solution\n print(\"How much money friend index 0 owes friend index 1\")\n for pair, amount_owed in dict_k_pair_v_owed_total.items():\n print(pair, amount_owed)\n print()\n\n print(\"Proof that the Maximum amount transactions needed total is <= Combinations of size 2\")\n print(\"Combination: {} CHOOSE {} = {}\".format(len(list_friend), 2,\n math.comb(len(list_friend), 2)))\n print(\"Size of the result of my algorithm {}\".format(len(dict_k_pair_v_owed_total)))", "title": "" }, { "docid": "a0af3ed211bb6078ebf77275abc84820", "score": "0.5388352", "text": "def simplifyForFc(self):\n# nTdebug('Starting simplifyForFc for\\n:%r' % ( self ) )\n atomPairIdxJ = len(self.atomPairs) # starting from the end.\n while atomPairIdxJ > 1:\n atomPairIdxJ -= 1\n atomPairJ = self.atomPairs[atomPairIdxJ]\n atomPairJset = set(atomPairJ) # Important to use api of unsorted atoms in pair (left right will not matter)\n atom0J = atomPairJ[0]\n atom1J = atomPairJ[1]\n\n# nTdebug('For atomPairIdxJ %d using atoms J %s and %s' % ( atomPairIdxJ, atom0J, atom1J) )\n # speed up check on J as an early abort clause.\n if not (atom0J.hasPseudoAtom() or atom1J.hasPseudoAtom()):\n if not (atom0J.getPseudoOfPseudoAtom() or atom1J.getPseudoOfPseudoAtom()):\n# nTdebug('Skipping restraint without pseudo representing J atoms')\n continue\n\n for atomPairIdxI in range(atomPairIdxJ): # Compare only with the previous atom pairs\n atomPairI = self.atomPairs[atomPairIdxI]\n _atom0I = atomPairI[0]\n _atom1I = atomPairI[1]\n# nTdebug(' Using atoms I %s and %s' % ( atom0I, atom1I) )\n atomPairIset = set(atomPairI)\n atomPairIntersection = atomPairIset.intersection(atomPairJset)\n if not atomPairIntersection:\n# nTdebug(' No intersection')\n continue\n\n# At this point it is certain that there is an intersection of at least one atom between the two pairs.\n if len(atomPairIntersection) != 1:\n# nTdebug('More than one atom in atom set 
intersection: %s' % atomPairIntersection)\n continue\n\n atomInCommon = atomPairIntersection.pop() # get arbitrary element of set.\n atomIinCommonIdx = 0\n atomJinCommonIdx = 0\n atomItoMergeIdx = 1\n atomJtoMergeIdx = 1\n if atomPairI[atomIinCommonIdx] != atomInCommon:\n atomIinCommonIdx = 1\n atomItoMergeIdx = 0\n if atomPairJ[atomJinCommonIdx] != atomInCommon:\n atomJinCommonIdx = 1\n atomJtoMergeIdx = 0\n\n # Now we know which atoms are in common and consequently the others should be tried to merge.\n# nTdebug(' atominCommonIdx I %d and J %d for %s' % ( atomIinCommonIdx, atomJinCommonIdx, atomInCommon) )\n\n atomItoMerge = atomPairI[atomItoMergeIdx]\n atomJtoMerge = atomPairJ[atomJtoMergeIdx]\n\n atomIinCommon = atomPairI[atomIinCommonIdx]\n atomJinCommon = atomPairJ[atomJinCommonIdx]\n\n# nTdebug(' atomIinCommon %s == atomJinCommon %s' % ( atomIinCommon, atomJinCommon ))\n if atomIinCommon != atomJinCommon:\n nTcodeerror(' atoms toMerge I %s and J %s differ.' % ( atomItoMerge, atomJtoMerge) )\n continue\n # end if\n\n if atomItoMerge.getStereoPartner() != atomJtoMerge:\n# nTdebug(' atoms toMerge I %s and J %s have different parent if at all related.' % ( atomItoMerge, atomJtoMerge) )\n continue\n # end if\n\n pseudoOfAtom = atomItoMerge.pseudoAtom()\n if not pseudoOfAtom:\n# nTdebug(' no pseudo for this atom %s' % atomItoMerge)\n pseudoOfAtom = atomItoMerge.getPseudoOfPseudoAtom()\n if not pseudoOfAtom:\n nTwarning(' no pseudo of pseudoatom %s' % atomItoMerge) # happens in 1y0j for <Atom A.VAL205.CG1>\n continue\n # end if\n # end if\n\n# nTdebug( \" New pop atom: %s\" % pseudoOfAtom)\n # Change I maintaining order\n atomPairINewList = list(atomPairI)\n atomPairINewList[atomItoMergeIdx] = pseudoOfAtom\n self.atomPairs[atomPairIdxI] = tuple(atomPairINewList)\n# nTdebug(\"Now self.atomPairs[atomPairIdxI]: %s\" % str(self.atomPairs[atomPairIdxI]))\n # Remove J\n# nTdebug(\"Removing self.atomPairs[atomPairIdxJ]: %s\" % str(self.atomPairs[atomPairIdxJ]))\n del self.atomPairs[atomPairIdxJ]\n # Return quickly to keep code to the left (keep it simple).\n# nTdebug('Simplified.')\n return self.STATUS_SIMPLIFIED\n # end for\n # end while\n# nTdebug('Not simplified.')\n return self.STATUS_NOT_SIMPLIFIED", "title": "" }, { "docid": "07dcb95bbef275a97b442069bf6c8503", "score": "0.53762484", "text": "def session_00015_line_271():", "title": "" }, { "docid": "28152b516c23824e3c845d7e214752ab", "score": "0.53493476", "text": "def session_00016_line_284():", "title": "" }, { "docid": "acf9d8d1a0ee3271da4fc4caa5c018f1", "score": "0.5344391", "text": "def experiment16(l):\n s = {x:() for x in l}\n s[l[0]] = (0,l[0])\n s[l[1]] = (0,l[1])\n for a in l:\n for x in s:\n if(a-x in s and a-x != x):\n s[a] = (x,a-x) if x < a-x else (a-x,x)\n break\n\n #print(s)\n ans = {l[0]:(1,0),l[1]:(0,1)}\n for a in l[2:]:\n first = ans[s[a][0]]\n second = ans[s[a][1]]\n ans[a] = (first[0]+second[0],first[1]+second[1])\n for a in l:\n print(a,ans[a],ans[a][0]+ans[a][1],(ans[a][0]/(ans[a][0]+ans[a][1]),ans[a][1]/(ans[a][0]+ans[a][1])) if ans[a][0]+ans[a][1] > 0 else 0)", "title": "" }, { "docid": "35ae811da44ebf915cc56d74d00527b2", "score": "0.53190964", "text": "def apply(self):", "title": "" }, { "docid": "7c8658c79593f0f6a735f770582e580c", "score": "0.5302779", "text": "def adderExample():\n class Adder(Algorithm):\n _inputs_=('a','b')\n _outputs_=('c',)\n def __init__(self,name):\n self.name=name\n self.a=Variable(0)\n self.b=Variable(0)\n self.c=Variable(0)\n Algorithm.__init__(self)\n self.enabled.value=True\n \n def 
update(self):\n self.c.value=self.a.value+self.b.value\n\n a1=Adder(\"a1\")\n a2=Adder(\"a2\")\n i1,i2,i3=Variable(0),Variable(0),Variable(0)\n\n a2.c.observe(pp(\"a2.c value\"))\n a2.c.blocked.observe(pp(\"a2.c blocked\"))\n a1.c.observe(pp(\"a1.c value\"))\n a1.c.blocked.observe(pp(\"a1.c blocked\"))\n\n a2.a.track_variable(a1.c)\n a1.a.track_variable(i1)\n a1.b.track_variable(i2)\n a2.b.track_variable(i3)\n\n return ((i1,i2,i3),(a1,a2))", "title": "" }, { "docid": "2f3ffaa7d856f2c0a6f8092d299351a9", "score": "0.52935946", "text": "def C(n):\n rep = set()\n #nbre = 0\n if n%2 == 0:\n n2 = n//2\n for k in range(1, int(((n-2)/4)**0.5)+1):\n for a in range(1, (n2-1-2*k*(k-1))//(2*k)+1):\n if k == 1:\n for b in range(1, a+1):\n num = n2 - a*b\n if num%(a+b) == 0 and num >= (a+b) and num//(a+b) <= b:\n c = (n2-a*b)//(a+b)\n rep.add((a,b,c))\n #nbre += 1\n else:\n \n alpha = (a+2*(k-1))*4\n beta = (2*(k-1)*(a+k-2)-n2)*4\n #print(k, a, n2, alpha, beta)\n #D = s**2 + s*alpha + beta\n Dmin = max(2**2 + 2*alpha + beta, 1)\n Dmax = max((2*a)**2 + (2*a)*alpha + beta, 1)\n #print(Dmin, Dmax)\n for d, D in iterCarres(Dmin, Dmax):\n discr = alpha**2-4*(beta-D)\n sqdiscr = int(discr**0.5)\n if discr >= 0 and discr == sqdiscr**2 and sqdiscr%2 == 0:\n s = (sqdiscr-alpha)//2\n if (s+d)%2 == 0:\n c1 = (s+d)//2\n b = s-c1\n if N(a,b, c1, k) == n:\n if c1>=1 and b >= c1 and a >= b:\n rep.add((a,b,c1))\n\n c2 = (s-d)//2\n b = s-c2\n if N(a,b, c2, k) == n:\n if c2>= 1 and b >= c2 and a >= b:\n rep.add((a,b,c2))\n \n return len(rep) #nbre", "title": "" }, { "docid": "a4fd3a08868433e46ed0e4b23c847786", "score": "0.5275122", "text": "def get_proof_recursive(self, a, b, P, g, h, n):\n if n == 1:\n #return the tuple: a', b', L[], R[]\n #note total size is 2 * scalar_size + log(n) * 2 * point_size\n return (a[0], b[0], self.L, self.R)\n #Split the existing vectors into halves\n aL, aR = halves(a)\n bL, bR = halves(b)\n gL, gR = halves(g)\n hL, hR = halves(h)\n self.L.append(IPC(aL, bR, g=gR, h=hL, u=self.U).get_commitment())\n self.R.append(IPC(aR, bL, g=gL, h=hR, u=self.U).get_commitment())\n x, xb, x_sq, x_sqb, xinv, xinvb, x_sq_inv, x_sq_invb = self.fiat_shamir(\n self.L[-1], self.R[-1], P)\n #Construct change of coordinates for base points, and for vector terms\n gprime = []\n hprime = []\n aprime = []\n bprime = []\n for i in range(n/2):\n gprime.append(add_pubkeys([multiply(xinvb, g[i], False),\n multiply(xb, g[i+n/2], False)], False))\n hprime.append(add_pubkeys([multiply(xb, h[i], False),\n multiply(xinvb, h[i+n/2], False)], False))\n aprime.append(encode((x * decode(a[i],\n 256) + xinv * decode(a[i + n/2], 256)) % N, 256, 32))\n bprime.append(encode((xinv * decode(b[i],\n 256) + x * decode(b[i + n/2], 256)) % N, 256, 32))\n \n Pprime = add_pubkeys([P, multiply(x_sqb, self.L[-1], False),\n multiply(x_sq_invb, self.R[-1], False)], False)\n return self.get_proof_recursive(aprime, bprime, Pprime, gprime, hprime, n/2)", "title": "" }, { "docid": "3de60c7cd308e819780acb450afe72b1", "score": "0.52667046", "text": "def session_00019_line_331():", "title": "" }, { "docid": "868643b5fd5c2c91f5ca74c33e0a98e9", "score": "0.52657175", "text": "def jaro_comp(val1, val2):\n\n # If at least one of the values is empty return 0\n #\n if (val1 == '') or (val2 == ''):\n return 0.0\n\n # If both attribute values exactly match return 1\n #\n elif (val1 == val2):\n return 1.0\n\n len1 = len(val1) # Number of characters in val1\n len2 = len(val2) # Number of characters in val2\n\n halflen = int(max(len1, len2) / 2) - 1\n\n 
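    # Assumed intent of the passage (annotation, not original code): halflen
    # is the Jaro matching window -- a character in one string counts as
    # "common" only if the same character occurs within
    # floor(max(len1, len2) / 2) - 1 positions in the other string; the two
    # scans below apply that window in each direction.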
assingment1 = '' # Characters assigned in val1\n assingment2 = '' # Characters assigned in val2\n\n workstr1 = val1 # Copy of original value1\n workstr2 = val2 # Copy of original value1\n\n common1 = 0 # Number of common characters\n common2 = 0 # Number of common characters\n\n for i in range(len1): # Analyse the first string\n start = max(0, i - halflen)\n end = min(i + halflen + 1, len2)\n index = workstr2.find(val1[i], start, end)\n if (index > -1): # Found common character, count and mark it as assigned\n common1 += 1\n assingment1 = assingment1 + val1[i]\n print(assingment1)\n for i in range(len2): # Analyse the second string\n start = max(0, i - halflen)\n end = min(i + halflen + 1, len1)\n index = workstr1.find(val2[i], start, end)\n if (index > -1): # Found common character, count and mark it as assigned\n common2 += 1\n assingment2 = assingment2 + val2[i]\n print(assingment2)\n\n if (common1 != common2):\n common1 = float(common1 + common2) / 2.0\n\n if (common1 == 0): # No common characters within half length of strings\n return 0.0\n\n transposition = 0 # Calculate number of transpositions\n\n for i in range(len(assingment1)):\n if (assingment1[i] != assingment2[i]):\n print(assingment1[i],assingment2[i])\n transposition += 1\n print(transposition)\n transposition = transposition / 2.0\n\n\n common1 = float(common1)\n\n jaro_sim = 1. / 3. * (common1 / float(len1) + common1 / float(len2) + \\\n (common1 - transposition) / common1)\n\n assert (jaro_sim >= 0.0) and (jaro_sim <= 1.0), \\\n 'Similarity weight outside 0-1: %f' % (jaro_sim)\n\n return jaro_sim", "title": "" }, { "docid": "28968c35b13c6be24bdb84e08c4929b0", "score": "0.52598137", "text": "def exo2():\n M1 = MWT\n for j in log2(n): -1: 1:\n p = n/ 2^(j)\n sel = 1: p\n sel1 = 1: 2*p\n selw = p + 1: 2*p\n % average/ difference along X\n A = M1(sel, sel1, sel1)\n D = M1(selw, sel1, sel1)\n M1(1: 2: 2*p, sel1, sel1) = (A + D)/ sqrt(2)\n M1(2: 2: 2*p, sel1, sel1) = (A-D)/ sqrt(2)\n % average/ difference along Y\n A = M1(sel1, sel, sel1)\n D = M1(sel1, selw, sel1)\n M1(sel1, 1: 2: 2*p, sel1) = (A + D)/ sqrt(2)\n M1(sel1, 2: 2: 2*p, sel1) = (A-D)/ sqrt(2)\n % average/ difference along Z\n A = M1(sel1, sel1, sel)\n D = M1(sel1, sel1, selw)\n M1(sel1, sel1, 1: 2: 2*p) = (A + D)/ sqrt(2)\n M1(sel1, sel1, 2: 2: 2*p) = (A-D)/ sqrt(2)", "title": "" }, { "docid": "7a6c9f4d3281606a633eebb790432840", "score": "0.52573293", "text": "def _compute_shunts(self):", "title": "" }, { "docid": "1b92579afc41b08f1858d2c3d72151b7", "score": "0.52570015", "text": "def tql2(n, V, d, e):\n for i in range(1, n):\n e[i-1] = e[i]\n\n e[n-1] = 0.0\n\n f = 0.0\n tst1 = 0.0\n eps = math.pow(2.0, -52.0)\n\n for l in range(n):\n tst1 = max(tst1, abs(d[l]) + abs(e[l]))\n m = l\n\n while m < n:\n if abs(e[m]) <= eps*tst1:\n break\n m += 1\n\n if m > l:\n iter = 0\n\n while True:\n iter += 1\n g = d[l]\n p = (d[l+1] - g) / (2.0*e[l])\n r = (p**2 + 1)**0.5\n #r = hypot(p, 1.0)\n\n if p < 0:\n r = -r\n\n d[l] = e[l] / (p + r)\n d[l+1] = e[l] * (p + r)\n dl1 = d[l+1]\n h = g - d[l]\n\n for i in range(l+2, n):\n d[i] -= h\n\n f += h\n p = d[m]\n c = 1.0\n c2 = c\n c3 = c\n el1 = e[l+1]\n s = 0.0\n s2 = 0.0\n\n for i in range(m-1, l-1, -1):\n c3 = c2\n c2 = c\n s2 = s\n g = c*e[i]\n h = c*p\n r = (p**2 + e[i]**2)**0.5\n #r = hypot(p, e[i])\n e[i+1] = s*r\n s = e[i] / r\n c = p / r\n p = c*d[i] - s*g\n d[i+1] = h + s*(c*g + s*d[i])\n\n for k in range(n):\n h = V[k][i+1]\n V[k][i+1] = s*V[k][i] + c*h\n V[k][i] = c*V[k][i] - s*h\n\n p = -s*s2*c3*el1*e[l] / dl1\n e[l] 
= s*p\n d[l] = c*p\n\n if abs(e[l]) <= eps*tst1:\n break\n\n d[l] = d[l] + f\n e[l] = 0.0\n\n for i in range(n-1):\n k = i\n p = d[i]\n\n for j in range(i+1, n):\n if d[j] < p:\n k = j\n p = d[j]\n\n if k != i:\n d[k] = d[i]\n d[i] = p\n\n for j in range(n):\n p = V[j][i]\n V[j][i] = V[j][k]\n V[j][k] = p\n\n return V, d, e", "title": "" }, { "docid": "cd5220d5508a48e8737105db31a86ac5", "score": "0.5245568", "text": "def ProdScalaire(V1, V2):\r\n (Vx1, Vy1, Vz1) = V1\r\n (Vx2, Vy2, Vz2) = V2\r\n return(Vx1 * Vx2 + Vy1 * Vy2 + Vz1 * Vz2)", "title": "" }, { "docid": "7526d12deeebb0ea2f8f662a7ccbd451", "score": "0.52440774", "text": "def arrGen(P,V): #Let P and V be sorted lists\n\t#Does so using the recursive method described in the paper.\n\tif len(P) == 0: yield V; return\n\tif len(P) == 1: [v1,v2] = V; yield [v1,*P,v2]; return #This is for optimization.\n\tP = P.copy()\n\tp = P.pop(0)#P is now P', stored under the same variable for efficiency\n\tV1 = list(filter(lambda i: i < p, V)) #this is Vp from the notes\n\tV2 = V.copy() #this is just for optimization\n\tV2.insert(V.index(max(V1)) + 1, p)\n\tfor [v1,v2] in subsetIter(V1, 2):\n\t\tV3 = V2.copy() #this will be V' from the notes\n\t\tV3.remove(v1)\n\t\tV3.remove(v2)\n\t\tfor a in arrGen(P,V3): #here a' is selected\n\t\t\tj = a.index(p)\n\t\t\ta.insert(j+1,v2) #this line, and the following produce a, corresponding to a'.\n\t\t\ta.insert(j,v1)\n\t\t\tyield a", "title": "" }, { "docid": "92e933f1d02b3385ea7afcd7f1e1d33f", "score": "0.52235883", "text": "def compute_C(self):", "title": "" }, { "docid": "9a68215d37a9be7e0ff915870d129ea6", "score": "0.52072394", "text": "def Map( V, S1, S2, D1, D2):\r\n if (abs(S2 - S1) <= Epsilon):\r\n # print( (S1, S2, D1, D2))\r\n a = 1\r\n b = (D1 + D2) / 2.\r\n else :\r\n a = (D2 - D1) / (S2 - S1)\r\n b = D1 - a * S1\r\n return(a*V+b)", "title": "" }, { "docid": "be8db9369933e27e9da228826a115a82", "score": "0.51992214", "text": "def _k( s1, s2, embs, maxlen, _gap_decay, _match_decay, _order_coefs,D,dD_dgap):\n\tS = embs[s1] @ embs[s2].T\n\t# Main loop, where Kp, Kpp values are calculated.\n\tKp = np.ones(shape=(len(_order_coefs), maxlen, maxlen))\n\tdKp_dgap = np.zeros(shape=(len(_order_coefs), maxlen, maxlen))\n\tdKp_dmatch = np.zeros(shape=(len(_order_coefs), maxlen, maxlen))\n\tmatch_sq = _match_decay * _match_decay\n\n\n\tfor i in range(len(_order_coefs)-1):\n\t\taux1 = S * Kp[i]\n\t\taux2 = np.dot(aux1,D)\n\t\tKpp = match_sq * aux2\n\t\tKp[i + 1] = Kpp.T.dot(D).T\n\n\t\tdaux1_dgap = S * dKp_dgap[i]\n\t\tdaux2_dgap = daux1_dgap.dot(D) + aux1.dot(dD_dgap)\n\t\tdKpp_dgap = match_sq * daux2_dgap\n\t\tdKp_dgap[i + 1] = dKpp_dgap.T.dot(D).T + Kpp.T.dot(dD_dgap).T\n\n\t\tdaux1_dmatch = S * dKp_dmatch[i]\n\t\tdaux2_dmatch = daux1_dmatch.dot(D)\n\t\tdKpp_dmatch = (match_sq * daux2_dmatch) + (2 * _match_decay * aux2)\n\t\tdKp_dmatch[i + 1] = dKpp_dmatch.T.dot(D).T\n\t\t\t\n\n\t\t\t\n\t#Final calculation\n\tfinal_aux1 = S * Kp\n\tfinal_aux2 = np.sum(final_aux1, axis=1)\n\tfinal_aux3 = np.sum(final_aux2, axis=1)\n\tKi = match_sq * final_aux3\n\tk = Ki.dot(_order_coefs)\n\n\tfinal_daux1_dgap = S * dKp_dgap\n\tfinal_daux2_dgap = np.sum(final_daux1_dgap, axis=1)\n\tfinal_daux3_dgap = np.sum(final_daux2_dgap, axis=1)\n\tdKi_dgap = match_sq * final_daux3_dgap\n\tdk_dgap = dKi_dgap.dot(_order_coefs)\n\t\n\tfinal_daux1_dmatch = S * dKp_dmatch\n\tfinal_daux2_dmatch = np.sum(final_daux1_dmatch, axis=1)\n\tfinal_daux3_dmatch = np.sum(final_daux2_dmatch, axis=1)\n\tdKi_dmatch = match_sq * final_daux3_dmatch + (2 * 
_match_decay *final_aux3)\n\tdk_dmatch = dKi_dmatch.dot(_order_coefs)\n\n\tdk_dcoefs = Ki\n\treturn k, dk_dgap, dk_dmatch, dk_dcoefs", "title": "" }, { "docid": "f61dff72706e2134fc21c365b4e959ff", "score": "0.51923126", "text": "def wordseg_soft_acc(list_a: List[Tuple[str, str]],\n list_b: List[Tuple[str, str]]) -> float:\n i, j = 0, 0\n acc = 0.0\n a_l, b_l = 0, 0\n while i < len(list_a) and j < len(list_b):\n a_r = a_l + len(list_a[i][0]) - 1\n b_r = b_l + len(list_b[j][0]) - 1\n if a_r < b_l:\n i += 1\n a_l = a_r + 1\n continue\n if b_r < a_l:\n j += 1\n b_l = b_r + 1\n continue\n if a_l == b_l and a_r == b_r:\n acc += 1.0\n a_l, b_l = a_r + 1, b_r + 1\n i, j = i + 1, j + 1\n continue\n if a_l == b_l and a_r < b_r:\n cnt = 0.0\n tmp_a_r = a_r\n for k in range(i + 1, len(list_a)):\n tmp_a_r += len(list_a[k])\n cnt += 1.0\n if tmp_a_r == b_r:\n acc += cnt\n i, j = k + 1, j + 1\n a_l, b_l = tmp_a_r + 1, b_r + 1\n break\n i += 1\n continue\n if a_l == b_l and a_r > b_r:\n tmp_b_r = b_r\n for k in range(j + 1, len(list_b)):\n tmp_b_r += len(list_b[k])\n if tmp_b_r == a_r:\n acc += 1.0\n i, j = i + 1, k + 1\n a_l, b_l = a_r + 1, tmp_b_r + 1\n break\n j += 1\n continue\n i += 1\n return acc", "title": "" }, { "docid": "ef663699bebafa139a1bba305061b5c7", "score": "0.5191531", "text": "def RBO(l1, l2, p=0.9):\n if l1 == None: l1 = []\n if l2 == None: l2 = []\n \n sl,ll = sorted([(len(l1), l1),(len(l2),l2)])\n s, S = sl\n l, L = ll\n if s == 0: return 0\n\n # Calculate the overlaps at ranks 1 through l \n # (the longer of the two lists)\n ss = set([]) # contains elements from the smaller list till depth i\n ls = set([]) # contains elements from the longer list till depth i\n x_d = {0: 0}\n sum1 = 0.0\n for i in range(l):\n x = L[i]\n y = S[i] if i < s else None\n d = i + 1\n \n # if two elements are same then \n # we don't need to add to either of the set\n if x == y: \n x_d[d] = x_d[d-1] + 1.0\n # else add items to respective list\n # and calculate overlap\n else: \n ls.add(x) \n if y != None: ss.add(y)\n x_d[d] = x_d[d-1] + (1.0 if x in ss else 0.0) + (1.0 if y in ls else 0.0) \n #calculate average overlap\n sum1 += x_d[d]/d * pow(p, d)\n \n sum2 = 0.0\n for i in range(l-s):\n d = s+i+1\n sum2 += x_d[d]*(d-s)/(d*s)*pow(p,d)\n\n sum3 = ((x_d[l]-x_d[s])/l+x_d[s]/s)*pow(p,l)\n\n # Equation 32\n rbo_ext = (1-p)/p*(sum1+sum2)+sum3\n return rbo_ext", "title": "" }, { "docid": "47ef706b1db43359e2d82e0a3a59b00b", "score": "0.51862395", "text": "def alias_setup(probs):\r\n\r\n K = len(probs)\r\n\r\n q = np.zeros(K)\r\n\r\n J = np.zeros(K, dtype=np.int)\r\n\r\n\r\n\r\n smaller = []\r\n\r\n larger = []\r\n\r\n for kk, prob in enumerate(probs):\r\n\r\n q[kk] = K * prob\r\n\r\n if q[kk] < 1.0:\r\n\r\n smaller.append(kk)\r\n\r\n else:\r\n\r\n larger.append(kk)\r\n\r\n\r\n\r\n while len(smaller) > 0 and len(larger) > 0:\r\n\r\n small = smaller.pop()\r\n\r\n large = larger.pop()\r\n\r\n\r\n\r\n J[small] = large\r\n\r\n q[large] = q[large] + q[small] - 1.0\r\n\r\n if q[large] < 1.0:\r\n\r\n smaller.append(large)\r\n\r\n else:\r\n\r\n larger.append(large)", "title": "" }, { "docid": "1055e573c00b7c7ac204856f290fdec0", "score": "0.51753867", "text": "def SOL_2(x,y):\n n = len(x)\n m = len(y)\n if (n>1) and (m>=1):\n i = abs(n)//2\n j = coupure(x,y)\n # print(\"la coupure pour x={} y={} i:{} j:{}\".format(x,y,i,j))\n SOL_2(x[0:i],y[0:j])\n SOL_2(x[i:],y[j:])\n elif(n==1) and (m>1):\n a=align_lettre_mot(x,y)\n for letterX in a[0]:\n XA.append(letterX)\n for letterY in y:\n YA.append(letterY)\n else:\n # 
print(\"fina; n:{} m:{} {} {}\".format(n,m,x,y))\n if(n==0):\n for letterY in y:\n XA.append('-')\n else:\n for letterX in x:\n XA.append(letterX)\n if(m==0):\n for letterX in x:\n YA.append('-')\n else:\n for letterY in y:\n YA.append(letterY)", "title": "" }, { "docid": "d4107ab43c798a5efb09b250d79f4741", "score": "0.5173423", "text": "def subproblem2(p, q, k1, k2):\n \n eps = np.finfo(np.float64).eps\n norm = np.linalg.norm\n \n k12 = np.dot(k1, k2)\n pk = np.dot(p, k2)\n qk = np.dot(q, k1)\n \n # check if solution exists\n if (np.abs( 1 - k12**2) < eps):\n warnings.warn(\"No solution - k1 != k2\")\n return []\n \n a = np.matmul([[k12, -1], [-1, k12]],[pk, qk]) / (k12**2 - 1)\n \n bb = (np.dot(p,p) - np.dot(a,a) - 2*a[0]*a[1]*k12)\n if (np.abs(bb) < eps): bb=0\n \n if (bb < 0):\n warnings.warn(\"No solution - no intersection found between cones\")\n return []\n \n gamma = np.sqrt(bb) / norm(np.cross(k1,k2))\n if (np.abs(gamma) < eps):\n cm=np.array([k1, k2, np.cross(k1,k2)]).T\n c1 = np.dot(cm, np.hstack((a, gamma)))\n theta2 = subproblem1(k2, p, c1)\n theta1 = -subproblem1(k1, q, c1)\n return [(theta1, theta2)]\n \n cm=np.array([k1, k2, np.cross(k1,k2)]).T\n c1 = np.dot(cm, np.hstack((a, gamma)))\n c2 = np.dot(cm, np.hstack((a, -gamma)))\n theta1_1 = -subproblem1(q, c1, k1)\n theta1_2 = -subproblem1(q, c2, k1)\n theta2_1 = subproblem1(p, c1, k2)\n theta2_2 = subproblem1(p, c2, k2)\n return [(theta1_1, theta2_1), (theta1_2, theta2_2)]", "title": "" }, { "docid": "a6c7324e35da0dc2054d14493a83dca8", "score": "0.5165419", "text": "def solve(self):\n raise NotImplementedError('This is the method every algorithm has to implement')", "title": "" }, { "docid": "7b4b46718148a90cc0e80aa62cf09580", "score": "0.51627547", "text": "def ration(a, b, line: int, spot: int, iii: int, target: str) -> float:\n\n d1 = pd.read_csv(\"line \"+str(line)+\" Point \"+str(spot)+\" iteration \"+str(iii)+\" foreground1D.csv\")\n d2 = pd.read_csv(\"line \"+str(line)+\" Point \"+str(spot)+\" iteration \"+str(iii)+\" foreground2D.csv\")\n d1_ = pd.read_csv('../../data/background1D.csv')\n d2_ = pd.read_csv('../../data/background1D.csv')\n \n d1['I'] = d1['I'] - d1_['I']\n base1 = peakutils.baseline(d1['I'], 1)\n d1['I_base'] = d1['I'] - base1\n d1 = d1[(d1['W'] > 1220) & (d1['W'] < 1750)]\n\n d2['I'] = d2['I'] - d2_['I']\n d2 = d2[(d2['W'] > 2550) & (d2['W'] < 2850)]\n d2 = d2[(np.abs(stats.zscore(d2)) < 3).all(axis = 1)]\n base2 = peakutils.baseline(d2['I'], 1)\n d2['I_base'] = d2['I'] - base2\n \n def PseudoVoigtFunction(WavNr, Pos, Amp, GammaL, FracL):\n SigmaG = GammaL / np.sqrt(2*np.log(2)) # Calculate the sigma parameter for the Gaussian distribution from GammaL (coupled in Pseudo-Voigt)\n LorentzPart = Amp * (GammaL**2 / ((WavNr - Pos)**2 + GammaL**2)) # Lorentzian distribution\n GaussPart = Amp * np.exp( -((WavNr - Pos)/SigmaG)**2) # Gaussian distribution\n Fit = FracL * LorentzPart + (1 - FracL) * GaussPart # Linear combination of the two parts (or distributions)\n return Fit\n\n def one_pv(pars, x, data=None, eps=None): #Function definition\n # unpack parameters, extract .value attribute for each parameter\n a3 = pars['a3'].value\n c3 = pars['c3'].value\n s3 = pars['s3'].value\n f3 = pars['f3'].value\n\n peak1 = PseudoVoigtFunction(x.astype(float),c3, a3, s3, f3)\n\n model = peak1 # The global model is the sum of the Gaussian peaks\n\n if data is None: # if we don't have data, the function only returns the direct calculation\n return model, peak1\n if eps is None: # without errors, no ponderation\n return 
(model - data)\n return (model - data)/eps # with errors, the difference is ponderated\n\n def three_pv(pars, x, data=None, eps=None): #Function definition\n # unpack parameters, extract .value attribute for each parameter\n a1 = pars['a1'].value\n c1 = pars['c1'].value\n s1 = pars['s1'].value\n f1 = pars['f1'].value\n \n a4 = pars['a4'].value\n c4 = pars['c4'].value\n s4 = pars['s4'].value\n f4 = pars['f4'].value\n \n a2 = pars['a2'].value\n c2 = pars['c2'].value\n s2 = pars['s2'].value\n f2 = pars['f2'].value\n \n\n peak1 = PseudoVoigtFunction(x.astype(float), c1, a1, s1, f1)\n peak3 = PseudoVoigtFunction(x.astype(float), c4, a4, s4, f4)\n peak2 = PseudoVoigtFunction(x.astype(float), c2, a2, s2, f2)\n\n model = peak1 + peak3 + peak2 # The global model is the sum of the Gaussian peaks\n\n if data is None: # if we don't have data, the function only returns the direct calculation\n return model, peak1, peak3, peak2\n if eps is None: # without errors, no ponderation\n return (model - data)\n return (model - data)/eps # with errors, the difference is ponderated\n\n ps1 = Parameters()\n\n # (Name, Value, Vary, Min, Max, Expr)\n ps1.add_many(('a1', 1 , True, 0, None, None),\n ('c1', 1350, True, 1330, 1370, None),\n ('s1', 20, True, 10, 200, None), # 200 so that we get proper fit width of unpatterned peak \n ('f1', 0.5, True, 0, 1, None),\n ('a4', 1 , True, 0, None, None), # peak middle of GD\n ('c4', 1500, True, 1480, 1520, None),\n ('s4', 20, True, 10, 200, None), \n ('f4', 0.5, True, 0, 1, None),\n ('a2', 1, True, 0, None, None),\n ('c2', 1600, True, 1560, 1640, None),\n ('s2', 20, True, 10, 200, None),\n ('f2', 0.5, True, 0, 1, None))\n\n ps2 = Parameters()\n\n # (Name, Value, Vary, Min, Max, Expr)\n ps2.add_many(('a3', 1, True, 0, None, None),\n ('c3', 2700, True, 2650, 2750, None),\n ('s3', 20, True, 10, 200, None),\n ('f3', 0.5, True, 0, 1, None))\n\n x = d1['W']\n y = d1['I_base']\n out = minimize(three_pv, ps1, method = 'leastsq', args=(x, y))\n\n x2 = d2['W']\n y2 = d2['I_base']\n out2 = minimize(one_pv, ps2, method = 'leastsq', args=(x2, y2))\n\n df1 = pd.DataFrame({key: [par.value] for key, par in out.params.items()})\n df2 = pd.DataFrame({key: [par.value] for key, par in out2.params.items()})\n\n df = pd.concat([df1,df2],axis=1)\n\n if df['s1'].values > 300:\n df[['a1','c1','s1','f1']] = 0\n\n if df['s2'].values > 120:\n df[['a2','c2','s2','f2']] = 0\n\n if df['s3'].values > 120:\n df[['a3','c3','s3','f3']] = 0\n \n df.columns= ['D','PD','WD','FD','D1','PD1','WD1','FD1','G','PG','WG','FG','2D','P2D','W2D','F2D']\n \n df['GD']=df['G']/df['D']\n df['2DG']=df['2D']/df['G']\n df['file'] = ','.join(str(x) for x in [line, spot, iii])\n\n # print(df)\n # fit_results = pd.read_csv('fit_results.csv')\n # print(f\"Before FIT RESULTS:\\n {fit_results}\\n\")\n df.to_csv('fit_results.csv', header=False, index=False, mode = 'a')\n # print(f\"Appended FIT RESULTS:\\n {fit_results}\\n\")\n\n if df['WD'].values>120:\n if (df['D'].values>.3*df['G'].values or df['D1'].values > df['D'].values):\n print(\"Width D > 120 : patterning not done\")\n\n elif (df['WG'].values>120):\n print(\"Width G > 120: patterning not done\")\n df3=pd.read_csv('dataset.csv')\n df3['ratio'].replace(' ',np.nan, inplace=True)\n df4=df3.dropna(subset=[\"ratio\"])\n a=df4['ratio'].shape\n if target == '2DG':\n df3.loc[a[0],'ratio'] = df['2DG'].values[0]\n else:\n df3.loc[a[0],'ratio'] = df['GD'].values[0]\n df3.to_csv('dataset.csv',index=False)\n \n \n elif (np.mean(d1[d1['W']<1255]['I_base']) > 0.7*np.mean(d1[(d1['W']>1340) & 
(d1['W']<1350)]['I_base'])\\\n or np.mean(d1[(d1['W']>1400) & (d1['W']<1550)]['I_base']) > 0.7*np.mean(d1[(d1['W']>1340) & (d1['W']<1350)]['I_base'])) \\\n and (df['GD'].values[0] <= 1.2) :\n print(\"Intensity @ 1255 / 1500 abnormally high : patterning not done\")\n \n else:\n\n df3=pd.read_csv('dataset.csv')\n df3['ratio'].replace(' ',np.nan, inplace=True)\n df4=df3.dropna(subset=[\"ratio\"])\n a=df4['ratio'].shape\n if target == '2DG':\n print(f\"Extracting {target} to dataset.csv\")\n df3.loc[a[0],'ratio'] = df['2DG'].values[0]\n else:\n print(f\"Extracting {target} to dataset.csv\")\n df3.loc[a[0],'ratio'] = df['GD'].values[0]\n df3.to_csv('dataset.csv',index=False)\n \n return df['GD'].values[0]", "title": "" }, { "docid": "6c7d05c68545e26ea5cda6a20ee0879f", "score": "0.51607394", "text": "def experiment17(l,k,m):\n s = {x:() for x in l}\n s[l[0]] = (0,l[0])\n s[l[1]] = (0,l[1])\n for a in l:\n for x in s:\n if(a-x in s and a-x != x):\n s[a] = (x,a-x) if x < a-x else (a-x,x)\n break\n\n #print(s)\n coms = {x:[] for x in [0]+l}\n weird = [];\n weird2 = [];\n for a in l:\n coms[s[a][0]] += [s[a][1]]\n coms[s[a][1]] += [s[a][0]]\n for a in l:\n lo = 0\n hi = 0\n for c in coms[a]:\n if (k*c)%m < m/2:\n lo += 1\n else:\n hi += 1\n low = (k*a)%m < m/2\n if(lo != 0 and hi != 0):\n weird += [(a,lo,hi)]\n if ((low and hi > 0) or (not low and lo > 0)) and hi*lo == 0:\n weird2 += [(a,lo,hi)]\n #print(a,lo,hi,coms[a])\n print(\"WEIRD\")\n for w in weird:\n print(w)\n print(\"WEIRDER\")\n for w in weird2:\n print(w)", "title": "" }, { "docid": "a348e632b0028e6bec1239e2970714eb", "score": "0.5138175", "text": "def first_prime_over():", "title": "" }, { "docid": "301725575c75bae39124d23ee3560e96", "score": "0.5136804", "text": "def productoInternoV(v1,v2):\r\n d= daga(v1)\r\n return productoM(d,v2)", "title": "" }, { "docid": "a8294ec79b4cccfb86d57c907aaad5f9", "score": "0.5134629", "text": "def sol3(limtit) -> int:\n \n // implementation \n \n pass", "title": "" }, { "docid": "aec1ba07a9bf4264a60300ee06ad1862", "score": "0.51252824", "text": "def solve(self):\r\n pass", "title": "" }, { "docid": "aec1ba07a9bf4264a60300ee06ad1862", "score": "0.51252824", "text": "def solve(self):\r\n pass", "title": "" }, { "docid": "9cd2b705d6da14bb2e62c4c32589c575", "score": "0.511769", "text": "def pid(\n almA,\n almB,\n mode=2\n ):\n\n zipped = zip(almA,almB)\n idn_pos = 0\n int_gps = 0\n aln_pos = 0\n\n for charA,charB in zipped:\n tmp = [charA,charB].count('-')\n if tmp == 1:\n int_gps += 1\n elif tmp == 0 and charA == charB:\n idn_pos += 1\n aln_pos += 1\n elif tmp == 0:\n aln_pos += 1\n\n if mode == 2:\n try:\n return idn_pos / (aln_pos + int_gps)\n except ZeroDivisionError:\n #print('\\t'.join(almA))\n #print('\\t'.join(almB))\n #print('-----')\n return 0\n\n elif mode == 1: \n try:\n return idn_pos / aln_pos\n except ZeroDivisionError:\n #print('\\t'.join(almA))\n #print('\\t'.join(almB))\n #print('-----')\n return 0\n\n elif mode == 3:\n srt_seq = min(\n len(\n [i for i in almA if i != '-']\n ),\n len(\n [i for i in almB if i != '-']\n )\n )\n try:\n return idn_pos / srt_seq\n except ZeroDivisionError:\n #print('\\t'.join(strA))\n #print('\\t'.join(strB))\n #print('-----')\n return 0\n\n elif mode == 4:\n srt_seq = min(\n len(\n ''.join([i[0] for i in almA]).strip('-')\n ),\n len(\n ''.join([i[0] for i in almB]).strip('-')\n )\n )\n try:\n return idn_pos / srt_seq\n except ZeroDivisionError:\n print('\\t'.join(almA))\n print('\\t'.join(almB))\n print('-----')\n return 0\n\n elif mode == 5:\n \n return 
idn_pos / len(almA)", "title": "" }, { "docid": "7e973dd74aea32ed82aca60b17ad68e2", "score": "0.51157886", "text": "def conjecture1(n,k,r):\n vavb = sum([1/r[i] for i in range(n-2)]) >= 1/r[n-1]+1/r[n-2]-2/(r[n-1]+r[n-2])\n vavc = sum([1/(r[i]+r[n-1]) for i in range(n-1)]) >= 1/r[n-1]\n vbvc = sum([1/(r[i]+r[n-1]) for i in range(n-2)])+1/r[n-2] >= sum([1/r[i] for i in range(n-2)])+1/(r[n-2]+r[n-1])\n vbva = sum([1/r[i] for i in range(n-2)]) <= 1/r[n-1]+1/r[n-2]-2/(r[n-1]+r[n-2])\n vcva = sum([1/(r[i]+r[n-1]) for i in range(n-1)]) <= 1/r[n-1]\n vcvb = sum([1/(r[i]+r[n-1]) for i in range(n-2)])+1/r[n-2] <= sum([1/r[i] for i in range(n-2)])+1/(r[n-2]+r[n-1])\n if vavb and vavc:\n #Strategy A\n game_value = 2/sum([1/r[i] for i in range(n)])\n if game_value*10000//1 != v.varValue*10000//1:\n #*10000//1 to avoid rounding errors\n print(f\"Ouch... Value A: {game_value} != {v.varValue}\")\n for i in range(size_s):\n if len(searcher_strats[i]) == 1:\n pi = game_value/(2*r[i])\n if (x[i].varValue*10000//1)!=(pi*10000//1):\n print(\"Ouch...\")\n print(f\"{searcher_strats[i]} : {x[i].varValue} != {pi}\\n\")\n return False\n else:\n if int(x[i].varValue*10000)!=0:\n print(\"Ouch...\")\n print(f\"{searcher_strats[i]} : {x[i].varValue} != 0\\n\")\n return False\n print(\"Checked !\\n\")\n return True\n elif vbva and vbvc:\n #Strategy B\n game_value = 1/(sum([1/r[i] for i in range(n-2)])+1/(r[n-2]+r[n-1]))\n if game_value*10000//1 != v.varValue*10000//1:\n #*10000//1 to avoid rounding errors\n print(f\"Ouch... Value B: {game_value} != {v.varValue}\")\n for i in range(size_s):\n if len(searcher_strats[i]) == 1 and i<n-2:\n pi = game_value/r[i]\n if (x[i].varValue*10000//1)!=(pi*10000//1):\n print(\"Ouch...\")\n print(f\"{searcher_strats[i]} : {x[i].varValue} != {pi}\\n\")\n return False\n elif searcher_strats[i]==[n-2,n-1]:\n pi = game_value/(r[n-2]+r[n-1])\n if (x[i].varValue*10000//1)!=(pi*10000//1):\n print(\"Ouch...\")\n print(f\"{searcher_strats[i]} : {x[i].varValue} != {pi}\\n\")\n return False\n else:\n if int(x[i].varValue*10000)!=0:\n print(\"Ouch...\")\n print(f\"{searcher_strats[i]} : {x[i].varValue} != 0\\n\")\n return False\n print(\"Checked !\\n\")\n return True\n elif vcva and vcvb:\n #Strategy C\n game_value = 2/(sum([1/r[i] for i in range(n-1)])+sum([1/(r[i]+r[n-1]) for i in range(n-1)]))\n if game_value*10000//1 != v.varValue*10000//1:\n #*10000//1 to avoid rounding errors\n print(f\"Ouch... 
Value C: {game_value} != {v.varValue}\")\n for i in range(size_s):\n if len(searcher_strats[i]) == 1 and i!=n-1:\n pi = game_value/(2*r[i])\n if (x[i].varValue*10000//1)!=(pi*10000//1):\n print(\"Ouch...\")\n print(f\"{searcher_strats[i]} : {x[i].varValue} != {pi}\\n\")\n return False\n elif len(searcher_strats[i]) == 2 and searcher_strats[i][1]==n-1:\n pi = game_value/(2*(r[searcher_strats[i][0]]+r[searcher_strats[i][1]])) \n if (x[i].varValue*10000//1)!=(pi*10000//1):\n print(\"Ouch...\")\n print(f\"{searcher_strats[i]} : {x[i].varValue} != {pi}\\n\")\n return False\n else:\n if int(x[i].varValue*10000)!=0:\n print(\"Ouch...\")\n print(f\"{searcher_strats[i]} : {x[i].varValue} != 0\\n\")\n return False\n print(\"Checked !\\n\")\n return True", "title": "" }, { "docid": "e5ded93f33c86090f850283eff706d83", "score": "0.5114497", "text": "def pijav_bnb(problem, eps, solinfo, maxsteps):\n class Sub:\n def __init__(self, s1, s2, ival):\n self.s1 = s1\n self.s2 = s2\n self.ival = ival\n a = ival[0]\n b = ival[1]\n La = s1.S[0]\n Lb = s2.S[1]\n va = s1.value\n vb = s2.value\n self.c = (vb - va + La * a - Lb * b)/(La - Lb)\n self.bound = va + La * (self.c - a)\n boundcheck = vb + Lb * (self.c - b)\n if abs(self.bound - boundcheck) > 0.01:\n print(\"bound = \", self.bound, \", check \", boundcheck)\n print(\"a = \", a, \", b = \", b, \", va = \", va, \", vb = \", vb, \", La = \", La, \", Lb = \", Lb)\n print(\"s1 = \", s1)\n print(\"s2 = \", s2)\n\n def bnd(self):\n return self.bound\n\n def __repr__(self):\n return \"s1 = \" + str(self.s1) + \", s2 = \" + str(self.s2) + \", interval = \" + str(self.ival)\\\n + \"c =\" + str(self.c) + \", bound = \" + str(self.bound)\n\n\n f = smp.lambdify(problem.xvar, problem.fexpr)\n fslp = smp.lambdify(problem.xvar, problem.fexpr, slp)\n P = []\n ival = interval.Interval(problem.range)\n s1 = fslp(slp.Slope(ival, ival[0]))\n s2 = fslp(slp.Slope(ival, ival[1]))\n P.append(Sub(s1, s2, ival))\n fr = solinfo.value\n steps = 0\n\n # Expr.flagRecompRange = True\n while len(P) > 0 and steps <= maxsteps:\n steps = steps + 1\n sub = P.pop(0)\n if fr - sub.bnd() > eps:\n x1 = interval.Interval([sub.ival[0], sub.c])\n x2 = interval.Interval([sub.c, sub.ival[1]])\n\n # ns = fslp(slp.Slope(sub.ival, sub.c))\n # ns1 = ns\n # ns2 = ns\n\n ns1 = fslp(slp.Slope(x1, sub.c))\n ns2 = fslp(slp.Slope(x2, sub.c))\n\n\n # print(\"at \", sub.c, \" ns = \", ns)\n if ns1.value < fr:\n fr = ns1.value\n xr = sub.c\n P.append(Sub(sub.s1, ns1, x1))\n P.append(Sub(ns2, sub.s2, x2))\n solinfo.value = fr\n solinfo.x = xr\n return steps", "title": "" }, { "docid": "529ce01eb8b32601062e4cdd0fba7053", "score": "0.51092744", "text": "def A3(fn):\n#want to find a key that matches (M1, M2, M3) into (C1,C2,C3)\n\n returned_message1 = random_string(n_bytes)\n returned_message2 = random_string(n_bytes)\n returned_message3 = random_string(n_bytes)\n\n C1 = fn(returned_message1) \n C2 = fn(returned_message2) \n C3 = fn(returned_message3) \n\n #check that key is consistent with (M1,C1),(M2,C2), (M3,C3)\n for i in range(string_to_int((k_bytes)* \"\\xFF\")): #i is key\n key1 = int_to_string(i,1)\n key2_1 = xor_strings(E_I(key1,C1), returned_message1)\n key2_2 = xor_strings(E_I(key1,C2), returned_message2)\n key2_3 = xor_strings(E_I(key1,C3), returned_message3)\n if(key2_1 == key2_2 and key2_2 == key2_3):\n return key1+key2_1\n \n\n return i", "title": "" }, { "docid": "ebb9475143bdfa377f733590ad05785f", "score": "0.5108019", "text": "def main(*args):\n\n \"\"\"\n l1 = [\"1\", \"2\", \"3\", \"4\", \"2\", 
\"2\", \"4\", \"5\"]\n l2 = [\"3\", \"4\", \"5\"]\n l3 = [\"2\", \"4\", \"2\", \"1\", \"3\", \"3\", \"5\"]\n \"\"\"\n # Case 1 \n #l1 = [\"4\", \"4\", \"3\", \"3\", \"3\", \"2\", \"2\", \"1\"]\n #l2 = [\"3\", \"2\", \"2\"]\n #l3 = [\"4\", \"3\", \"2\", \"1\"]\n #l4 = [\"4\", \"4\", \"3\", \"3\", \"3\" ]\n #lt = [l1, l2, l3, l4]\n\n \"\"\"\n l1 = [\"2\", \"3\", \"4\"]\n l2 = [\"2\", \"4\"]\n lt = [l1, l2]\n \"\"\"\n \"\"\"\n l1 = [\"3\", \"2\", \"4\"]\n l2 = [\"4\", \"3\", \"2\"]\n lt = [l1, l2]\n \"\"\"\n \"\"\"\n l1 = [\"4\", \"2\", \"2\", \"2\", \"3\"]\n l2 = [\"2\", \"4\", \"2\", \"3\", \"2\"]\n lt = [l1, l2]\n \"\"\"\n l1 = [\"2\", \"3\", \"3\", \"3\", \"4\", \"5\"]\n l2 = [\"4\", \"3\", \"3\", \"3\", \"2\"]\n lt = [l1, l2]\n\n n = len(lt)\n jw = \"Jaro Winkler\\n\"\n st = \"Soft tf idf\\n\"\n sl = \"Sequence with t-norm lukasiewicz\\n\"\n sp = \"Sequence with t-norm product\\n\"\n for i in range(n):\n l1 = lt[i]\n for j in range(n):\n l2 = lt[j]\n jw += str((l1, l2))+\"\\t\"\n st += str((l1, l2))+\"\\t\"\n sl += str((l1, l2))+\"\\t\"\n sp += str((l1, l2))+\"\\t\"\n print(\"--------------------------------\")\n print((l1, l2))\n print(\"Jaro Winkler\", jaro_winkler(l1, l2))\n jw += str(jaro_winkler(l1, l2)) + \"\\t\"\n print(\"soft_tf_idf\", soft_tf_idf(l1, l2))\n st += str(soft_tf_idf(l1, l2)) + \"\\t\"\n print(\"sim_subseq lukasiewicz\", sim_subseq_t_norm_l(l1, l2))\n sl += str(sim_subseq_t_norm_l(l1, l2)) + \"\\t\"\n print(\"sim_subseq tnorm\", sim_subseq(l1, l2))\n sp += str(sim_subseq(l1, l2)) + \"\\t\"\n jw += \"\\n\"\n st += \"\\n\"\n sl += \"\\n\"\n sp += \"\\n\"\n\n jw +=\"\\n\"+st+\"\\n\"+sl+\"\\n\"+sp+\"\\n\"\n with open(\"results_caso4.csv\", \"w\") as fd:\n fd.write(jw)\n\n p1 = [43,96,74,38,35,43,22,56,35,80]\n p2 = [30,94,84,13,30,18,30,41,48,95]\n\n cl = SimList(p1, p2)\n print(\"testing s_rho_nomrmalized\")\n z = cl.spearmanr_rho() \n print(z)", "title": "" }, { "docid": "2c09fcd92e60c2fd508060aebfb30f19", "score": "0.5104186", "text": "def routing_two(x, P1, C0, r, rho):\n\n \"\"\" Stage one calculations \"\"\"\n \"\"\" ([xa, ya]) and ([xb, yb]) are treated as origin and intermediate point respectively \"\"\"\n\n A1 = x[0] ** 2 + x[1] ** 2\n\n B1 = 2 * ((-C0[0] * x[0]) + (-C0[1] * x[1]))\n\n C1 = C0[0] ** 2 + C0[1] ** 2 - r ** 2\n\n D1 = (B1 ** 2) - (4 * A1 * C1)\n\n v1 = 0\n\n if D1 > 0:\n s11 = (-B1 + D1 ** 0.5) / (2 * A1)\n\n s21 = (-B1 - D1 ** 0.5) / (2 * A1)\n\n if s11 >= s21:\n t1 = s21\n s21 = s11\n s11 = t1\n\n\n if (s11 < 1 and s21 < 1) or (s11 < 0 and s21 < 0):\n\n v1 = (min(s21, 1) - max(s11, 0)) * (A1 ** 0.5)\n\n else:\n\n v1 = 0\n\n\n F1 = (A1 ** 0.5) + (rho * v1 ** 3) # Function for the first stage\n\n\n \"\"\"Second Stage Calculations\"\"\"\n \"\"\" ([xa, ya]) and ([xb, yb]) are treated as intermediate point and the input point respectively \"\"\"\n\n A2 = (P1[0] - x[0]) ** 2 + (P1[1] - x[1]) ** 2\n\n B2 = 2 * ((x[0] - C0[0]) * (P1[0] - x[0]) + (x[1] - C0[1]) * (P1[1] - x[1]))\n\n C2 = (x[0] - C0[0]) ** 2 + (x[1] - C0[1]) ** 2 - r ** 2\n\n D2 = (B2 ** 2) - (4 * A2 * C2)\n\n v2 = 0\n\n if D2 > 0:\n s12 = (-B2 + D2 ** 0.5) / (2 * A2)\n\n s22 = (-B2 - D2 ** 0.5) / (2 * A2)\n\n if s12 >= s22:\n t2 = s22\n s22 = s12\n s12 = t2\n\n\n if (s12 < 1 and s22 < 1) or (s12 < 0 and s22 < 0):\n\n v2 = (min(s22, 1) - max(s12, 0)) * (A2 ** 0.5)\n\n else:\n\n v2 = 0\n\n\n F2 = (A2 ** 0.5) + (rho * v2 ** 3) # Function for the second stage\n\n F = F1 + F2 # Final objective function for intermediate point\n\n return F", "title": "" }, { "docid": 
"69a3e652d5b49f1c5e335858a3df0695", "score": "0.51013213", "text": "def _compute(self):\n # first, find all first order matches (matches depending only on the attr tuples)\n l.info(\"Phase 1: Coarse statistical matching\")\n for lib, lib_lmds in self.lmdb.lib_lmds.items():\n self._compute_first_order_matches(lib, lib_lmds)\n l.info(\"Phase 2: FunctionDiff\")\n for lib in self.lmdb.lib_lmds:\n self._compute_second_order_matches(lib)\n l.info(\"Phase 3: Callee context\")\n self._compute_third_order()\n l.info(\"Phase 4: Caller context\")\n self._compute_fourth_order()\n l.info(\"Phase 5: Cleanup\")\n self._dedup()", "title": "" }, { "docid": "6dc6d023fd32904806d6c2e16a63aea5", "score": "0.5095904", "text": "def findvertex_rp(tstart0, tend0, tstart1, tend1,\n tstart0_new, tend0_new, tstart1_new, tend1_new,\n phi0, eta0, q0, pt0, dxy0, dz0, pvx0, pvy0, pvz0,\n phi1, eta1, q1, pt1, dxy1, dz1, pvx1, pvy1, pvz1):\n\n global N\n # determine the length of scanned range in rp space\n range0 = tend0_new-tstart0_new\n range1 = tend1_new-tstart1_new\n \n # sampling running parameters for the two helices\n rp0 = np.linspace(tstart0_new, tend0_new, num=N)#,dtype=np.double)\n rp1 = np.linspace(tstart1_new, tend1_new, num=N)#,dtype=np.double)\n \n #sampling points as (N,3) array\n points0 = np.transpose(helix(rp0, phi0, eta0, q0, pt0,\n dxy0, dz0, pvx0, pvy0, pvz0))\n points1 = np.transpose(helix(rp1, phi1, eta1, q1, pt1,\n dxy1, dz1, pvx1, pvy1, pvz1))\n \n # matrices with sampling points, one with copied rows, one with with copied coloumns\n p0_mat = np.tile(points0,(N,1,1))\n p1_mat = np.transpose(np.tile(points1,(N,1,1)),(1, 0, 2))\n \n ds = np.sum((p0_mat-p1_mat)**2, axis=-1)\n min_of_ds = np.amin(ds)\n #ind_sort_x, ind_sort_y = np.unravel_index(np.argsort(ds.flatten())[:4],ds.shape)\n ind_sort_x_0, ind_sort_y_0 = np.unravel_index(np.argsort(ds.ravel()),\n (N,N),order='F')\n x_no_duplicates = []\n y_no_duplicates = []\n\n for i in ind_sort_x_0:\n if i not in x_no_duplicates:\n x_no_duplicates.append(i)\n \n for i in ind_sort_y_0:\n if i not in y_no_duplicates:\n y_no_duplicates.append(i)\n\n ind_sort_x_1, ind_sort_y_1 = (x_no_duplicates[:points_n],\n y_no_duplicates[:points_n])\n\n poca_min = np.sqrt(ds[ind_sort_x_1[0],ind_sort_y_1[0]])\n \n t0 = rp0[ind_sort_x_0[0]]\n t1 = rp1[ind_sort_y_0[0]]\n\n t0_min_d_cand = rp0[ind_sort_x_1]\n t1_min_d_cand = rp1[ind_sort_y_1]\n\n ''' How to return new bounds: take 10 min distances and take\n min and max of the running parameters of this set\n '''\n\n t0_min = min(t0_min_d_cand)\n t0_max = max(t0_min_d_cand)\n t1_min = min(t1_min_d_cand)\n t1_max = max(t1_min_d_cand)\n\n if t1 > tend1_new or t1 < tstart1_new:\n print(\"Alarm!\")\n\n len0 = abs(t0_max - t0_min)*intervall_frac #range0 * 0.33\n len1 = abs(t1_max - t1_min)*intervall_frac #range1 * 0.33\n \n '''\n len0 = range0 * 0.12\n len1 = range1 * 0.12\n '''\n '''\n print \"Ratio of intervall which contains close points: \",len0 / range0\n print \"Ratio of total intervall which contains close points: \",len0 / (tend0-tstart0)\n '''\n \n #adjust len0 and len1 for pathologic cases\n if len0 == 0 or len0 > range0 * 0.45:\n len0 = range0 * 0.45\n\n if len1 == 0 or len1 > range1 * 0.45:\n len1 = range1 * 0.45\n \n #get new search intervalls \n tstart0_newn = 0.\n tstart1_newn = 0.\n tend0_newn = 0.\n tend1_newn = 0.\n #min sourounded with margin\n '''\n if t0 - len0 > tstart0_new:\n tstart0_newn = t0 - len0\n else:\n tstart0_newn = tstart0_new\n \n if t0 + len0 < tend0_new:\n tend0_newn = t0 + len0\n else:\n 
tend0_newn = tend0_new\n\n if t1 - len1 > tstart1_new:\n tstart1_newn = t1 - len1\n else:\n tstart1_newn = tstart1_new\n \n if t1 + len1 < tend1_new:\n tend1_newn = t1 + len1\n else:\n tend1_newn = tend1_new\n '''\n #intervall sourounded with margin \n #'''\n if t0_min - len0 > tstart0_new:\n tstart0_newn = t0_min - len0\n else:\n tstart0_newn = tstart0_new\n \n if t0_max + len0 < tend0_new:\n tend0_newn = t0_max + len0\n else:\n tend0_newn = tend0_new\n\n if t1_min - len1 > tstart1_new:\n tstart1_newn = t1_min - len1\n else:\n tstart1_newn = tstart1_new\n \n if t1_max + len1 < tend1_new:\n tend1_newn = t1_max + len1\n else:\n tend1_newn = tend1_new\n #'''\n #Min sourounded with global limits as scope\n '''\n if t0 - len0 > tstart0:\n tstart0_newn = t0 - len0\n else:\n tstart0_newn = tstart0\n \n if t0 + len0 < tend0:\n tend0_newn = t0 + len0\n else:\n tend0_newn = tend0\n\n if t1 - len1 > tstart1:\n tstart1_newn = t1 - len1\n else:\n tstart1_newn = tstart1\n \n if t1 + len1 < tend1:\n tend1_newn = t1 + len1\n else:\n tend1_newn = tend1\n ''' \n\n return t0, t1, tstart0_newn, tend0_newn, tstart1_newn, tend1_newn,t0_min_d_cand,t1_min_d_cand, poca_min", "title": "" }, { "docid": "aba09951bc167657c0199672ce676923", "score": "0.5093684", "text": "def test_consistency():\n prior_fisher_params = { 'row_strip' :np.array([3,5,6,7]),\n 'fisher_source' :'data/F_Planck_tau0.01.dat',\n 'n_full' :45,\n 'n_de' :36,\n 'z_step' :0.025\n }\n mat = np.random.rand(45,45)\n mat = np.dot(mat.T,mat)\n priors = np.random.rand(45,45)\n priors = np.dot(priors.T,priors)\n assert np.all(np.linalg.eigh(mat)[0]>=0.)\n assert np.all(np.linalg.eigh(priors)[0]>=0.)\n\n fp1 = PriorFisher('jdem',prior_fisher_params,fisher_in=mat,labels_in=JDEM_LABELS)\n fp2 = PriorFisher('w0wa',prior_fisher_params,fisher_in=mat,labels_in=JDEM_LABELS)\n fp3 = PriorFisher('constant_w',prior_fisher_params,fisher_in=mat,labels_in=JDEM_LABELS)\n res0 = fp3.get_fisher().get_fisher()\n res1 = pf.project_w0wa_to_w0(fp2.get_fisher().get_fisher(),prior_fisher_params,fp2.processed_labels)[0]\n res2 = pf.project_w0(fp1.get_fisher().get_fisher(),prior_fisher_params,fp1.processed_labels)[0]\n res3_int = pf.project_w0wa(fp1.get_fisher().get_fisher(),prior_fisher_params,fp1.processed_labels)[0]\n res3 = pf.project_w0wa_to_w0(res3_int,prior_fisher_params,fp2.processed_labels)[0]\n assert np.allclose(res3_int[0:6,0:6],res3)\n assert np.all(np.linalg.eigh(res0)[0]>=0.)\n assert np.all(np.linalg.eigh(res1)[0]>=0.)\n assert np.all(np.linalg.eigh(res2)[0]>=0.)\n assert np.all(np.linalg.eigh(res3_int)[0]>=0.)\n assert np.all(np.linalg.eigh(res3)[0]>=0.)\n assert np.allclose(res0,res1)\n assert np.allclose(res0,res2)\n assert np.allclose(res0,res3)\n assert np.allclose(fp2.get_fisher().get_fisher(),res3_int)\n assert np.isclose(np.linalg.det(res0),np.linalg.det(res1))\n assert np.isclose(np.linalg.det(res0),np.linalg.det(res2))\n assert np.isclose(np.linalg.det(res0),np.linalg.det(res3))\n priors = priors\n fp1p = PriorFisher('jdem',prior_fisher_params,fisher_in=priors,labels_in=JDEM_LABELS)\n fp2p = PriorFisher('w0wa',prior_fisher_params,fisher_in=priors,labels_in=JDEM_LABELS)\n fp3p = PriorFisher('constant_w',prior_fisher_params,fisher_in=priors,labels_in=JDEM_LABELS)\n res0p = fp3p.get_fisher().get_fisher()\n res1p = pf.project_w0wa_to_w0(fp2p.get_fisher().get_fisher(),prior_fisher_params,fp2p.processed_labels)[0]\n res2p = pf.project_w0(fp1p.get_fisher().get_fisher(),prior_fisher_params,fp1p.processed_labels)[0]\n res3_intp = 
pf.project_w0wa(fp1p.get_fisher().get_fisher(),prior_fisher_params,fp1p.processed_labels)[0]\n res3p = pf.project_w0wa_to_w0(res3_intp,prior_fisher_params,fp2p.processed_labels)[0]\n assert np.allclose(res3_intp[0:6,0:6],res3p)\n assert np.all(np.linalg.eigh(res0p)[0]>=0.)\n assert np.all(np.linalg.eigh(res1p)[0]>=0.)\n assert np.all(np.linalg.eigh(res2p)[0]>=0.)\n assert np.all(np.linalg.eigh(res3_intp)[0]>=0.)\n assert np.all(np.linalg.eigh(res3p)[0]>=0.)\n assert np.allclose(res0p,res1p)\n assert np.allclose(res0p,res2p)\n assert np.allclose(res0p,res3p)\n assert np.allclose(fp2p.get_fisher().get_fisher(),res3_intp)\n assert np.isclose(np.linalg.det(res0p),np.linalg.det(res1p))\n assert np.isclose(np.linalg.det(res0p),np.linalg.det(res2p))\n assert np.isclose(np.linalg.det(res0p),np.linalg.det(res3p))\n\n\n alt_prior_fisher_params = { 'row_strip' :np.array([]),\n 'fisher_source' :'data/F_Planck_tau0.01.dat',\n 'n_full' :41,\n 'n_de' :36,\n 'z_step' :0.025\n }\n prior_mat = priors+mat\n fp1c = PriorFisher('jdem',prior_fisher_params,fisher_in=prior_mat,labels_in=JDEM_LABELS)\n fp2c = PriorFisher('w0wa',prior_fisher_params,fisher_in=prior_mat,labels_in=JDEM_LABELS)\n fp3c = PriorFisher('constant_w',prior_fisher_params,fisher_in=prior_mat,labels_in=JDEM_LABELS)\n fp1c2 = PriorFisher('jdem',alt_prior_fisher_params,fisher_in=fp1p.get_fisher().get_fisher()+fp1.get_fisher().get_fisher(),labels_in=fp1.processed_labels)\n fp2c2 = PriorFisher('w0wa',alt_prior_fisher_params,fisher_in=fp1p.get_fisher().get_fisher()+fp1.get_fisher().get_fisher(),labels_in=fp1.processed_labels)\n fp3c2 = PriorFisher('constant_w',alt_prior_fisher_params,fisher_in=fp1p.get_fisher().get_fisher()+fp1.get_fisher().get_fisher(),labels_in=fp1.processed_labels)\n sum_in1 = fp1p.get_fisher().get_fisher()+fp1.get_fisher().get_fisher()\n sum_in2 = fp2p.get_fisher().get_fisher()+fp2.get_fisher().get_fisher()\n sum_in3 = fp3p.get_fisher().get_fisher()+fp3.get_fisher().get_fisher()\n res4 = fp1c.get_fisher().get_fisher()\n res5 = fp2c.get_fisher().get_fisher()\n res6 = fp3c.get_fisher().get_fisher()\n res7 = pf.project_w0wa_to_w0(fp2c.get_fisher().get_fisher(),alt_prior_fisher_params,fp2c.processed_labels)[0]\n res8 = pf.project_w0wa(fp1c.get_fisher().get_fisher(),alt_prior_fisher_params,fp3c.processed_labels)[0]\n res9 = pf.project_w0wa_to_w0(res8,alt_prior_fisher_params,fp2c.processed_labels)[0]\n res10 = fp1c2.get_fisher().get_fisher()\n res11 = fp2c2.get_fisher().get_fisher()\n res12 = fp3c2.get_fisher().get_fisher()\n assert np.allclose(res8[0:6,0:6],res9)\n assert np.all(np.linalg.eigh(res4)[0]>=0.)\n assert np.all(np.linalg.eigh(res5)[0]>=0.)\n assert np.all(np.linalg.eigh(res6)[0]>=0.)\n assert np.all(np.linalg.eigh(res7)[0]>=0.)\n assert np.all(np.linalg.eigh(res8)[0]>=0.)\n assert np.all(np.linalg.eigh(res9)[0]>=0.)\n assert np.all(np.linalg.eigh(res10)[0]>=0.)\n assert np.all(np.linalg.eigh(res11)[0]>=0.)\n assert np.all(np.linalg.eigh(res12)[0]>=0.)\n assert np.allclose(res4,sum_in1)\n assert np.allclose(res5,sum_in2)\n assert np.allclose(res6,sum_in3)\n assert np.allclose(res6,res7)\n assert np.allclose(res5,res8)\n assert np.allclose(res6,res9)\n assert np.allclose(res4,res10)\n assert np.allclose(res5,res11)\n assert np.allclose(res6,res12)\n\n #interlace tests\n eig2 = np.linalg.eigh(res2)[0]\n eig3 = np.linalg.eigh(res3)[0]\n eig_diff = (eig2[::-1][1:eig3.size]-eig3[::-1][0:eig3.size-1])\n eig2p = np.linalg.eigh(res2p)[0]\n eig3p = np.linalg.eigh(res3p)[0]\n eig_diffp = 
(eig2p[::-1][1:eig3p.size]-eig3p[::-1][0:eig3p.size-1])\n assert np.all(eig_diffp<=0.)\n eig2c = np.linalg.eigh(res5)[0]\n eig3c = np.linalg.eigh(res6)[0]\n eig_diffc = (eig2c[::-1][1:eig3c.size]-eig3c[::-1][0:eig3c.size-1])\n assert np.all(eig_diffc<=0.)", "title": "" }, { "docid": "223db33425e0a7293ab9a606bd7ba8fa", "score": "0.5092296", "text": "def eq02():", "title": "" }, { "docid": "96b3461b00e68c1acf13075f11745038", "score": "0.50801736", "text": "def Exp1(A,m,r,k):\n\n B = int(A*r) # number of patients satisfying certain criteria\n expectation = Decimal(0)\n alpha = 1 - 1 / (2 * m)\n # Restrit an interval for single bucket size (|A1| in formula) with probability greater than 1-alpha\n rv_a = binom(A, 1/m)\n (lb_a, ub_a) = rv_a.interval(alpha)\n rv_b = hypergeom(A, int(lb_a), B)\n (lb_b, ub_b) = rv_b.interval(alpha)\n # Rule out the case that there is no collision\n if lb_b == 0 or lb_a == 0:\n for a in range(int(lb_a), int(ub_a)+1):\n if a > k:\n # Find lowerbound and upperbound for B1\n rv_b = hypergeom(A, a, B)\n (lb_b, ub_b) = rv_b.interval(alpha)\n # Rule out the case that there is no collision\n lb_b = max(1, lb_b)\n # Compute P(|e| < k | |A1|)\n p = P(lb_b, ub_b, k, rv_b, a)\n #Compute Expectation\n expectation = expectation + p*Decimal(rv_a.pmf(a))\n else:\n rv_b = hypergeom(A, a, B)\n expectation = expectation + Decimal(rv_a.pmf(a))*(1-Decimal(rv_b.pmf(0)))\n else:\n for a in range(int(lb_a), int(ub_a)+1):\n # when |A1| < k, P(|e| <= k | A1,B1) = 0\n if a > k:\n # Restrit an interval for B1 with probability greater than 0.99995\n rv_b = hypergeom(A, a, B)\n (lb_b, ub_b) = rv_b.interval(0.99995)\n # Compute P(|e|<=k | |A1|)\n p = P(lb_b, ub_b, k, rv_b, a)\n #Compute Expectation\n expectation = expectation + p*Decimal(rv_a.pmf(a))\n else:\n expectation = expectation + Decimal(rv_a.pmf(a))\n return round(expectation*m, 5)", "title": "" }, { "docid": "b1b8f6d7e9104309d19e2e7b9ba5954f", "score": "0.50781476", "text": "def __rtruediv__(self, other):\n \n pass", "title": "" }, { "docid": "8650ac8c92b5644c216f0bea91515fb4", "score": "0.5076354", "text": "def coupure(x,y):\n n = len(x)\n m = len(y)\n iStar = abs(n) // 2\n\n D = [ [ 0 for j in range(len(y)+1)] for i in range(2)]\n I = [ [ 0 for j in range(len(y)+1)] for i in range(2)]\n\n for j in range(1,m+1):\n D[0][j] = c_ins*j\n I[0][j] = j #initialisation 0 1 2 3 4 5\n\n for i in range(1,n+1):\n D[1][0] = c_del*i\n I[1][0] = 0\n for j in range(1,m+1):\n D[1][j] = min(D[0][j] + c_ins, D[1][j-1] + c_del, D[0][j-1] + c_sub(x[i-1],y[j-1]))\n if(i>iStar):\n\n if(D[1][j] == D[1][j-1] + c_del):\n # print(\"element {} {} {}\".format(D[1][j],D[1][j-1],I[0][j-1]))\n I[1][j] = I[1][j-1]\n elif(D[1][j] == D[0][j] + c_ins):\n # print(\"element {} {} {}\".format(D[1][j],D[0][j],I[0][j]))\n I[1][j] = I[0][j]\n elif(D[1][j] == D[0][j-1] + c_sub(x[i-1],y[j-1])):\n # print(\"element {} {} {}\".format(D[1][j],D[0][j-1],I[0][j-1]))\n I[1][j] = I[0][j-1]\n\n\n if(i>iStar and i!=n):\n I[0], I[1] = I[1], I[0]\n\n if(i!=n):\n\n D[0], D[1] = D[1], D[0]\n # print(I[1])\n\n return I[1][-1]", "title": "" }, { "docid": "4540741d0599128a68765629e922c4b6", "score": "0.50758624", "text": "def algorithm(values, replace1, replace2):\n values[1], values[2] = replace1, replace2\n pos = 0\n while True:\n op = values[pos]\n if op == 99:\n return values[0]\n inputs = values[values[pos + 1]], values[values[pos + 2]]\n values[values[pos + 3]] = (inputs[0] * inputs[1]) if op == 2 else (inputs[0] + inputs[1])\n pos += 4", "title": "" }, { "docid": 
"12b08b3b1099c0e00573fb306986571f", "score": "0.5073931", "text": "def computeProbaCCouples(strat_1, strat_2, nb_states, rounds):\n init_weight = 1\n proba_c1 = 0\n proba_c2 = 0\n if hasOnlyOneAction(strat_1):\n proba_c1 = strat_1[2]\n if hasOnlyOneAction(strat_2):\n proba_c2 = strat_2[2]\n else:\n # Build tree knowing that proba_c1 is fixed\n for state in range(nb_states):\n if hasOnlyOneDirection(strat_2):\n direction = strat_2[0]\n if direction == 1 and state == 0: # Left\n proba_c2 += strat_2[2 + state]\n elif direction == 0 and state == nb_states - 1: # Right\n proba_c2 += strat_2[2 + state]\n else:\n root_2 = Tree(state, strat_2, init_weight, nb_states - 1)\n cur_prob_c2 = buildTreeOptimised(proba_c1, root_2, rounds)\n proba_c2 += cur_prob_c2\n else:\n root_2 = Tree(state, strat_2, init_weight, nb_states - 1)\n cur_prob_c2 = buildTreeOptimised(proba_c1, root_2, rounds)\n proba_c2 += cur_prob_c2\n\n proba_c2 /= nb_states\n else: # Strat_1 has different possible actions\n if hasOnlyOneAction(strat_2):\n proba_c2 = strat_2[2]\n # Build tree knowing that proba_c2 is fixed\n for state in range(nb_states):\n if hasOnlyOneDirection(strat_1):\n direction = strat_1[0]\n if direction == 1 and state == 0: # Left\n proba_c1 += strat_1[2 + state]\n elif direction == 0 and state == nb_states - 1:\n proba_c1 += strat_1[2 + state]\n else:\n root_1 = Tree(state, strat_1, init_weight, nb_states - 1)\n cur_prob_c1 = buildTreeOptimised(proba_c2, root_1, rounds)\n proba_c1 += cur_prob_c1\n else:\n root_1 = Tree(state, strat_1, init_weight, nb_states - 1)\n cur_prob_c1 = buildTreeOptimised(proba_c2, root_1, rounds)\n proba_c1 += cur_prob_c1\n\n proba_c1 /= nb_states\n\n else: # Have to build two decision tree without having any fixed probability of cooperation\n for state_1 in range(nb_states):\n for state_2 in range(nb_states):\n root_1 = Tree(state_1, strat_1, init_weight, nb_states - 1)\n root_2 = Tree(state_2, strat_2, init_weight, nb_states - 1)\n cur_prob_c1, cur_prob_c2 = buildTrees(root_1, root_2, rounds)\n proba_c1 += cur_prob_c1\n proba_c2 += cur_prob_c2\n proba_c1 /= np.power(nb_states, 2)\n proba_c2 /= np.power(nb_states, 2)\n\n return proba_c1, proba_c2", "title": "" }, { "docid": "ca783ff53c6a73d7cedffe19b13b47bd", "score": "0.50730383", "text": "def test1(self):\r\n root_2 = math.sqrt(2)\r\n x1 = ureal(0,2)\r\n x2 = ureal(0,6)\r\n\r\n y = fn.mul2(x1,x2)\r\n\r\n self.assertTrue( equivalent(y.x,0) )\r\n\r\n uc = x1.u * x2.u\r\n self.assertTrue( equivalent(y.u,uc) )\r\n\r\n self.assertTrue(\r\n equivalent( component(y,x1), uc/root_2 )\r\n )\r\n\r\n x3 = ureal(0,4)\r\n uc *= x3.u\r\n y = fn.mul2(y,x3)\r\n\r\n self.assertTrue( equivalent(y.u,uc) )\r\n\r\n self.assertTrue(\r\n equivalent( component(y,x1), uc/root_2**2 )\r\n )\r\n self.assertTrue(\r\n equivalent( component(y,x3), uc/root_2 )\r\n )\r\n\r\n # a different order should not affect uc\r\n y = fn.mul2(x1,fn.mul2(x2,x3))\r\n self.assertTrue( equivalent(y.u,uc) )", "title": "" }, { "docid": "518e78e45a0f94046a4f5ee960f857e1", "score": "0.50729614", "text": "def solve(board1, pents):\n\n\n board = 1 - board1\n # print(board)\n # print(pents[0])\n # print(pent_id(pents[0]))\n # print(pents[0][0][0])\n # return\n all_pents = []\n temp = cp.deepcopy(pents)\n # print(get_pent_from_hashed(pent_hash(pents[1], 2, 2, board), board))\n # return\n for i in range(4):\n for p in pents:\n all_pents.append(np.flipud(np.rot90(p, i)))\n all_pents.append(np.rot90(p, i))\n # print(len(all_pents))\n # for i in all_pents:\n # if (get_pent_idx(i) == 2):\n # 
print(i)\n repeat = []\n for i in range(len(all_pents)):\n for j in range(len(all_pents)):\n if i < j and np.array_equal(all_pents[i], all_pents[j]):\n repeat.append(j)\n repeat = list(set(repeat))\n repeat.sort()\n repeat.reverse()\n for i in repeat:\n all_pents.pop(i)\n for i in all_pents:\n pent_id(i)\n y = get_all_subsets(board, all_pents, pents)\n x = range(len(pents) + len(board) * len(board[0]))\n x2 = {j: set(filter(lambda i: j in y[i], y)) for j in x}\n # print(len(y))\n # print(len(x))\n solutions = solve_x(x2, y)\n # print(sol)\n # for i in sol:\n # print(get_pent_from_hashed(i, board))\n # print(len(all_pents))\n # for j in range(1, 13):\n # for i in all_pents:\n # if (get_pent_idx(i) == j):\n # print(i)\n\n # print(sum)\n for i in solutions:\n sol = i\n break\n # print(sol)\n res = [get_pent_from_hashed(r, board) for r in sol]\n b = cp.deepcopy(board)\n\n for i in res:\n add_pentomino(b, i[0], i[1])\n # print(b)\n return res", "title": "" }, { "docid": "61b3e7c234cef2a85297cf7ea2d46b0b", "score": "0.50680363", "text": "def ransac(x,y,funcFindF,funcDist,minPtNum,iterNum,thDist,thInlrRatio):\n\n ptNum = len(x)\n thInlr = round(thInlrRatio*ptNum)\n\n\n inlrNum = np.zeros([iterNum,1])\n fLib= np.zeros(shape=(iterNum,3,3))\n for i in range(iterNum):\n permut = np.random.permutation(ptNum)\n sampleIdx = permut[range(minPtNum)]\n f1 = funcFindF(x[sampleIdx,:],y[sampleIdx,:])\n dist = funcDist(x,y,f1)\n b = dist<=thDist\n r = np.array(range(len(b)))\n inlier1 = r[b]\n inlrNum[i] = len(inlier1)\n if len(inlier1) < thInlr: continue\n fLib[i] = funcFindF(x[inlier1,:],y[inlier1,:])\n\n idx = inlrNum.tolist().index(max(inlrNum))\n f = fLib[idx]\n dist = funcDist(x,y,f);\n b = dist<=thDist\n r = np.array(range(len(b)))\n inlierIdx = r[b]\n return f, inlierIdx", "title": "" }, { "docid": "f79c684c579ae0164feae756673e5ca9", "score": "0.506603", "text": "def session_00013_line_249():", "title": "" }, { "docid": "1da2c1a2c783069b1ca3d28db4027496", "score": "0.5056336", "text": "def problem76():\n from time import clock\n partitions = {}\n def p(n):\n if n < 0: return 0\n if n == 0: return 1\n if n not in partitions:\n partitions[n] = sum([(-1)**(k+1) * (p(n - (k * (3 * k - 1)/2))\n + p(n - (k * (3 * k + 1) / 2))) for k in xrange(1, n+1)])\n return partitions[n]\n\n def main():\n t0 = clock()\n print p(100) - 1\n print clock() - t0\n\n if __name__ == \"__main__\":\n main()", "title": "" }, { "docid": "27929fd0bae9112084fc9d03b91d7eae", "score": "0.5055975", "text": "def compute_matching(x, y):\n\n pass", "title": "" }, { "docid": "24995310ea8ee997700e8ed101f1e7ab", "score": "0.50551796", "text": "def test_big_better_algorithm():\n pass", "title": "" }, { "docid": "6ead608373e997a811506ea417fbd8b3", "score": "0.50513583", "text": "def foiling(b_one: List[str], b_two: List[str], var_type: str) -> List[str]:\n #print(b_one, b_two, var_type)\n b_one_string = stringify(b_one)\n b_one_obj = Algebra(b_one_string+\"=0\")\n\n b_two_string = stringify(b_two)\n b_two_obj = Algebra(b_two_string+\"=0\")\n\n # Create array with only coefficients\n b_one_obj.get_coeff()\n b_two_obj.get_coeff()\n b_one_coeff = copy.deepcopy(b_one_obj.coeff)\n b_two_coeff = copy.deepcopy(b_two_obj.coeff)\n b_one_obj, b_two_obj = None, None\n # print(\"b_one_coeff = \", b_one_coeff.eqn, \"b_two_coeff = \", b_two_coeff.eqn)\n\n ans_coeff = []\n ans_powers = []\n b_one_pow = len(b_one_coeff) - 1\n b_two_pow = len(b_two_coeff) - 1\n\n # Foiling\n for i in range(0, len(b_one_coeff)):\n\n for j in range(0, len(b_two_coeff)):\n 
ans_coeff.append(b_one_coeff[i] * b_two_coeff[j])\n ans_powers.append((b_one_pow - i) + (b_two_pow - j))\n\n # print(\"ans_coeff = \", ans_coeff)\n # Simplifying the answer\n simp_ans_coeff = []\n temp_ind = []\n temp = 0\n for i in range(ans_powers[0], -1, -1):\n\n for j in range(0, len(ans_powers)):\n\n if ans_powers[j] == i:\n temp += ans_coeff[j]\n\n simp_ans_coeff.append(temp)\n temp = 0\n\n # print(\"simp_ans_coeff = \", simp_ans_coeff)\n ans = []\n j = 0\n highest_deg_ans = len(simp_ans_coeff) - 1\n\n if highest_deg_ans > 1:\n\n for i in range(highest_deg_ans, 1, -1):\n\n if not ans:\n\n if simp_ans_coeff[j].imag == 0:\n\n if simp_ans_coeff[j] == 1:\n\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n elif simp_ans_coeff[j] == 0:\n\n pass\n j += 1\n\n elif simp_ans_coeff[j] < 0:\n\n if simp_ans_coeff[j] == -1:\n\n ans.append(\"-\" + var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n else:\n\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n else:\n\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n # simp_coeff_ans[j] is a complex number\n else:\n\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n else:\n\n if simp_ans_coeff[j].imag == 0:\n\n if simp_ans_coeff[j] == 1:\n\n ans.append(\"+\")\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n elif simp_ans_coeff[j] == 0:\n\n pass\n j += 1\n\n elif simp_ans_coeff[j] < 0:\n\n if simp_ans_coeff[j] == -1:\n\n ans.append(\"-\")\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n else:\n\n ans.append(\"-\")\n ans.append(str(abs(simp_ans_coeff[j])))\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n # simp_coeff_ans[j] is a complex number\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n ans.append(\"^\")\n ans.append(str(float(i)))\n j += 1\n\n if simp_ans_coeff[j].imag == 0:\n\n if simp_ans_coeff[j] == 1:\n\n ans.append(\"+\")\n ans.append(var_type)\n j += 1\n\n elif simp_ans_coeff[j] == 0:\n\n pass\n j += 1\n\n elif simp_ans_coeff[j] < 0:\n\n if simp_ans_coeff[j] == -1:\n\n ans.append(\"-\")\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(\"-\")\n ans.append(str(abs(simp_ans_coeff[j])))\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n j += 1\n\n if simp_ans_coeff[j].imag == 0:\n\n if simp_ans_coeff[j] == 0:\n\n pass\n\n else:\n\n if simp_ans_coeff[j] < 0:\n\n ans.append(\"-\")\n ans.append(str(abs(simp_ans_coeff[j])))\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n\n else:\n\n if not ans:\n\n if simp_ans_coeff[j].imag == 0:\n\n if simp_ans_coeff[j] == 1:\n\n ans.append(var_type)\n j += 1\n\n elif simp_ans_coeff[j] == 0:\n\n pass\n j += 1\n\n elif simp_ans_coeff[j] < 0:\n\n if simp_ans_coeff[j] == -1:\n\n ans.append(\"-\")\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(\"-\")\n 
ans.append(str(abs(simp_ans_coeff[j])))\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n j += 1\n\n else:\n\n if simp_ans_coeff[j].imag == 0:\n\n if simp_ans_coeff[j] == 1:\n\n ans.append(\"+\")\n ans.append(var_type)\n j += 1\n\n elif simp_ans_coeff[j] == 0:\n\n pass\n j += 1\n\n elif simp_ans_coeff[j] < 0:\n\n if simp_ans_coeff[j] == -1:\n\n ans.append(\"-\")\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(\"-\")\n ans.append(str(abs(simp_ans_coeff[j])))\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n j += 1\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n ans.append(var_type)\n j += 1\n\n if simp_ans_coeff[j].imag == 0:\n\n if simp_ans_coeff[j] == 0:\n\n pass\n\n else:\n\n if simp_ans_coeff[j] < 0:\n\n ans.append(\"-\")\n ans.append(str(abs(simp_ans_coeff[j])))\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n\n else:\n\n ans.append(\"+\")\n ans.append(str(simp_ans_coeff[j]))\n\n return ans", "title": "" }, { "docid": "f6038987bef58ff3fa661afc41ddce01", "score": "0.50459707", "text": "def clause_b_5_1_2_T_z_1():\n\n pass", "title": "" }, { "docid": "82be65ce20341777894c01f529d32afd", "score": "0.5044495", "text": "def problem2(self, s):\n\n points = 0\n # Test problem 2 here.\n return points", "title": "" }, { "docid": "b73b09889263ec4104e2f4fc4c4b74ba", "score": "0.5042088", "text": "def calculate_rbo(recommendations1, recommendations2, p=0.98):\n if recommendations1 == None: recommendations1 = []\n if recommendations2 == None: recommendations2 = []\n\n sl, ll = sorted([(len(recommendations1), recommendations1), (len(recommendations2), recommendations2)])\n s, S = sl\n l, L = ll\n if s == 0: return 0\n\n # Calculate the overlaps at ranks 1 through l\n # (the longer of the two lists)\n ss = set([]) # contains elements from the smaller list till depth i\n ls = set([]) # contains elements from the longer list till depth i\n x_d = {0: 0}\n sum1 = 0.0\n for i in range(l):\n x = L[i]\n y = S[i] if i < s else None\n d = i + 1\n\n # if two elements are same then\n # we don't need to add to either of the set\n if x == y:\n x_d[d] = x_d[d - 1] + 1.0\n # else add items to respective list\n # and calculate overlap\n else:\n ls.add(x)\n if y != None: ss.add(y)\n x_d[d] = x_d[d - 1] + (1.0 if x in ss else 0.0) + (1.0 if y in ls else 0.0)\n # calculate average overlap\n sum1 += x_d[d] / d * pow(p, d)\n\n sum2 = 0.0\n for i in range(l - s):\n d = s + i + 1\n sum2 += x_d[d] * (d - s) / (d * s) * pow(p, d)\n\n sum3 = ((x_d[l] - x_d[s]) / l + x_d[s] / s) * pow(p, l)\n\n # Equation 32\n rbo_ext = (1 - p) / p * (sum1 + sum2) + sum3\n return rbo_ext", "title": "" }, { "docid": "01c5996683001e47ba10a2f351370ae5", "score": "0.5042032", "text": "def NSGA2(function1, function2, max_gen, pop_size, dim, pf_target):\n\n gen_no = 0\n igd_values = []\n\n n_obj = 2\n pf_ref = copy.deepcopy(pf_target)\n pf_ref_len = len(pf_ref)\n max_obj = [0] * n_obj\n min_obj = [0] * n_obj\n for i in range(n_obj):\n pf_ref = sorted(pf_ref, key=lambda obj: obj[i])\n max_obj[i] = pf_ref[pf_ref_len - 1][i]\n min_obj[i] = pf_ref[0][i]\n for i in range(pf_ref_len):\n for j in range(n_obj):\n pf_ref[i][j] = (pf_ref[i][j] - min_obj[j]) / (max_obj[j] - min_obj[j])\n pf_ref_f1 = []\n pf_ref_f2 = []\n for i in range(pf_ref_len):\n pf_ref_f1.append(pf_ref[i][0])\n 
pf_ref_f2.append(pf_ref[i][1])\n\n # Initialize search population\n solution = [[random.random() for _ in range(dim)] for _ in range(0, pop_size)]\n function1_values = [function1(solution[i]) for i in range(0, pop_size)]\n function2_values = [function2(solution[i]) for i in range(0, pop_size)]\n\n while (gen_no < max_gen):\n non_dominated_sorted_solution = fast_non_dominated_sort(function1_values[:], function2_values[:])\n print(\"NSGA-II Output for Generation \", gen_no, \" :\")\n parent_front_f11 = []\n parent_front_f22 = []\n non_dominated_sorted_solution[0].sort()\n for index in non_dominated_sorted_solution[0]:\n parent_front_f11.append(function1_values[index])\n parent_front_f22.append(function2_values[index])\n\n # Compute IGD values\n parent_front_f1 = []\n parent_front_f2 = []\n for i in range(len(parent_front_f11)):\n parent_front_f1.append((parent_front_f11[i] - min_obj[0]) / (max_obj[0] - min_obj[0]))\n parent_front_f2.append((parent_front_f22[i] - min_obj[1]) / (max_obj[1] - min_obj[1]))\n sum_dist = 0\n for i in range(pf_ref_len):\n min_dist = math.inf\n for j in range(len(parent_front_f1)):\n dist2 = pow(parent_front_f1[j] - pf_ref_f1[i], 2.0) + pow(parent_front_f2[j] - pf_ref_f2[i], 2.0)\n dist = math.sqrt(dist2)\n if dist < min_dist:\n min_dist = dist\n sum_dist += min_dist\n igd = sum_dist / pf_ref_len\n igd_values.append(igd)\n print('IGD = ', igd)\n\n # Generating offsprings\n solution2 = solution[:]\n while (len(solution2) < 2 * pop_size):\n a1 = random.randint(0, pop_size - 1)\n a2 = random.randint(0, pop_size - 1)\n a = binary_tournament(a1, a2, function1_values[:], function2_values[:])\n b1 = random.randint(0, pop_size - 1)\n b2 = random.randint(0, pop_size - 1)\n b = binary_tournament(b1, b2, function1_values[:], function2_values[:])\n c1, c2 = SBX_crossover(solution[a], solution[b])\n c1_mutated, c2_mutated = polynomial_mutation(c1, c2)\n solution2.append(c1_mutated)\n solution2.append(c2_mutated)\n function1_values2 = function1_values[:]\n function2_values2 = function2_values[:]\n for i in range(pop_size, 2 * pop_size):\n function1_values2.append(function1(solution2[i]))\n function2_values2.append(function2(solution2[i]))\n non_dominated_sorted_solution2 = fast_non_dominated_sort(function1_values2[:], function2_values2[:])\n crowding_distance_values2 = []\n for i in range(0, len(non_dominated_sorted_solution2)):\n crowding_distance_values2.append(crowding_distance(function1_values2[:], function2_values2[:], non_dominated_sorted_solution2[i][:]))\n\n # Environmental selection\n new_solution = []\n function1_values = []\n function2_values =[]\n for i in range(0, len(non_dominated_sorted_solution2)):\n non_dominated_sorted_solution2[i].sort()\n front = sort_distance(non_dominated_sorted_solution2[i], crowding_distance_values2[i])\n front.reverse()\n for index in front:\n new_solution.append(solution2[index])\n function1_values.append(function1_values2[index])\n function2_values.append(function2_values2[index])\n if (len(new_solution) == pop_size):\n break\n if (len(new_solution) == pop_size):\n break\n\n solution = new_solution[:]\n gen_no = gen_no + 1\n print(\"\\n\")\n\n return igd_values", "title": "" }, { "docid": "a5ec347c651217b0499eafc78060a0b3", "score": "0.5037284", "text": "def _postsolve(self):", "title": "" }, { "docid": "94e389dcd2cca8e465f31514884aa7b8", "score": "0.5032518", "text": "def cycle(c):\n \n \"\"\"\n -1 Initial input parameters\n -2 Mutation\n -3 child a\n -4 child b\n -5 best\n \"\"\"\n time_start = time.time()\n \n# Counter\n 
pf.this_cycle = pf.this_cycle + 1\n pf.this_gen = 0\n \n# Start Cycle (calculate width,\n pf.start_cycle()\n \n# SAVE Starting Parameters\n a = 0\n for fn in range(len(g.pot_functions['functions'])): \n if(g.pot_functions['functions'][fn]['fit_type'] == 1): # TABULATED \n b = a + g.pot_functions['functions'][fn]['fit_size'] \n pf.ps[-1,a:b] = numpy.zeros((g.pot_functions['functions'][fn]['fit_size'],),)\n a = b\n elif(g.pot_functions['functions'][fn]['fit_type'] == 2): # ANALYTIC \n b = a + g.pot_functions['functions'][fn]['fit_size'] \n pf.ps[-1,a:b] = g.pot_functions['functions'][fn]['a_params'][:]\n a = b\n\n# Update and calculate starting rss\n pf.update_potential(pf.ps[-1,:])\n pf.rss[-1] = pf.get_rss() \n \n# Try within the range provided\n \n# Create initial population - First half\n pf.stage = 'Initialising population - First Half'\n for p in range(pf.pop_size):\n if(p == 0):\n pf.ps[p,:] = pf.ps[-1,:]\n else:\n pf.ps[p,:] = pf.ps[-1,:]\n \n# Calculate and save RSS\n pf.ps[p,:] = pf.random_p(0.0, 1.0)\n pf.update_potential(pf.ps[p,:])\n pf.rss[p] = pf.get_rss() \n pf.check_improvement(pf.ps[p,:], pf.rss[p])\n\n# Look in a wider range\n\n# Create initial population - Second half\n pf.stage = 'Initialising population - Second Half'\n m = 0.5\n m_inc = (10.0-0.5) / (pf.pop_size - 1)\n for p in range(pf.pop_size, pf.pop_size_d):\n loop = True\n while(loop): \n pf.ps[p,:] = pf.random_p(0.0, m)\n pf.update_potential(pf.ps[p,:])\n pf.rss[p] = pf.get_rss() \n if(not numpy.isnan(pf.rss[p])):\n loop = False\n pf.check_improvement(pf.ps[p,:], pf.rss[p])\n m = m + m_inc\n\n###################################\n# LOOP THROUGH GENERATIONS\n###################################\n \n gens = pf.generations\n if(c > 1): \n gens = pf.spline_generations\n pf.since_improvement = 0\n for gen in range(gens):\n pf.this_gen = gen + 1\n \n parents = numpy.arange(pf.pop_size_d)\n numpy.random.shuffle(parents)\n\n pf.stage = 'Looping population'\n \n#######################\n# Parents + Parents\n#######################\n \n ca = 0\n# Loop through population\n for p in range(pf.pop_size): \n pa = parents[p]\n pb = parents[p + pf.pop_size]\n \n# Breed\n pf.breed(pa, pb, ca, ca + 1)\n \n# Run calculations\n pf.update_potential(pf.p_children[ca, :]) \n pf.rss_children[ca] = pf.get_rss() \n pf.check_improvement(pf.p_children[ca,:], pf.rss_children[ca]) \n \n pf.update_potential(pf.p_children[ca+1, :]) \n pf.rss_children[ca+1] = pf.get_rss() \n pf.check_improvement(pf.p_children[ca+1,:], pf.rss_children[ca+1])\n \n# Increment\n ca = ca + 2\n \n# MAKE FRESH PARAMETERS\n m = 0.5\n m_inc = (10.0-0.5) / (pf.fresh_size_d - 1)\n for p_fresh in range(pf.pop_size_d, pf.pop_size_d + pf.fresh_size_d): \n pf.ps[p_fresh,:] = pf.random_p(pf.ps[-5,:], m)\n m = m + m_inc\n \n#######################\n# Parents + Fresh\n#######################\n \n# Pick parents\n parents = numpy.arange(pf.pop_size_d)\n numpy.random.shuffle(parents)\n \n pf.stage = 'Looping population + fresh pool'\n# Breed random parents with fresh pool\n pa_n = 0\n for p_fresh in range(pf.pop_size_d, pf.pop_size_d + pf.fresh_size_d):\n pa = parents[pa_n]\n \n# Breed\n pf.breed(pa, p_fresh, ca, ca + 1)\n \n# Run calculations\n pf.update_potential(pf.p_children[ca, :]) \n pf.rss_children[ca] = pf.get_rss() \n pf.check_improvement(pf.p_children[ca, :], pf.rss_children[ca]) \n \n pf.update_potential(pf.p_children[ca+1, :]) \n pf.rss_children[ca + 1] = pf.get_rss() \n pf.check_improvement(pf.p_children[ca+1, :], pf.rss_children[ca+1])\n \n# Increment\n ca = ca + 2\n 
\n#######################\n# Select Best\n#######################\n \n pf.rss_list[:pf.pop_size_d] = pf.rss[:pf.pop_size_d]\n pf.rss_list[pf.pop_size_d:] = pf.rss_children[:]\n\n rss_sorted = sort.sort_1d_dp_asc(pf.rss_list)\n rt = rss_sorted[pf.pop_size_d-1]\n\n# SELECT BEST FROM PARENTS AND CHILDREN FOR NEXT GEN\n n = 0\n for i in range(pf.pop_size_d):\n if(pf.rss[i] < rt):\n pf.p_temp[n,:] = pf.ps[i,:]\n pf.rss_temp[n] = pf.rss[i]\n n = n + 1 \n for i in range(len(pf.rss_children)):\n if(pf.rss_children[i] < rt):\n pf.p_temp[n,:] = pf.p_children[i,:]\n pf.rss_temp[n] = pf.rss_children[i]\n n = n + 1\n for i in range(pf.pop_size_d):\n if(pf.rss[i] == rt and n < pf.pop_size_d):\n pf.p_temp[n,:] = pf.ps[i,:]\n pf.rss_temp[n] = pf.rss[i]\n n = n + 1 \n for i in range(len(pf.rss_children)):\n if(pf.rss_children[i] == rt and n < pf.pop_size_d):\n pf.p_temp[n,:] = pf.p_children[i,:]\n pf.rss_temp[n] = pf.rss_children[i]\n n = n + 1\n \n# UPDATE RSS AND PS\n pf.rss[1:pf.pop_size_d] = numpy.copy(pf.rss_temp[1:pf.pop_size_d]) \n pf.ps[1:pf.pop_size_d] = numpy.copy(pf.p_temp[1:pf.pop_size_d]) \n\n# RUN EXTINCTION EVENT\n if((gen % pf.extinction_frequency) == 0 and gen > 0 and gen < pf.generations-1):\n pf.stage = 'Extinction event'\n pf.extinction()\n\n# RUN ENHANCE EVENT\n if(((gen + 1) % pf.enhance_frequency) == 0):\n pf.stage = 'Enhancing Top 10% With Steepest Descent'\n pf.enhance()\n \n# SAVE POTENTIAL\n pf.output()\n \n# Run end function\n pf.stage = 'End' \n results = pf.end() \n pf.progress = 100\n pf.time_remaining = 0.0\n progress.display(results)", "title": "" }, { "docid": "70522a0b1fa464e3d11ae4c124df489a", "score": "0.5031848", "text": "def assign01_main():\n # a sorted list of ~20 items\n list1 = [2, 3, 6, 10, 11, 17, 20, 23, 24, 29, 31, 34, 38, 39, 42, 47, 53]\n item_to_find = 34\n\n ls_res1 = linearSearch(list1, item_to_find)\n bs_res1 = binarySearch(list1, item_to_find)\n\n print(f\"\\nlist1 (size = {len(list1)}) results\")\n if ls_res1[0]:\n print(\" linear search found the item and required:\")\n else:\n print(\" linear search did not find the item and required:\")\n print(\" \", ls_res1[1], \"comparisons\")\n print(f\" {ls_res1[2]:.4f} seconds\")\n\n if bs_res1[0]:\n print(\" binary search found the item and required:\")\n else:\n print(\" binary search did not find the item and required:\")\n print(\" \", bs_res1[1], \"comparisons\")\n print(f\" {bs_res1[2]:.4f} seconds\")\n\n # a sorted list of odds from 1 to 9999\n list2 = list(range(1, 10000, 2))\n item_to_find = -1\n\n ls_res2 = linearSearch(list2, item_to_find)\n bs_res2 = binarySearch(list2, item_to_find)\n\n print(f\"\\nlist2 (size = {len(list2)}) results\")\n if ls_res2[0]:\n print(\" linear search found the item and required:\")\n else:\n print(\" linear search did not find the item and required:\")\n print(\" \", ls_res2[1], \"comparisons\")\n print(f\" {ls_res2[2]:.4f} seconds\")\n\n if bs_res2[0]:\n print(\" binary search found the item and required:\")\n else:\n print(\" binary search did not find the item and required:\")\n print(\" \", bs_res2[1], \"comparisons\")\n print(f\" {bs_res2[2]:.4f} seconds\")", "title": "" }, { "docid": "a1ff54492bc2edb6cf6f33c76b13835d", "score": "0.5031396", "text": "def cantor_composition_simple(D1,D2,f,genus):\n a1, b1 = D1\n a2, b2 = D2\n if a1 == a2 and b1 == b2:\n # Duplication law:\n d, h1, h3 = a1.xgcd(2*b1)\n a = (a1 // d)**2\n b = (b1 + h3*((f - b1**2) // d)) % (a)\n else:\n d0, _, h2 = a1.xgcd(a2)\n if d0 == 1:\n a = a1*a2\n b = (b2 + h2*a2*(b1-b2)) % (a)\n else:\n d, l, 
h3 = d0.xgcd(b1 + b2)\n a = (a1*a2) // (d**2)\n b = ((b2 + l*h2*(b1-b2)*(a2 // d)) + h3*((f - b2**2) // d)) % (a)\n a =a.monic()\n return (a, b)", "title": "" }, { "docid": "e865dec34e559e5075f9478ac13d88fb", "score": "0.5025185", "text": "def main2():\n s = set()\n m = None\n r3 = 0\n r0 = 0\n while True:\n r4 = r3 | 65536 # pow(2, 16)\n r3 = 2176960\n while True:\n r3 = (((r3 + (r4 & 0xff)) & 0xffffff) * 65899) & 0xffffff\n if 256 > r4:\n break\n r4 = r4 // 256\n if not s:\n print('Part 1:', r3)\n if r3 in s:\n print('Part 2:', m)\n break\n else:\n s.add(r3)\n m = r3\n #if r3 == r0:\n # break", "title": "" }, { "docid": "30f33a83c6aa73b70d5dc120b115fe91", "score": "0.5023245", "text": "def second():\n inp = get_input()\n nanobots = [parse_nanobot_data(s) for s in inp]\n\n origins = [nb.origin for nb in nanobots]\n\n x_min, x_max = min(origins, key=lambda o: o.x).x, max(origins, key=lambda o: o.x).x\n y_min, y_max = min(origins, key=lambda o: o.y).y, max(origins, key=lambda o: o.y).y\n z_min, z_max = min(origins, key=lambda o: o.z).z, max(origins, key=lambda o: o.z).z\n\n r = None\n queue = []\n heapq.heappush(queue,\n Region(x_min, y_min, z_min, x_max - x_min + 1, y_max - y_min + 1, z_max - z_min + 1, nanobots))\n while r is None:\n region = heapq.heappop(queue)\n if region.width == 1: # Assume cube here. Whatever ....\n r = region\n else:\n regions = region.split()\n [heapq.heappush(queue, region) for region in regions]\n\n # Should return distance: 98565591, bots in range: 975, pos: (17304966, 29121001, 52139624)\n return Point.manhattan_distance_to_point(Point(0, 0, 0), r.origin)", "title": "" }, { "docid": "5ba36145a9938013ea21fac30b987708", "score": "0.5023203", "text": "def main():\n sum = 0\n testvalues = list(range(1,10001))\n primes = e_sieve(10001)\n dlist1 = []\n dlist2 = []\n amicables = []\n\n for i in testvalues:\n if i in primes:\n testvalues.remove(i)\n\n for i in testvalues:\n dlist1.append((i, d(i)))\n\n for j in dlist1:\n dlist2.append((d(j[1]), j[1]))\n \n for i in range(len(dlist1)):\n if dlist1[i] == dlist2[i] and dlist1[i][0] != dlist1[i][1]:\n amicables.append(dlist1[i])\n \n for i in amicables:\n sum += i[0] + i[1]\n\n print (sum/2)", "title": "" }, { "docid": "9fa51976be7e4dafdc212c6cfc7d1ff5", "score": "0.50210065", "text": "def _compute_quotient(self):\n generators=set([])\n genus=self.genus()\n num_edges=0\n self.get_embedding_matrix(prec = 1)\n p=self._p\n num_verts=0\n v0=Vertex(self,num_verts,self._Mat_22([1,0,0,1]),determinant = 1,valuation = 0)\n V=collections.deque([v0])\n S=Graph(0,multiedges=True,weighted=True)\n Sfun = Graph(0)\n edge_list=[]\n vertex_list=[v0]\n self._num_edges=0\n num_verts+=1\n n_units=len(self.get_units_of_order())\n while len(V)>0:\n v=V.popleft()\n # found_edges=sum([len(e.links) for e in v.leaving_edges])\n # v_stabilizer=len(self._stabilizer(v.rep,as_edge=False))\n # total_edges=(p+1)/v_stabilizer\n\n E=self._BT.leaving_edges(v.rep)\n\n # print 'V = %s, E = %s, G = %s (target = %s), lenV = %s'%(num_verts,num_edges,1+num_edges-num_verts,genus,len(V))\n for e in E: #ii in range(p+1):\n # if found_edges == total_edges:\n # break\n\n #e=E[ii]\n edge_det=e.determinant()\n edge_valuation=edge_det.valuation(p)\n # if v_stabilizer == 1 and ii == len(v.leaving_edges):\n # e1 = None\n # else:\n # g,e1=self._find_equivalent_edge(e,v.leaving_edges,valuation=edge_valuation)\n\n g,e1=self._find_equivalent_edge(e,v.leaving_edges,valuation=edge_valuation)\n\n if e1 is not None: # The edge is old. 
We just update the links\n e1.links.append(g)\n target = self._BT.target(e)\n if e1.parity == 0:\n Sfun.add_edge(v.rep,target,label = e1.label)\n else:\n Sfun.add_edge(v.rep,target,label = e1.opposite.label)\n\n Sfun.set_vertex(target,e1.target)\n \n else: # The edge is new.\n target=self._BT.target(e)\n target.set_immutable()\n new_det=target.determinant()\n new_valuation=new_det.valuation(p)\n new_parity=new_valuation%2\n g1,v1=self._find_equivalent_vertex(target,V,valuation=new_valuation)\n if v1 is None:\n #The vertex is also new\n v1=Vertex(self,num_verts,target,determinant = new_det,valuation = new_valuation)\n vertex_list.append(v1)\n num_verts+=1\n #Add the vertex to the list of pending vertices\n V.append(v1)\n else:\n generators.add(g1[0])\n\n\n # Add the edge to the list\n new_e=Edge(self,num_edges,e,v,v1,determinant = edge_det,valuation = edge_valuation)\n new_e.links.append(self.B_one())\n Sfun.add_edge(v.rep,target,label = num_edges)\n Sfun.set_vertex(target,v1)\n\n # Add the edge to the graph\n S.add_edge(v.rep,v1.rep,num_edges)\n S.set_vertex(v.rep,v)\n S.set_vertex(v1.rep,v1)\n\n # Find the opposite edge\n opp=self._BT.opposite(e)\n opp_det=opp.determinant()\n new_e_opp=Edge(self,num_edges,opp,v1,v,opposite = new_e)\n new_e.opposite=new_e_opp\n\n if new_e.parity == 0:\n edge_list.append(new_e)\n else:\n edge_list.append(new_e_opp)\n\n v.leaving_edges.append(new_e)\n v.entering_edges.append(new_e_opp)\n v1.entering_edges.append(new_e)\n v1.leaving_edges.append(new_e_opp)\n num_edges += 1\n # found_edges+=ZZ(v_stabilizer/len(self._stabilizer(e,as_edge=True)))\n # if genus == 1 - len(vertex_list) +num_edges:\n # break\n computed_genus=Integer(1- len(vertex_list)+num_edges)\n if computed_genus != genus:\n print 'You found a bug! Please report!'\n print 'Computed genus =',computed_genus\n print 'Theoretical genus =', genus\n raise RuntimeError\n\n self._generators=generators\n self._boundary= {v.rep:v for v in vertex_list}\n self._edge_list=edge_list\n self._vertex_list=vertex_list\n self._S=S\n self._Sfun=Sfun", "title": "" }, { "docid": "b494c4d679e386d29cee6c8f7b461673", "score": "0.5020426", "text": "def __truediv__(self, other):\n \n pass", "title": "" }, { "docid": "b70fea3a4a3dac60e5ade13272c23cde", "score": "0.5020366", "text": "def Arraydiscarder(arrayx, arrayy, args):\n\tL, l, w, delta = args\n\tj=0\n\tfor j in range(len(arrayx)):\n\t\tx = arrayx[j]\n\t\ty = arrayy[j]\n\t\tif y > -L/2 - delta and y < -L/2 + delta:\n\t\t\tif x > l - w/2 and x < l + w/2: \t\t\n\t\t\t\tu = 1\n\t\t\telse:\n\t\t\t\tu = 0\n\t\t\t\tbreak\n\t\tif y > L/2 - delta and y < L/2 + delta:\n\t\t\tif x > l - w/2 and x < l + w/2: \n\t\t\t\tu = 1\n\t\t\telse:\n\t\t\t\tu = 0\n\t\t\t\tbreak\t\n\t\tif x > 0 - delta and x < 0 + delta:\n\t\t\tif y > l - w/2 and y < l + w/2: \n\t\t\t\tu = 1\n\t\t\telse:\n\t\t\t\tu = 0\n\t\t\t\tbreak\n#\t\tif x > L/2 - delta and x < L/2 + delta:\n#\t\t\tif y > l - w/2 and y < l + w/2: \n#\t\t\t\tu = 1\n#\t\t\telse:\n#\t\t\t\tu = 0\n#\n#\t\t\t\tbreak\n#\t\tif x > -L/2 - delta and x < -L/2 + delta:\n#\t\t\tif y > l - 2*w/3 and y < l + w/3: \n#\t\t\t\tu = 1\n#\t\t\telse:\n#\t\t\t\tu = 0\n#\t\t\t\tbreak\n\t\tj= j+1\n\treturn u, j", "title": "" }, { "docid": "49a9c11ea5e24061031e3a26df6e83c5", "score": "0.5016446", "text": "def LUP_decompose(self):\n if self.m != self.n:\n print(\"Nije kvadratna! 
Nema dekompozicije!\")\n self.a = None\n self.p = None\n return\n\n self.a = self.copy()\n self.p = self.identity()\n self.s = 0\n\n for i in range(0, self.n):\n tmp_max = -math.inf\n r = i\n for j in range(i, self.n):\n if math.fabs(self.a[j, i]) > tmp_max:\n tmp_max = math.fabs(self.a[j, i])\n r = j\n if tmp_max < self.epsilon:\n print(\"Matrica je singularna! Prekidam dekompoziciju\")\n self.a = None\n self.p = None\n return\n else:\n if r != i:\n self.a.switch_rows(r, i)\n self.p.switch_rows(r, i)\n self.s += 1\n for j in range(i+1, self.m):\n if math.fabs(self.a[i, i]) < self.epsilon:\n print(\"Matrica je singularna! Prekidam dekompoziciju\")\n self.a = None\n self.p = None\n return\n self.a[j, i] = self.a[j, i] / self.a[i, i]\n for k in range(i+1, self.n):\n self.a[j, k] = self.a[j, k] - self.a[j, i] * self.a[i, k]\n return", "title": "" }, { "docid": "fcd83622e5f7fa2b71ac05750e236dc0", "score": "0.50153434", "text": "def other(G, fs):\n raise ValueError(\"not yet implemented\") ", "title": "" }, { "docid": "ebba63df8a9229b008955f2fae175816", "score": "0.5011193", "text": "def test_pred_construction(self):\n security_parameter = 10\n one_way_perm = blum_blum_shub.blum_blum_shub(security_parameter)\n hardcorePred = blum_blum_shub.parity", "title": "" }, { "docid": "555d6211df2d3d92dcc104209d385ee1", "score": "0.50092953", "text": "def compute_k(self):", "title": "" }, { "docid": "91b0f5fcda92a08c141e4c6d4e3e9872", "score": "0.5009154", "text": "def P(alpha, m):\n if alpha >= 2*m-1:\n raise Exception\n if m%2==0:\n if alpha < m:\n if alpha%2 == 0:\n b = alpha // 2\n return [(2*a, (2*a + 2*b + 1)%(2*m)) for a in range(m)]\n else:\n b = (alpha-1) // 2\n return [(2*a, (2*a - 2*b - 1)%(2*m)) for a in range(m)]\n else:\n y = alpha - m\n pairs = [(b,(2*y-b)%(2*m)) for b in range(y)]\n pairs += [(c,(2*m+2*y-c-2)%(2*m)) for c in range(2*y+1,m+y-1)]\n pairs += [(2*m+int(-1.5-.5*(-1)**y),y),(2*m+int(-1.5+.5*(-1)**y),m+y-1)]\n return pairs\n else:\n if alpha < m-1:\n if alpha % 2 == 0:\n b = alpha // 2\n return [(2*a,(2*a+2*b+1)%(2*m)) for a in range(m)]\n else:\n b = (alpha-1) // 2\n return [(2*a,(2*a-2*b-1)%(2*m)) for a in range(m)]\n else:\n y = alpha-m+1\n pairs = [(b,2*y-b) for b in range(y)]\n pairs += [(c,2*m+2*y-c) for c in range(2*y+1,m+y)]\n pairs += [(y,m+y)]\n return pairs", "title": "" }, { "docid": "43c08286c4ed462dac13db8b66bc84b8", "score": "0.50077325", "text": "def extra(maze):\n # astar_multi(maze)\n # TODO: Write your code here\n class State:\n def __init__(self, position, gs, lastPosition):\n self.position = position\n self.gs = gs\n self.lastPosition = lastPosition\n\n objective = maze.getObjectives()\n\n def geths(a, p):\n return abs(a[0] - p[0]) + abs(a[1] - p[1])\n\n def multiAstar(fromPosition, toPosition):\n start = State(fromPosition, 0, -1)\n priqueue = [start]\n link = [start]\n explored = [fromPosition]\n\n def addPriqueue(f):\n index = 0\n while index in range(len(priqueue)):\n if (priqueue[index].gs + geths(priqueue[index].position, toPosition)) > \\\n (f.gs + geths(f.position, toPosition)):\n break\n index = index + 1\n priqueue.insert(index, f)\n\n def indexPriqueue(pos):\n index = 0\n while index in range(len(priqueue)):\n if priqueue[index].position == pos:\n return index\n index = index + 1\n return -1\n\n def indexLink(pos):\n index = 0\n while index in range(len(link)):\n if link[index].position == pos:\n return index\n index = index + 1\n return -1\n\n while priqueue[0].position != toPosition:\n neighbors = maze.getNeighbors(priqueue[0].position[0], 
priqueue[0].position[1])\n for indexN in range(len(neighbors)):\n if neighbors[indexN] not in explored:\n explored.append(neighbors[indexN])\n frontier = State(neighbors[indexN], priqueue[0].gs + 1, priqueue[0].position)\n addPriqueue(frontier)\n link.append(frontier)\n else:\n updateIndex = indexPriqueue(neighbors[indexN])\n # if the update state is in the priqueue, then update gs directly\n if updateIndex != -1:\n if priqueue[updateIndex].gs > priqueue[0].gs + 1:\n frontier = State(neighbors[indexN], priqueue[0].gs + 1, priqueue[0].position)\n priqueue.pop(updateIndex)\n addPriqueue(frontier)\n indexL = indexLink(neighbors[indexN])\n link.pop(indexL)\n link.append(frontier)\n else:\n indexL = indexLink(neighbors[indexN])\n if link[indexL].gs > priqueue[0].gs + 1:\n frontier = State(neighbors[indexN], priqueue[0].gs + 1, priqueue[0].position)\n addPriqueue(frontier)\n link.pop(indexL)\n link.append(frontier)\n priqueue.pop(0)\n\n astarReturn = []\n i = len(link) - 1\n while link[i].position != toPosition:\n i = i - 1\n while i != -1:\n astarReturn.append(link[i].position)\n i = indexLink(link[i].lastPosition)\n astarReturn.reverse()\n return astarReturn\n\n\n objective.insert(0, maze.getStart())\n # print(objective)\n fromList = []\n toList = []\n path = []\n length = []\n indexa = 0\n while indexa in range(len(objective)):\n indexb = indexa + 1\n while indexb < len(objective):\n fromList.append(objective[indexa])\n toList.append(objective[indexb])\n if (abs(objective[indexa][0] - objective[indexb][0]) == 1 and objective[indexa][1] == objective[indexb][1]) \\\n or (abs(objective[indexa][1] - objective[indexb][1]) == 1 and (objective[indexa][0] == objective[indexb][0])):\n result = [objective[indexa], objective[indexb]]\n # if objective[indexa] == (15, 14) and objective[indexb] == (15, 1):\n # print(\"not aaa\")\n else:\n result = multiAstar(objective[indexa], objective[indexb])\n # if objective[indexa] == (15, 14) and objective[indexb] == (15, 1):\n # print(\"aaaa\")\n path.append(result)\n length.append(len(result))\n indexb = indexb + 1\n indexa = indexa + 1\n # print(indexa,\"+1 astar\")\n\n def mst(a):\n nodeList = []\n for i in range(len(a)):\n if a[i] not in nodeList:\n nodeList.append(a[i])\n minLength = 500\n indexMin = 0\n for i in range(len(fromList)):\n if nodeList[len(nodeList)-1] == fromList[i] or nodeList[len(nodeList)-1] == toList[i]:\n if minLength > length[i]:\n minLength = length[i]\n indexMin = i\n return len(nodeList) - 1 + length[indexMin] - 1\n\n\n\n\n class Node:\n def __init__(self, position, gs, route, hs):\n self.position = position\n self.gs = gs\n self.route = route\n self.hs = hs\n\n start = Node(maze.getStart(), 0, [maze.getStart()], 0)\n multiPriqueue = []\n\n def findLength(f, t):\n index = 0\n while index in range(len(fromList)):\n if (fromList[index] == f and toList[index] == t) or (toList[index] == f and fromList[index] == t):\n return length[index] - 1\n index = index + 1\n return -1\n\n def findNodeList(list):\n nodeList = []\n for ii in range(len(objective)):\n if objective[ii] not in list:\n nodeList.append(objective[ii])\n nodeList.append(list[len(list)-1])\n\n # remove redundant\n a = []\n for i in range(len(nodeList)):\n if nodeList[i] not in a:\n a.append(nodeList[i])\n\n return a\n\n for index in range(len(objective)):\n if objective[index] != start.position:\n # for i in range(len(multiPriqueue)):\n # print(multiPriqueue[i].position)\n indexN = 0\n while indexN in range(len(multiPriqueue)):\n\n if multiPriqueue[indexN].gs > 
findLength(start.position, objective[index]):\n\n break\n indexN = indexN + 1\n\n multiPriqueue.insert(indexN, Node(objective[index], start.gs + findLength(start.position, objective[index]), [start.position, objective[index]], 0))\n\n\n # for i in range(len(multiPriqueue)):\n # multiPriqueue[i].hs = mst(findNodeList(multiPriqueue[i].route))\n # print(multiPriqueue[i].position, multiPriqueue[i].route, multiPriqueue[i].gs + mst(findNodeList(multiPriqueue[i].route)), multiPriqueue[i].gs, mst(findNodeList(multiPriqueue[i].route)))\n\n maxLength = 0\n maxLengthRoute = []\n trashNum = 0\n\n while 1:\n host = multiPriqueue.pop(0)\n if len(host.route) > maxLength:\n # print(host.route)\n maxLength = len(host.route)\n maxLengthRoute = host.route\n\n\n\n finish = 0\n for i in range(len(objective)):\n if objective[i] not in host.route:\n finish = 0\n break\n else:\n finish = 1\n if finish == 1:\n break\n\n # try to improve searching speed\n trashRoute = 0\n # if len(host.route) < 20:\n # restriction = round(maxLength / 3)\n # elif len(host.route) < 50:\n # restriction = maxLength - 15\n # elif len(host.route) < 90:\n # restriction = maxLength - 10\n # elif len(host.route) < 120:\n # restriction = maxLength - 10\n # elif len(host.route) < 150:\n # restriction = maxLength - 10\n # elif len(host.route) < 190:\n # restriction = maxLength - 15\n # else:\n # restriction = maxLength - 15\n\n # restriction = round(maxLength / 2)\n restriction = maxLength\n for i in range(min(restriction, len(host.route))):\n if host.route[i] != maxLengthRoute[i]:\n trashRoute = 1\n break\n if trashRoute == 1:\n # print(\"delete\", trashNum, \"trash\")\n trashNum = trashNum + 1\n continue\n\n # originalNeighbors = []\n neighbors = []\n minNeighbors = 500\n if len(neighbors) == 0:\n # print(\"find the nearest\")\n for index in range(len(objective)):\n if objective[index] != host.position and objective[index] not in host.route:\n tempNb = findLength(host.position, objective[index])\n if minNeighbors > tempNb:\n minNeighbors = tempNb\n minIndex = index\n neighbors.append(objective[minIndex])\n\n\n for index in range(len(neighbors)):\n\n\n\n temp = Node(neighbors[index], host.gs + findLength(host.position, neighbors[index]), host.route+[neighbors[index]], 0)\n temp.hs = mst(findNodeList(temp.route))\n # t0 = time.clock()\n\n if len(multiPriqueue):\n for i in range(len(multiPriqueue)):\n if multiPriqueue[i].gs + multiPriqueue[i].hs >= temp.gs + temp.hs:\n break\n\n multiPriqueue.insert(i, temp)\n\n finalPath = host.route\n # print(finalPath, len(multiPriqueue), \"final\")\n\n finalReturn = []\n\n\n\n for i in range(len(finalPath)-1):\n for ii in range(len(path)):\n if fromList[ii] == finalPath[i] and toList[ii] == finalPath[i+1]:\n finalReturn = finalReturn + path[ii]\n finalReturn.pop()\n break\n elif toList[ii] == finalPath[i] and fromList[ii] == finalPath[i+1]:\n\n path[ii].reverse()\n finalReturn = finalReturn + path[ii]\n path[ii].reverse()\n finalReturn.pop()\n break\n\n\n finalReturn.append(finalPath[len(finalPath)-1])\n # print(finalReturn)\n\n\n return finalReturn", "title": "" }, { "docid": "8284513df19115b1059f755f1cc8acb7", "score": "0.50072235", "text": "def eq0405():", "title": "" }, { "docid": "a8745c8d5a4558a30d9efb77fccf661f", "score": "0.5005822", "text": "def reconcile_layers_iter(a1=[5, 7, 3], a2=[7, 8]):\n res_arr = []\n l_arr = []\n r_arr = []\n while sum(a1) > 0 or sum(a2) > 0:\n for i in range(len(a1)):\n if a1[i] > 0:\n num1 = a1[i]\n break\n for j in range(len(a2)):\n if a2[j] > 0:\n num2 = a2[j]\n 
break\n minn = min(num1, num2)\n a1[i] -= minn\n a2[j] -= minn\n res_arr.append(minn)\n l_arr.append(i+1)\n r_arr.append(j+1)\n return res_arr, l_arr, r_arr", "title": "" }, { "docid": "bc5c576d9bc7596637cf857f7e3ec1c8", "score": "0.4996447", "text": "def applyRp(A,B, Rup, Rvp, Leg = \"I\", forwhat = 'trg', RABs = [1]*6):\n if forwhat == 'trg':\n Alist = [A, B, A, B]\n [Arot1, Arot2, _, _], _ = rotateLattice(Alist, Leg)\n Arot1mod = ncon([Arot1, Rup],[[-1,-2,1,-4],[1,-3]])\n Arot2mod = ncon([Arot2, Rvp],[[1,-2,-3,-4],[-1,1]])\n Amodlist = [Arot1mod, Arot2mod, Arot1mod, Arot2mod]\n revLeg = {\"I\":\"I\", \"II\":\"IV\", \"III\":\"III\", \"IV\":\"II\"}\n [Amod, Bmod, _, _], _ = rotateLattice(Amodlist, revLeg[Leg])\n return Amod, Bmod\n elif forwhat == 'hotrg':\n # Alist = [A, A, B, B]\n # [Arot1, Arot2, _, _], _ = rotateLattice(Alist, Leg)\n # Arot1mod = ncon([Arot1, Rup],[[-1,-2,1,-4],[1,-3]])\n # Arot2mod = ncon([Arot2, Rvp],[[1,-2,-3,-4],[-1,1]])\n # Amodlist = [Arot1mod, Arot2mod, Arot2mod, Arot1mod]\n # revLeg = {\"II\":\"IV\", \"IV\":\"II\"}\n # Amod = rotateLattice(Amodlist, revLeg[Leg])[0][0]\n # Bmod = rotateLattice(Amodlist, revLeg[Leg])[0][2]\n RA, RB, RAl, RAr, RBl, RBr = RABs\n if Leg == \"II\":\n Amod = ncon([A, Rup], [[-1, -2, -3, 1],[1, -4]])\n Bmod = ncon([B, Rvp], [[-1, 1, -3, -4],[-2, 1]])\n # update RA and RB matrices\n if type(RA).__module__.split(\".\")[0] != 'abeliantensors':\n RA = 1.0 * Rup\n else:\n RA = ncon([RA, Rup], [[-1,1],[1,-2]])\n if type(RB).__module__.split(\".\")[0] != 'abeliantensors':\n RB = 1.0 * Rvp.transpose([1,0])\n else:\n RB = ncon([RB, Rvp], [[-1,1],[-2,1]])\n elif Leg == \"IV\":\n Amod = ncon([A, Rvp], [[-1, -2, -3, 1],[-4, 1]])\n Bmod = ncon([B, Rup], [[-1, 1, -3, -4],[1, -2]])\n # update RA and RB matrices\n if type(RA).__module__.split(\".\")[0] != 'abeliantensors':\n RA = 1.0 * Rvp.transpose([1,0])\n else:\n RA = ncon([RA, Rvp], [[-1,1],[-2,1]])\n if type(RB).__module__.split(\".\")[0] != 'abeliantensors':\n RB = 1.0 * Rup\n else:\n RB = ncon([RB, Rup], [[-1,1],[1,-2]])\n elif Leg == \"I\":\n Amod = ncon([A, Rup, Rvp], [[1, -2, 2, -4], [2, -3], [-1, 1]])\n Bmod = B * 1.0\n # update RAl, RAr matrices\n if type(RAl).__module__.split(\".\")[0] != 'abeliantensors':\n RAl = 1.0 * Rvp.transpose([1,0])\n else:\n RAl = ncon([RAl, Rvp], [[-1,1],[-2,1]])\n if type(RAr).__module__.split(\".\")[0] != 'abeliantensors':\n RAr = 1.0 * Rup\n else:\n RAr = ncon([RAr, Rup], [[-1,1],[1,-2]])\n elif Leg == \"III\":\n Amod = A * 1.0\n Bmod = ncon([B, Rup, Rvp], [[1,-2, 2, -4], [1, -1], [-3, 2]])\n # update RBl, RBr matrices\n if type(RBl).__module__.split(\".\")[0] != 'abeliantensors':\n RBl = 1.0 * Rup\n else:\n RBl = ncon([RBl, Rup], [[-1,1],[1,-2]])\n if type(RBr).__module__.split(\".\")[0] != 'abeliantensors':\n RBr = 1.0 * Rvp.transpose([1,0])\n else:\n RBr = ncon([RBr, Rvp], [[-1,1],[-2,1]])\n else:\n raise ValueError(\"Leg can only be chosen from [II, IV, I, III]\" +\n \"in this gilt-hotrg-imp version\")\n RABs = [RA, RB, RAl, RAr, RBl, RBr]\n return Amod, Bmod, RABs\n else:\n raise ValueError(\"forwhat should be chosen between trg or hotrg\")", "title": "" }, { "docid": "1c8b9269279ae3c517b6925198883e6f", "score": "0.49918652", "text": "def D(z):", "title": "" }, { "docid": "9431f55afcd37a2a6831ab6c50584bd9", "score": "0.49877673", "text": "def work(self):", "title": "" }, { "docid": "b6c751385a3c827e90abc12f2a0dd137", "score": "0.4980513", "text": "def Solve(self):\n pass", "title": "" }, { "docid": "a9e8267bc51ca8a5ad636fd2734d816d", "score": 
"0.49778596", "text": "def clno(A, a, B, b):\n logging.debug(\"---------------------------------------------------\")\n logging.debug(\"clno(): In python implmentation of Tim's derivation\")\n logging.debug(\"---------------------------------------------------\")\n #logging.debug(\"Inputs are:\")\n #logging.debug(\" A:\\n{}\".format(A))\n #logging.debug(\" a:\\n{}\".format(a))\n #logging.debug(\" B:\\n{}\".format(B))\n #logging.debug(\" b:\\n{}\".format(b))\n ApB = (A + B)\n logging.debug(\"ApB: \\n{}\".format(ApB))\n ApB_det = np.linalg.det(ApB)\n\n ApB_i = np.linalg.inv(ApB)\n amb = a - b\n\n result = 6 * np.log(2*np.pi)\n logging.debug(\"Added 6log(2pi): {}\".format(result))\n lndet = np.log(ApB_det)\n logging.debug(\"Log of det(ApB): {}\".format(lndet))\n result += np.log(ApB_det)\n logging.debug(\"result so far: {}\".format(result))\n exponent = (np.dot(amb, np.dot(ApB_i, amb)))\n logging.debug(\"matrix exponent is: {}\".format(exponent))\n result += exponent\n logging.debug(\"result so far: {}\".format(result))\n result *= -0.5\n\n# lnoverlap = -0.5 * (np.dot(amb, np.dot(ApB_i, amb)))\n# lnoverlap -= 3 * np.log(2 * np.pi)\n# lnoverlap -= 0.5 * np.log(ApB_det)\n\n #logging.debug(\"Final result is: {}\".format(lnoverlap))\n\n logging.debug(\"---------------------------------------------------\")\n logging.debug(\"Final result is: {}\".format(result))\n logging.debug(\"---------------------------------------------------\")\n return result", "title": "" }, { "docid": "6aa1322220f9f50973886ef25d2f9b1a", "score": "0.49754128", "text": "def thomas1d(N, b, a, c, q):\n\n l = np.zeros(N, dtype='float64')\n u = np.zeros(N, dtype='float64')\n d = np.zeros(N, dtype='float64')\n y = np.zeros(N, dtype='float64')\n x = np.zeros(N, dtype='float64')\n\n #print 'b = ', b\n #print 'a = ', a\n #print 'c = ', c\n\n d[0] = a[0]\n u[0] = c[0]\n for i in np.arange(N - 2):\n l[i] = b[i] / d[i]\n #print 'b[i] = %f, d[i] = %f, l[i] = %f' % (b[i], d[i], l[i])\n d[i + 1] = a[i + 1] - l[i] * u[i]\n #print 'a[i+1] = %f, l[i] = %f, u[i] = %f => d[i+1 ] %f' % (a[i + 1],\n # l[i], u[i],\n # d[i + 1])\n u[i + 1] = c[i + 1]\n\n l[N - 2] = b[N - 2] / d[N - 2]\n d[N - 1] = a[N - 1] - l[N - 2] * u[N - 2]\n\n # Forward substitution\n y[0] = q[0]\n for i in np.arange(1, N):\n y[i] = q[i] - l[i - 1] * y[i - 1]\n #print 'q[i] = %f, l[i-1] = %f, y[i-1] = %f => y[i] = q - l[i-1] * y[i-1] = %f' %\\\n # (q[i], l[i - 1], y[i - 1], y[i])\n\n # Backward substitution\n x[N - 1] = y[N - 1] / d[N - 1]\n for i in np.arange(N - 2, -1, -1):\n x[i] = (y[i] - u[i] * x[i + 1]) / d[i]\n #print 'y[i] = %f, u[i] * x[i + 1] = %f, d[i] = %f => x=(y-ux)/d = %f' %\\\n # (y[i], u[i] * x[i + 1], d[i], x[i])\n\n return x, (l, u, d, y)", "title": "" }, { "docid": "7f1c12681ad259b3cb64cb0f183c1fdd", "score": "0.49683738", "text": "def match_features(features1, features2, x1, y1, x2, y2):\n #############################################################################\n # TODO: YOUR CODE HERE\n\n #hyper parameter\n threshold = 0.9\n Sorting = True\n\n ratio = np.zeros([features1.shape[0], 1])\n indexer = np.zeros ([features1.shape[0], 1])\n indexer[:] = np.NaN\n #for order in range(0, 1):\n\n\n for order in range (0,features1.shape[0]):\n sys.stdout.write('\\r' +\" \"+ str(int(order/features1.shape[0]*100+1)) + \"%\")\n\n Loss = np.zeros([features2.shape[0], 1])\n for order2 in range (0,features2.shape[0]):\n Loss[order2, 0] = sum(abs(features1[order,:]-features2[order2,:]))\n '''\n for order3 in range(0,128):\n Loss[order2,0] = Loss[order2,0] + 
abs(features1[order,order3]-features2[order2,order3])\n #Loss[order2, 0] = Loss[order2, 0] + features1[order, order3] - features2[order2, order3]\n '''\n\n Loss2 = np.sort(Loss.flatten())\n #Loss2 = np.sort(Loss)\n\n a= Loss2[0]/Loss2[1]\n\n #print(a)\n if a <threshold:\n ratio[order, 0] = a\n indexer[order, 0] = np.argmin(Loss)\n print()\n\n dummy = np.arange(indexer.shape[0]).T\n dummy = np.reshape(dummy,(dummy.shape[0],1))\n\n indexer = np.concatenate((dummy,indexer,ratio),axis=1)\n\n for num in range(indexer.shape[0]-1, -1,-1):\n if indexer [num,2]==0:\n indexer = np.delete(indexer,num,0)\n\n if Sorting ==True:\n indexer = indexer[indexer[:,2].argsort()]\n\n matches = indexer[:,0:2].astype(int)\n confidences = indexer[:,2]\n\n\n #print(matches)\n\n\n\n\n\n\n\n\n\n\n #############################################################################\n\n #raise NotImplementedError('`match_features` function in ' +\n # '`student_feature_matching.py` needs to be implemented')\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return matches, confidences", "title": "" }, { "docid": "c98c4c757ceafd66c160a204f853d257", "score": "0.49674457", "text": "def double(a):\n \"\"\"{ pre True\n post forall 0 <= i < len(a), b[i] == a[i] * 2\n return b\n }\"\"\"\n b = []\n x = 0\n \"\"\"{ 1.OK forall 0 <= i < x, b[i] == a[i] * 2 foralli\n 2.OK x == len(b) algebra\n }\"\"\"\n while x != len(a) :\n \"\"\"{ invariant (forall 0 <= i < x, b[i] == a[i] * 2) and (x == len(b))\n modifies x, b\n }\"\"\"\n b.append(a[x]*2)\n \"\"\"{ 1.OK (forall 0 <= i < x, b[i] == a[i] * 2) and (x == len(b_old)) premise\n 2.OK x == len(b_old) ande 1\n 10.OK forall 0 <= i < x, b[i] == a[i] * 2 ande 1\n 3.OK len(b) == len(b_old) + 1 premise\n 4.OK x == len(b)-1 algebra 2 3\n 5.OK b[len(b)-1] == a[x]*2 premise\n 6.OK b[x] == a[x]*2 subst 4 5\n 7.OK forall 0 <= i < x+1, b[i] == a[i] * 2 foralli 10 6\n 8.OK len(b) == x + 1 algebra 4\n 9.OK return 7 8\n }\"\"\"\n x = x + 1\n \"\"\"{ 1.OK forall 0 <= i < x_old +1, b[i] == a[i] * 2 premise\n 2.OK x == x_old +1 premise\n 3.OK forall 0 <= i < x, b[i] == a[i] * 2 subst 2 1\n 4.OK len(b) == x_old + 1 premise\n 5.OK len(b) == x subst 2 4\n 6.OK x == len(b) algebra 5\n #7.OK (forall 0 <= i < x, b[i] == a[i] * 2) and (x == len(b)) andi 3 6\n 8. 
return 3 6\n }\"\"\"\n\n \"\"\"{ 1.OK (forall 0 <= i < x, b[i] == a[i] * 2) and (x == len(b)) premise\n 3.OK not(x != len(a)) premise\n 4.OK x == len(a) algebra\n 6.OK forall 0 <= i < x, b[i] == a[i] * 2 ande 1\n 5.OK forall 0 <= i < len(a), b[i] == a[i] * 2 subst 4 6\n }\"\"\"\n return b", "title": "" }, { "docid": "bdd7ebdabf4b54ad2a6a7bccef9a5a74", "score": "0.49661937", "text": "def solve(s, a, b, c):\n def score(a, b, _taken, turn, prevFailed=False):\n \"\"\"\n taken is a set of unusable rooms.\n It includes both rooms under construction and already-painted rooms.\n turn is 0 for Alma and 1 for Berthe.\n \"\"\"\n #print(a, b, _taken, turn)\n taken = set(_taken)\n possible = [] # Possible next moves.\n \n if turn == 0:\n x = a\n else:\n x = b\n \n if x[1] > 1:\n new = (x[0], x[1] - 1)\n if new not in taken:\n possible.append(new)\n \n if x[1] < x[0] * 2 - 1:\n new = (x[0], x[1] + 1)\n if new not in taken:\n possible.append(new)\n \n if x[1] % 2 == 0:\n if x[0] > 1:\n new = (x[0] - 1, x[1] - 1)\n if new not in taken:\n possible.append(new)\n else:\n if x[0] < s:\n new = (x[0] + 1, x[1] + 1)\n #print(\"bOOOOOOOOOm\", new in taken)\n if new not in taken:\n possible.append(new)\n \n #print(\" possible:\", possible)\n \n p = len(possible)\n if p == 0:\n if prevFailed:\n return 0\n return score(a, b, tuple(taken), 1 - turn, True)\n \n if turn == 0:\n scores = []\n for i in range(p):\n ac = possible[i]\n taken.add(ac)\n scores.append(score(ac, b, tuple(taken), 1 - turn))\n taken.remove(ac)\n return max(scores) + 1\n \n else:\n scores = []\n for i in range(p):\n bc = possible[i]\n taken.add(bc)\n scores.append(score(a, bc, tuple(taken), 1 - turn))\n taken.remove(bc)\n return min(scores) - 1\n \n taken = [a, b]\n taken.extend(list(c))\n ans = score(a, b, tuple(taken), 0)\n \n return ans", "title": "" }, { "docid": "811ffcd6bcd84f21006749643f7fdc19", "score": "0.4965829", "text": "def create_modulus(risk_funs, N1, N2, n1, n2, margin, upper_bound, g, x1, x2):\n for index, function in enumerate(risk_funs):\n if function == \"kaplan_markov\":\n if index == 0:\n T1 = lambda delta: 2*n1*np.log(1 + margin/(2*upper_bound-margin)* \\\n (N1 + N2)/N1*delta)\n else:\n T2 = lambda delta: 2*n2*np.log(1 + (N1 + N2)/N2*delta)\n\n elif function == \"kaplan_kolmogorov\":\n if index == 0:\n T1 = lambda delta: 2*sum(np.log(1 + np.divide(margin*(N1+N2), \\\n (2*upper_bound - margin)*np.multiply(np.array(x1) + \\\n g, N1 - np.array(range(len(x1)))))*delta))\n else:\n T2 = lambda delta: 2*sum(np.log(1 + np.divide(N1 + N2, np.multiply(np.array(x2) \\\n + g, N2 - np.array(range(len(x2)))))*delta))\n \n elif function == \"kaplan_wald\": \n if index == 0:\n T1 = lambda delta: 2*n1*(np.log(1 + margin/(2*upper_bound - margin)* \\\n (N1 + N2)/N1*delta) + np.log(1 + g*margin/(2* \\\n upper_bound - margin)*(N1 + N2)/N1*delta))\n else:\n T2 = lambda delta: 2*n2*(np.log(1+(N1 + N2)/N2*delta) + \\\n np.log(1 + g*(N1 + N2)/N2*delta))\n else: \n return None\n\n if N1 == 0:\n T1 = lambda delta: 0\n\n if N2 == 0: \n T2 = lambda delta: 0\n\n return lambda delta: T1(delta) + T2(delta)", "title": "" } ]
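The thomas1d passage above implements the Thomas algorithm (a single LU-style forward/backward sweep for tridiagonal systems). A minimal sanity check of that routine against a dense solve is sketched below; it assumes numpy is available and thomas1d is in scope, and it follows the passage's diagonal convention (b = sub-diagonal, a = main diagonal, c = super-diagonal, q = right-hand side). The sizes and seed are arbitrary.

import numpy as np

# Build a random, diagonally dominant tridiagonal system A x = q.
N = 5
rng = np.random.default_rng(0)
a = rng.uniform(2.0, 3.0, N)        # main diagonal (dominant, so no pivot vanishes)
b = rng.uniform(-1.0, 1.0, N - 1)   # sub-diagonal
c = rng.uniform(-1.0, 1.0, N - 1)   # super-diagonal
q = rng.uniform(-1.0, 1.0, N)       # right-hand side

A = np.diag(a) + np.diag(b, -1) + np.diag(c, 1)
x, (l, u, d, y) = thomas1d(N, b, a, c, q)   # thomas1d as defined in the passage above
assert np.allclose(A @ x, q)                # the O(N) sweep matches the dense solution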
8ffc57df12cae62a251b68661b73e7b5
Construct an object from a parsed response.
[ { "docid": "0e9dde3a2dac743da0d5c1f024147fc1", "score": "0.0", "text": "def from_json(cls, attributes):\n return cls(**{to_snake_case(k): v for k, v in attributes.items()})", "title": "" } ]
[ { "docid": "38fb22532f23588d56b0cb3828560113", "score": "0.71406657", "text": "def __init__(self, res):\n self.fromResponseObj(res)", "title": "" }, { "docid": "38fb22532f23588d56b0cb3828560113", "score": "0.71406657", "text": "def __init__(self, res):\n self.fromResponseObj(res)", "title": "" }, { "docid": "38fb22532f23588d56b0cb3828560113", "score": "0.71406657", "text": "def __init__(self, res):\n self.fromResponseObj(res)", "title": "" }, { "docid": "c73792bd0fba0062485fe08cdbe17a80", "score": "0.6973285", "text": "def from_str(cls, response_str):\n #import pdb; pdb.set_trace()\n response_splited = response_str.split('\\n')\n if len(response_splited) < 3:\n raise ResponseError()\n response_splited = response_splited[:-2]\n status = response_splited[0]\n \n status = cls.check_status(status)\n msg = ''\n metrics = []\n \n if status:\n data_lines = response_splited[1:]\n metrics = cls.parse_data(data_lines)\n assert metrics is not None, 'Response class got none metrics from {:}'.format(response_splited)\n else:\n msg = response_splited[1]\n return Response(status, metrics, msg)", "title": "" }, { "docid": "6e32306b49fbdbcaec5a03cccd5b9b9b", "score": "0.6627292", "text": "def __init__(self, oResponse):\n if oResponse is not None:\n # Read the whole response (so we can log it).\n sBody = oResponse.read();\n\n # Check the content type.\n sContentType = oResponse.getheader('Content-Type');\n if sContentType is None or sContentType != 'application/x-www-form-urlencoded; charset=utf-8':\n testboxcommons.log('SERVER RESPONSE: Content-Type: %s' % (sContentType,));\n testboxcommons.log('SERVER RESPONSE: %s' % (sBody.rstrip(),))\n raise testboxcommons.TestBoxException('Invalid server response type: \"%s\"' % (sContentType,));\n\n # Parse the body (this should be the exact reverse of what\n # TestBoxConnection.postRequestRaw).\n ##testboxcommons.log2('SERVER RESPONSE: \"%s\"' % (sBody,))\n self._dResponse = urlparse.parse_qs(sBody, strict_parsing=True);\n\n # Convert the dictionary from 'field:values' to 'field:value'. Fail\n # if a field has more than one value (i.e. given more than once).\n for sField in self._dResponse:\n if len(self._dResponse[sField]) != 1:\n raise testboxcommons.TestBoxException('The field \"%s\" appears more than once in the server response' \\\n % (sField,));\n self._dResponse[sField] = self._dResponse[sField][0]\n else:\n # Special case, dummy response object.\n self._dResponse = dict();\n # Done.", "title": "" }, { "docid": "c77314265a9822753c9b75c1cedae329", "score": "0.6600541", "text": "def __init__(self, response_dict={}):\n self.code = response_dict.get('code')\n self.name = response_dict.get('name')\n self.series = response_dict.get('series')\n self.total_cards = response_dict.get('totalCards')\n self.standard_legal = response_dict.get('standardLegal')\n self.release_date = response_dict.get('releaseDate')", "title": "" }, { "docid": "ef5ce99c9dfd9b7d877aff561376bcfe", "score": "0.6592119", "text": "def __init__(self, resp):\n self.resp_json = None\n try:\n self.resp_json = resp.json()\n except ValueError:\n pass\n self.resp_text = resp.text", "title": "" }, { "docid": "7cb162c9b79340b9d9e49bde3da23107", "score": "0.6589386", "text": "def make(cls, gitkit_response):\n gitkit_response = dict(gitkit_response) # Make a copy for mutations.\n matches, error = cls.match(gitkit_response)\n if not matches:\n raise ValueError(\n 'Unable to create %s; error: %s' % (cls.__name__, error))\n\n # response_body must be present. 
It contains a json string we must\n # convert to plain old Python values.\n response_body = gitkit_response.get(_GITKIT_RESPONSE_KEY_RESPONSE_BODY)\n if response_body is None:\n raise ValueError(\n 'Unable to parse GITKit OOB response: response_body missing')\n\n try:\n gitkit_response[_GITKIT_RESPONSE_KEY_RESPONSE_BODY] = (\n transforms.loads(response_body))\n except Exception, e:\n raise ValueError(\n 'Unable to parse %s from GITKit OOB response: got %s' % (\n _GITKIT_RESPONSE_KEY_RESPONSE_BODY, response_body))\n\n return cls(**gitkit_response)", "title": "" }, { "docid": "080833dd3fd269d4ff82227c6d9ef898", "score": "0.65586996", "text": "def fromResponseObj(self, res):\n if type(res) is dict and res.get('data') is not None:\n self._id = res[\"data\"][\"_id\"]\n self.trackingNumber = res[\"data\"][\"trackingNumber\"]\n self.message = res[\"data\"][\"message\"]\n else:\n self.message = str(res)\n self._id = self.trackingNumber = None", "title": "" }, { "docid": "c42ddc1348dbbf9fd232ecb45f1fc804", "score": "0.6558021", "text": "def __init__(\n self,\n response\n ):\n self.__npod_uuid = read_value(\n \"nPod.uuid\", response, str, False)\n self.__spu_serial = read_value(\n \"spu.serial\", response, str, False)\n self.__wwn = read_value(\n \"wwn\", response, str, True)\n self.__old_firmware_rev = read_value(\n \"oldFirmwareRev\", response, str, True)\n self.__new_firmware_rev = read_value(\n \"newFirmwareRev\", response, str, True)\n self.__vendor = read_value(\n \"vendor\", response, str, True)\n self.__model = read_value(\n \"model\", response, str, True)\n self.__eula_url = read_value(\n \"eulaURL\", response, str, True)", "title": "" }, { "docid": "494c1a8c982acbe610d70ee3216b83dc", "score": "0.6505576", "text": "def fromResponseObj(self, res):\n if type(res) is dict and res.get(\"data\") is not None:\n self.data = res[\"data\"]\n self.message = res.get(\"message\")\n else:\n self.message = str(res)\n self.data = None", "title": "" }, { "docid": "818de5889c406056908e974962e06856", "score": "0.6446237", "text": "def deserialize(self, string):\n result = json.loads(string)\n for k in ['url', 'status_code', 'matched', 'response_time', 'error', 'scrape_time']:\n setattr(self, k, result[k])\n return self", "title": "" }, { "docid": "52135e66becbf08040f9b192e09418e5", "score": "0.6426448", "text": "def parse(self, response):", "title": "" }, { "docid": "52135e66becbf08040f9b192e09418e5", "score": "0.6426448", "text": "def parse(self, response):", "title": "" }, { "docid": "e40fa6cc9ac094effbd1646b87c6b970", "score": "0.64180404", "text": "def __init__(\n self,\n response: dict\n ):\n\n self.__uuid = read_value(\n \"uuid\", response, str, True)\n self.__name = read_value(\n \"name\", response, str, True)\n self.__note = read_value(\n \"note\", response, str, True)\n self.__email = read_value(\n \"email\", response, str, True)\n self.__first_name = read_value(\n \"firstName\", response, str, True)\n self.__last_name = read_value(\n \"lastName\", response, str, True)\n self.__mobile_phone = read_value(\n \"mobilePhone\", response, str, True)\n self.__business_phone = read_value(\n \"businessPhone\", response, str, True)\n self.__inactive = read_value(\n \"inactive\", response, bool, True)\n self.__group_uuids = read_value(\n \"groups.uuid\", response, str, False)\n self.__preferences = read_value(\n \"preferences\", response, UserPreferences, False)\n self.__support_contact_id = read_value(\n \"supportContactID\", response, str, False)\n self.__policy_uuids = read_value(\n \"policies.uuid\", 
response, str, False)\n self.__change_password = read_value(\n \"changePassword\", response, bool, False)\n self.__change_password_reason = read_value(\n \"changePasswordReason\", response, ChangePasswordReason, False)", "title": "" }, { "docid": "dad1692329a7948d4e7388576f3ab15c", "score": "0.63826615", "text": "def parse(cls, json_data):\n dictionary = json.loads(json_data)\n return cls.build(dictionary)", "title": "" }, { "docid": "afe1147b366a839c15693431bdd23d25", "score": "0.6380625", "text": "def __init__(self, resp):\n\n self.resp = resp\n self.name = resp['name']\n self.type = resp['metadata']['operationType']\n\n self._parse_timing()", "title": "" }, { "docid": "8b29141e105f8a79e6b6d8a1ce58c16a", "score": "0.63520694", "text": "def __init__(self, response):\n self.response = response", "title": "" }, { "docid": "576ab0bbb5d4e2bc3f9fe85ebcd5ccc2", "score": "0.62402546", "text": "def from_dict(cls, data):\n response = cls()\n response.sender = data.get('from')\n response.content = data.get('text')\n response.subject = data.get('subject')\n return response", "title": "" }, { "docid": "785113835c1f61b18cdd4127b7da8910", "score": "0.62264836", "text": "def __init__(self, graph, response, profile=False):\n self.graph = graph\n self.header = []\n self.result_set = []\n\n # in case of an error an exception will be raised\n self._check_for_errors(response)\n\n if len(response) == 1:\n self.parse_statistics(response[0])\n elif profile:\n self.parse_profile(response)\n else:\n # start by parsing statistics, matches the one we have\n self.parse_statistics(response[-1]) # Last element.\n self.parse_results(response)", "title": "" }, { "docid": "7a394c2bbb8a4f870902b51e5b04e836", "score": "0.621284", "text": "def __init__(\n self,\n response: dict\n ):\n self.__spu_serial = read_value(\n \"spu.serial\", response, str, False)\n self.__wwn = read_value(\n \"wwn\", response, str, True)\n self.__media_type = read_value(\n \"mediaType\", response, str, True)\n self.__position = read_value(\n \"position\", response, int, True)\n self.__state = read_value(\n \"stateEnum\", response, PhysicalDriveState, True)\n self.__unadmitted = read_value(\n \"unadmitted\", response, bool, True)\n self.__size_bytes = read_value(\n \"sizeBytes\", response, int, True)\n self.__vendor = read_value(\n \"vendor\", response, str, True)\n self.__model = read_value(\n \"model\", response, str, True)\n self.__serial = read_value(\n \"serial\", response, str, True)\n self.__firmware_revision = read_value(\n \"firmwareRevision\", response, str, True)\n self.__interface_type = read_value(\n \"interfaceType\", response, str, True)\n self.__update_failure = read_value(\n \"updateFailure\", response, str, False)", "title": "" }, { "docid": "dc520044947159cc4401d26572c0f57b", "score": "0.61814946", "text": "def __init__(self, response, stream):\n\t\tself.response = response\n\t\tself.stream = stream", "title": "" }, { "docid": "9a508985520ddc7d8a00a42f86e370dd", "score": "0.6142405", "text": "def parse_response(self, resp):\n p, u = self.getparser()\n p.feed(resp.text)\n p.close()\n return u.close()", "title": "" }, { "docid": "c53c234f6617eb3ed01fd9d0f866ecb0", "score": "0.6111229", "text": "def parse_response(self, response):\n return xml.to_dict(response, self.responses, self.do_raise,\n self.delimiter, self.encapsulator, self.uniform)", "title": "" }, { "docid": "50b550033ad387bf154ce2232ff46087", "score": "0.61028934", "text": "def _parseResponse(self, data):\r\n return self._parseAndReturnResult(data)", "title": "" }, { 
"docid": "7861f98ef8154b838fafceceb634b5c3", "score": "0.6091204", "text": "def __init__(\n self,\n response: dict\n ):\n self.__items = read_value(\n \"items\", response, User, False)\n self.__more = read_value(\n \"more\", response, bool, False)\n self.__total_count = read_value(\n \"totalCount\", response, int, False)\n self.__filtered_count = read_value(\n \"filteredCount\", response, int, False)", "title": "" }, { "docid": "c1027f59f44a17046d4e27733f52bbcc", "score": "0.6081564", "text": "def load(self, response):\n self._doc_id = response.get_elements()[\"_id\"]\n self._rev_id = response.get_elements()[\"_rev\"]\n self._values = response.get_elements()[\"value\"]", "title": "" }, { "docid": "2706b3219a7e22002bf1379e64816146", "score": "0.60575765", "text": "def _unpack_response(response, cursor_id=None, as_class=dict, tz_aware=False):\r\n response_flag = struct.unpack(\"<i\", response[:4])[0]\r\n if response_flag & 1:\r\n # Shouldn't get this response if we aren't doing a getMore\r\n assert cursor_id is not None\r\n\r\n raise InterfaceError(\"cursor id '%s' not valid at server\" %\r\n cursor_id)\r\n elif response_flag & 2:\r\n error_object = bson.BSON(response[20:]).decode()\r\n if error_object[\"$err\"] == \"not master\":\r\n raise DatabaseError(\"master has changed\")\r\n raise DatabaseError(\"database error: %s\" %\r\n error_object[\"$err\"])\r\n\r\n result = {}\r\n result[\"cursor_id\"] = struct.unpack(\"<q\", response[4:12])[0]\r\n result[\"starting_from\"] = struct.unpack(\"<i\", response[12:16])[0]\r\n result[\"number_returned\"] = struct.unpack(\"<i\", response[16:20])[0]\r\n result[\"data\"] = bson.decode_all(response[20:], as_class, tz_aware)\r\n assert len(result[\"data\"]) == result[\"number_returned\"]\r\n return result", "title": "" }, { "docid": "e220c5044bf505d42213666b8d673c53", "score": "0.6053045", "text": "def fromResponseObj(self, res):\n if type(res) is dict and res.get('message') is not None:\n self.message= res[\"message\"]\n else:\n self.message = str(res)", "title": "" }, { "docid": "9b549981aaeda38589405e58051a084b", "score": "0.6036484", "text": "def _from_parsed_uri(cls, result):\n\n # decode label from uri path\n label = result.path\n if label.startswith(\"/\") and len(label) > 1:\n label = unquote(label[1:])\n else:\n raise cls._uri_error(\"missing label\")\n\n # extract old-style issuer prefix\n if \":\" in label:\n try:\n issuer, label = label.split(\":\")\n except ValueError: # too many \":\"\n raise cls._uri_error(\"malformed label\")\n else:\n issuer = None\n if label:\n label = label.strip() or None\n\n # parse query params\n params = dict(label=label)\n for k, v in parse_qsl(result.query):\n if k in params:\n raise cls._uri_error(\"duplicate parameter (%r)\" % k)\n params[k] = v\n\n # synchronize issuer prefix w/ issuer param\n if issuer:\n if \"issuer\" not in params:\n params['issuer'] = issuer\n elif params['issuer'] != issuer:\n raise cls._uri_error(\"conflicting issuer identifiers\")\n\n # convert query params to constructor kwds, and call constructor\n return cls(**cls._adapt_uri_params(**params))", "title": "" }, { "docid": "8526a5a9dcf51db0d56d7ac46887a443", "score": "0.6030578", "text": "def _parse_bing_response(self, query, results, offset):\n response = Response()\n response.version = 'json'\n response.feed.setdefault('title', \"Results from %s for: %s\" % (self.engineName, query.search_terms))\n response.feed.setdefault('link', \"\")\n response.feed.setdefault('description', \"%s results from %s\" % (self.source, self.engineName))\n 
response.namespaces.setdefault(\"opensearch\", \"http://a9.com/-/spec/opensearch/1.1/\")\n \n \n if self.source == \"Web\":\n entries = self._parse_web_results(results) \n \n elif self.source == \"Image\":\n entries = self._parse_image_results(query, results)\n \n elif self.source == \"News\":\n entries = self._parse_news_results(results)\n \n elif self.source == \"RelatedSearch\":\n entries = self._parse_related_results(query, results)\n \n elif self.source == \"Video\":\n entries = self._parse_video_results(query, results)\n \n elif self.source == \"SpellingSuggestions\":\n entries = self._parse_spelling_results(query, results)\n \n for entry in entries:\n response.entries.append(entry)\n \n response.feed.setdefault('opensearch_totalresults', len(entries))\n response.feed.setdefault('opensearch_startindex', offset)\n response.feed.setdefault('opensearch_itemsperpage', self.resultsPerPage)\n\n return response", "title": "" }, { "docid": "553ac2e8a41c61ff390158db50431ca8", "score": "0.6029541", "text": "def parse_response(response):\n status = response.status\n produces = [response.content.mimeType]\n schema = json.loads(response.content.text, cls=YAMLSchemaDecoder)\n return dict(status=status, produces=produces, schema=schema)", "title": "" }, { "docid": "78b69ea24ff8a7c2939d5086c9e23bcb", "score": "0.60275006", "text": "def __init__(self, response=None, status=200, headers=None, mimetype=None,\n content_type=None):\n if response is None:\n self.response = []\n elif isinstance(response, basestring):\n self.response = [response]\n else:\n self.response = iter(response)\n if not headers:\n self.headers = Headers()\n elif isinstance(headers, Headers):\n self.headers = headers\n else:\n self.headers = Headers(headers)\n if content_type is None:\n if mimetype is None and 'Content-Type' not in self.headers:\n mimetype = self.default_mimetype\n if mimetype is not None and mimetype.startswith('text/'):\n mimetype += '; charset=' + self.charset\n content_type = mimetype\n if content_type is not None:\n self.headers['Content-Type'] = content_type\n if isinstance(status, (int, long)):\n self.status_code = status\n else:\n self.status = status", "title": "" }, { "docid": "bf6a44f612a4afc6e5fb89ca9c14be21", "score": "0.60080504", "text": "def parse(self, data: dict) -> T:\n kwargs = self.get_kwargs(data)\n return self.object_class(**kwargs)", "title": "" }, { "docid": "56d4351e74f22395b18452d3b7381f87", "score": "0.59892863", "text": "def DecodeResponse( self, xmlResponse ):\n\t\tdom = xml.dom.minidom.parseString( xmlResponse )\n\t\ttaskNode = dom.getElementsByTagName( \"task\" )[0]\n\t\ttask = Task()\n\t\ttask.Id = taskNode.getAttribute( \"id\" )\n\t\ttask.Status = taskNode.getAttribute( \"status\" )\n\t\tif task.Status == \"Completed\":\n\t\t\ttask.DownloadUrl = taskNode.getAttribute( \"resultUrl\" )\n\t\treturn task", "title": "" }, { "docid": "e2dfeedc03fd38dad796099b9d24222f", "score": "0.5979086", "text": "def __init__(self, resp):\n\n self.resp = resp\n self.id = '{projectId}:{jobId}'.format(**resp['reference'])\n self.state = resp['status']['state']\n\n self.job_type = set(tuple(resp.keys()))\\\n .intersection({'hadoopJob', 'sparkJob', 'pysparkJob', 'hiveJob', 'pigJob', 'sparkSqlJob'})\\\n .pop()\n\n self._parse_timing()", "title": "" }, { "docid": "b0b639a9d05393735095c8bdc7ee1ae7", "score": "0.5969583", "text": "def parse_response(resp):\n filter_only = ['url', 'title', 'features', 'status']\n parsed_resp = {key: val for key, val in resp.items() if key in filter_only}\n\n 
parsed_resp['scraped_date'] = [date.today().isoformat()]\n parsed_resp['price'] = [resp['price']['value']]\n parsed_resp['price_pm'] = [resp['areaPrice']['value']]\n parsed_resp['added_date'] = [resp['dateCrated']]\n parsed_resp['updated_date'] = [resp['dateModified']]\n\n parsed_resp['_id'] = resp['id']\n parsed_resp['address'] = resp['addresses']['pl']\n parsed_resp['coordinates'] = f\"{resp['coordinates']['latitude']}+{resp['coordinates']['longitude']}\"\n parsed_resp['characteristics'] = {char['key']: char['value_translated'] for char in resp['characteristics']}\n parsed_resp['photos'] = [(photo['thumbnail'], photo['large']) for photo in resp['photos'].values()]\n\n return parsed_resp", "title": "" }, { "docid": "4b93bdd55727ae5e3769c3f102645048", "score": "0.59676665", "text": "def parse(cls, response: requests.Response) -> APIResponse:\n try:\n data = response.json()\n except (ValueError, TypeError) as e:\n raise APIResponseParsingException(exc=e, response=response)\n else:\n cls.check_for_errors(data)\n return APIResponse(data)", "title": "" }, { "docid": "948f1baad0809d4994678abc371bfb6f", "score": "0.5943018", "text": "def _deserialize_response(data: dict, t: Type[T] = BaseResponse) -> T:\n return parse_obj_as(t, data)", "title": "" }, { "docid": "c7edb0a8ea01812eb23efec5a8b02dd8", "score": "0.5912667", "text": "def _parse_response(self, str_response):\n try:\n resp = json.loads(str_response)\n status = resp['status']\n message = resp['message']\n except (ValueError, KeyError):\n raise SBusClientMalformedResponse('Got malformed response')\n\n # NOTE(takashi): task_id is currently used only in EXECUTE command, so\n # we don't fail here even if the given response doesn't\n # have task_id.\n task_id = resp.get('task_id')\n\n return SBusResponse(status, message, task_id)", "title": "" }, { "docid": "6b4ff981fbff4302712a1998d03b9fb0", "score": "0.5907516", "text": "def from_api_response(cls, reddit_session, json_dict):\r\n if cls == WikiPage: # Temporary HACK for WikiPage\r\n # pylint: disable-msg=W0212\r\n parts = reddit_session._request_url.split('/', 6)\r\n # pylint: enable-msg=W0212\r\n subreddit = parts[4]\r\n page = parts[6].split('.', 1)[0]\r\n return cls(reddit_session, subreddit, page, json_dict=json_dict)\r\n return cls(reddit_session, json_dict=json_dict)", "title": "" }, { "docid": "65e0e6054744bac5c2f83023c0cc8b88", "score": "0.5899166", "text": "def __init__(self, line, *args, **kwargs):\n\n if re.search(r\"Response\", line) is None:\n raise InitializationError()\n self.main_id = None\n self.line = line\n\n self.time = self._get_time()\n self.mac = self._get_mac()\n self.ip = self._get_ip()", "title": "" }, { "docid": "0df00bc329c74e673e4f72c7f5b67993", "score": "0.58825713", "text": "def __init__(self, s):\n self._parse(s)", "title": "" }, { "docid": "484d992535ff5504bcb4f5a37ddfa4ae", "score": "0.5876787", "text": "def __init__(self, response):\n self._total_speech_time = response.get(self._TOTAL_SPEECH_TIME, None)\n self._remaining_speech_time = response.get(self._REMAINING_SPEECH_TIME, None)\n self._speech_time = response.get(self._SPEECH_TIME, None)\n self._enrollment_status = response.get(self._ENROLLMENT_STATUS, None)", "title": "" }, { "docid": "34181215e67d4297b4fea58cda52088b", "score": "0.5869378", "text": "def parse (cls, data, *args, **kwargs):\n return cls().load(data, *args, **kwargs)", "title": "" }, { "docid": "b47641062d96fc8ce00311367442e03d", "score": "0.5864493", "text": "def parse_response(self, response):\n\n def custom_json_decoder():\n return 
load_from_json_str(response.text)\n\n response.json = custom_json_decoder\n try:\n return ServerBase.parse_response(response)\n except ProtocolError as exc:\n if self._response_error_handler:\n return self._response_error_handler(exc)\n raise", "title": "" }, { "docid": "f17102899c07fc3d9077a18bea771b56", "score": "0.5841206", "text": "def parse_response(resp, endpoint=None, tag=None):\n try:\n try:\n content = resp.content\n root = ET.fromstring(content)\n code = root.find('./Code').text\n msg = root.find('./Message').text\n request_id = root.find('./RequestId').text\n host_id = root.find('./HostId').text\n except ETParseError:\n request_id = resp.headers.get('x-odps-request-id', None)\n if len(resp.content) > 0:\n obj = json.loads(resp.text)\n msg = obj['Message']\n code = obj.get('Code')\n host_id = obj.get('HostId')\n if request_id is None:\n request_id = obj.get('RequestId')\n else:\n raise\n clz = globals().get(code, ODPSError)\n return clz(\n msg, request_id=request_id, code=code, host_id=host_id, endpoint=endpoint, tag=tag\n )\n except:\n # Error occurred during parsing the response. We ignore it and delegate\n # the situation to caller to handle.\n LOG.debug(utils.stringify_expt())\n\n if resp.status_code == 404:\n return NoSuchObject('No such object.', endpoint=endpoint, tag=tag)\n else:\n text = resp.content.decode() if six.PY3 else resp.content\n if text:\n if resp.status_code == 502 and _nginx_bad_gateway_message in text:\n return BadGatewayError(\n text, code=str(resp.status_code), endpoint=endpoint, tag=tag\n )\n else:\n return ODPSError(text, code=str(resp.status_code), endpoint=endpoint, tag=tag)\n else:\n return ODPSError(str(resp.status_code), endpoint=endpoint, tag=tag)", "title": "" }, { "docid": "a37799518218ddb865d4c2f586c90830", "score": "0.58384097", "text": "def __init__(\n self,\n response: dict\n ):\n self.__items = read_value(\n \"items\", response, PhysicalDrive, False)\n self.__more = read_value(\n \"more\", response, bool, False)\n self.__total_count = read_value(\n \"totalCount\", response, int, False)\n self.__filtered_count = read_value(\n \"filteredCount\", response, int, False)", "title": "" }, { "docid": "61dc6f78d7c3daed3b656fc02fcf6679", "score": "0.583705", "text": "def _parse_result(self, response, verbose=False):\n\n arr = np.atleast_1d(np.genfromtxt(io.BytesIO(response.content),\n names=True, dtype=None, delimiter=',',\n skip_header=1))\n\n if len(arr) == 0:\n return None\n else:\n return Table(arr)", "title": "" }, { "docid": "bc060cc5aa007d7b797bcb3f8673e104", "score": "0.58370167", "text": "def parse_response(cls, should_close=False):\n return cls(should_close=should_close)", "title": "" }, { "docid": "2a5f4a77d43215c5ff585e7789b0aa29", "score": "0.58326125", "text": "def from_response(cls, text: str):\n #text = '\\n0. 
azurenightwalker, 76561198056214819 \\n '\n if text == 'No Players Connected \\n ':\n return cls(0,())\n names = tuple(filter(None, map(lambda name: name.groups()[0], REGEX.finditer(text))))\n return cls(len(names), names)", "title": "" }, { "docid": "1a0a40fa498890259499600ad89a418c", "score": "0.5814047", "text": "def __init__(self, response):\n Exception.__init__(self, \"Expected to get Response object, received {}\".format(type(response)))", "title": "" }, { "docid": "d9364a9460ed565a5c3b2590f5ab8405", "score": "0.5809993", "text": "def parse_response(response):\n dash_app.server.logger.debug(\"parse_response - input: {}\".format(response.text))\n parsed_response = json.loads(response.text)\n dash_app.server.logger.debug(\"parse_response - parsed: {}\".format(parsed_response))\n return parsed_response", "title": "" }, { "docid": "cccfb9fdbd10eae81ceda72012c3b6fe", "score": "0.58010197", "text": "def __init__(self, resp, data):\n self.aiohttp_response = resp\n self.status = resp.status\n self.reason = resp.reason\n self.data = data", "title": "" }, { "docid": "1f660f8c5d89004f9b1afc2125e425c0", "score": "0.57980204", "text": "def parse_response(response):\n d = {}\n d[\"full_name\"] = response[\"input\"][\"full_name\"]\n d[\"samples\"] = response[\"details\"][\"samples\"]\n d[\"first_name\"] = response[\"first_name\"]\n d[\"probability\"] = response[\"probability\"]\n d[\"gender\"] = response[\"gender\"]\n\n return d", "title": "" }, { "docid": "b806de66d903793d2100e74a1257b65e", "score": "0.5796085", "text": "def __init__(\n self,\n response: dict\n ):\n\n self.__send_notification = read_value(\n \"sendNotification\", response, SendNotificationType, True)\n self.__time_zone = read_value(\n \"timeZone\", response, str, True)\n self.__show_base_two = read_value(\n \"showBaseTwo\", response, bool, True)\n self.__date_format = read_value(\n \"dateFormat\", response, DateFormat, True)", "title": "" }, { "docid": "eb6add536feacea28e26a4b8a33ecf05", "score": "0.57925516", "text": "def load_client_response(self, rs_body):\n\n try:\n self._rs_dict = CustomDict(json.loads(rs_body))\n except JSONDecodeError:\n try:\n self._rs_dict = CustomDict(xml2dict(rs_body))\n except XMLSyntaxError:\n logger.warning('Response str could be neither parsed to json nor xml obj')\n return self", "title": "" }, { "docid": "84b79088472d2199b81926f873e9f9c3", "score": "0.57809347", "text": "def from_response(response, method=None):\n if response.status_code:\n cls, enhanced_classes = _code_map.get(response.status_code,\n (ClientException, []))\n\n req_id = response.headers.get(\"x-openstack-request-id\")\n content_type = response.headers.get(\"Content-Type\", \"\").split(\";\")[0]\n\n kwargs = {\n 'code': response.status_code,\n 'method': method,\n 'url': response.url,\n 'request_id': req_id,\n }\n\n if \"retry-after\" in response.headers:\n kwargs['retry_after'] = response.headers.get('retry-after')\n\n if content_type == \"application/json\":\n try:\n body = response.json()\n except ValueError:\n pass\n else:\n if 'description' in body:\n # Gnocchi json\n desc = body.get('description')\n if desc and isinstance(desc, six.text_type):\n for enhanced_cls in enhanced_classes:\n if enhanced_cls.match.match(desc):\n cls = enhanced_cls\n break\n kwargs['message'] = desc\n elif isinstance(body, dict) and isinstance(body.get(\"error\"),\n dict):\n # Keystone json\n kwargs['message'] = body[\"error\"][\"message\"]\n else:\n kwargs['message'] = response.text\n elif content_type.startswith(\"text/\"):\n kwargs['message'] = 
response.text\n\n if not kwargs['message']:\n del kwargs['message']\n return cls(**kwargs)", "title": "" }, { "docid": "f589baf0dc218ecc9d37cc57091ceb1e", "score": "0.57767695", "text": "def fromJson(json):\n if 'type' in json:\n if json['type'] == PaillierResponse.ADD_RESP:\n return PaillierResponse.parseAddResponse(json)\n if json['type'] == PaillierResponse.MUL_RESP:\n return PaillierResponse.parseMulResponse(json)\n if json['type'] == PaillierResponse.ERROR:\n return PaillierResponse.parseErrorReponse(json)\n if json['type'] == PaillierResponse.SUB_RESP:\n return PaillierResponse.parseSubResponse(json)\n\n raise MessageParseError('No \"type\" field in message')", "title": "" }, { "docid": "c58c9f259ed165a558254a45d54ff342", "score": "0.5769329", "text": "def __init__(self, api_response: Dict[str, Any], config: ShipEngineConfig) -> None:\n self.events = list()\n result = api_response[\"result\"]\n for event in result[\"events\"]:\n self.events.append(TrackingEvent(event=event))\n\n self.shipment = (\n Shipment(\n shipment=result[\"shipment\"],\n actual_delivery_date=self.get_latest_event().date_time,\n config=config,\n )\n if \"shipment\" in result\n else None\n )\n self.package = Package(result[\"package\"]) if \"package\" in result else None", "title": "" }, { "docid": "73259b8ab76b01eba64a3f685950bc7e", "score": "0.57637143", "text": "def from_string(self, s):\n ss = s.split('.')\n self.name = json.loads(ss[0])\n self.id = json.loads(ss[1])\n self.cards = json.loads(ss[2])\n self.free_armies = json.loads(ss[3])\n self.conquered_territory = json.loads(ss[4])", "title": "" }, { "docid": "c48d8de77e9c8a91bbfb69c8cf7c46a5", "score": "0.57604486", "text": "def from_data(cls, data):\n self = object.__new__(cls)\n self.description = parse_description(data)\n self.welcome_channels = parse_welcome_channels(data)\n return self", "title": "" }, { "docid": "0be36a5354c049c001a02e3b9381d1ed", "score": "0.57442486", "text": "def from_json(cls, data):\n self = cls.__new__(cls)\n\n self.kind = data.get('kind')\n self.brand = data['brand']\n self.name = data['name']\n self.price = data['price']\n self.main = data['main']\n self.stars = data['stars']\n self.image = data.get('image')\n self.frequent_skill = data.get('frequent_skill')\n return self", "title": "" }, { "docid": "0f143461d36b7af0d29d7375db3807f4", "score": "0.57423466", "text": "def _parse_response(self, res):\n #decoded = '' # Referenced before assignment protection\n # content_type we get with nHaystack is Content_type : application/json; charset=UTF-8\n content_type = res.headers['Content-Type']\n if ';' in content_type:\n # Separate encoding from content type\n (content_type, encoding) = content_type.split(';',1)\n content_type = content_type.strip()\n # TODO: do we need to convert to Unicode, of so, how?\n\n if content_type in ('text/zinc', 'text/plain'):\n decoded = hszinc.parse(res.text, mode=hszinc.MODE_ZINC)[0]\n elif 'application/json' in content_type:\n decoded = hszinc.parse(res.text, mode=hszinc.MODE_JSON)\n else:\n raise NotImplementedError(\"Don't know how to parse type %s\" \\\n % content_type)\n if 'err' in decoded.metadata:\n raise HaystackError(decoded.metadata.get('dis', 'Unknown error'),\n traceback=decoded.metadata.get('traceback',None))\n return decoded", "title": "" }, { "docid": "807e9893ac2e3700faa84d46cdd76c30", "score": "0.5724377", "text": "def from_response(cls, url, message, response):\n if isinstance(response, tuple):\n # The response has been unrolled into a (status_code,\n # headers, body) 3-tuple.\n 
status_code, headers, content = response\n else:\n status_code = response.status_code\n content = response.content\n return BadResponseException(\n url, message, \n debug_message=\"Status code: %s\\nContent: %s\" % (\n status_code,\n content,\n )\n )", "title": "" }, { "docid": "e5d6f5254534b035c32bf77e862968c3", "score": "0.5723599", "text": "def parse(self, response):\n final_data = {}\n\n item = {}\n\n if self.__avoid_unwanted_responses(response.status):\n doc = html.fromstring(response.body)\n domain = response.meta['domain']\n site_variable, site_name = parse_site_variable(response.url)\n final_data[self.get_domain(domain)] = []\n \n \n #phone Number only \n phone_logic = PhoneParser(response)\n phone_number = phone_logic.process_response(response)\n if phone_number:\n phone_no = phone_number\n else:\n phone_no = []\n self.total_item.append(phone_no)", "title": "" }, { "docid": "91bcf115ba537f688f47190b93f8f823", "score": "0.57228917", "text": "def from_data(cls, data):\n self = object.__new__(cls)\n self.distributor = parse_distributor(data)\n self.id = parse_id(data)\n self.sku = parse_sku(data)\n return self", "title": "" }, { "docid": "903b750ec7d8e5f42957a590994c000c", "score": "0.5718594", "text": "def from_data(data):\n if data is None or len(data) == 0:\n return PAR()\n\n f = PAR()\n\n from Acquire.Client import Location as _Location\n from Acquire.Client import ACLRule as _ACLRule\n from Acquire.ObjectStore import string_to_datetime \\\n as _string_to_datetime\n\n f._location = _Location.from_data(data[\"location\"])\n f._aclrule = _ACLRule.from_data(data[\"aclrule\"])\n f._expires_datetime = _string_to_datetime(data[\"expires_datetime\"])\n f._uid = data[\"uid\"]\n\n return f", "title": "" }, { "docid": "c6902616027b299d5c986520423e237d", "score": "0.57115", "text": "def loads(cls, buffer_):\n return cls(**json.loads(buffer_))", "title": "" }, { "docid": "6f79efffdf19664c08e59def378f8cfb", "score": "0.57100546", "text": "def parse(self, response):\n \n loader = ItemLoader(item=RealEstateItem(), response=response)\n\n loader.add_xpath('title', '//*[@id=\"ls_title_10024233\"][1]/text()')\n loader.add_xpath('price', '(//div[contains(@id, \"ls_price\")])[1]/font/text()')\n loader.add_xpath('description', '//*[@id=\"ls_property_link_10024233\"]/div/div[2]/div[2]/div/div[1]/div[2]/div[2]/text()')\n loader.add_xpath('address', '//*[@id=\"ls_loc_9260436\"]/text()')\n loader.add_xpath('image_urls', '//*[@id=\"ls_image_10024233\"]/img/@src')\n \n # Housekeeping fields\n loader.add_value('url', response.url)\n loader.add_value('project', self.settings.get('BOT_NAME'))\n loader.add_value('spider', self.name)\n loader.add_value('server', socket.gethostname())\n loader.add_value('date', datetime.datetime.now())\n\n return loader.load_item()", "title": "" }, { "docid": "41a7a52d7113e48b1e90163493e07f8e", "score": "0.56709975", "text": "def _parse_response(self, response):\n\n response_dict = {}\n for line in response.splitlines():\n key, value = response.split(\"=\", 1)\n response_dict[key] = value\n return response_dict", "title": "" }, { "docid": "9c61ed248b01089ee57adca62db98f81", "score": "0.5653755", "text": "def parse(self, response, shape):\n ...", "title": "" }, { "docid": "7154abd803bb5c353f923a858bd945e2", "score": "0.5649987", "text": "def nal_response(response):\n cls = _nal_code_map.get(response['error-code'], NalApiException)\n # Iterate over the nested objects and retreive the \"message\" attribute.\n details = response['message']\n return cls(details=details)", "title": "" 
}, { "docid": "8a1b47877c58bfa31dc0e476378fe638", "score": "0.5633628", "text": "def __init__(self, soup=None):\n if soup is not None:\n self.parse(soup)", "title": "" }, { "docid": "584ca56607076c69eafe3950c8834715", "score": "0.56273276", "text": "def __init__(self, response=None, status=None, headers=None, mimetype=None,\n content_type=None, direct_passthrough=False, max_age=0,\n private=None):\n max_age = max_age or 0\n try:\n max_age = int(max_age)\n except ValueError:\n max_age = 0\n self.max_age = max_age\n if private is None:\n if self.max_age == 0:\n # The most common reason for max_age to be set to 0 is that a resource\n # is _also_ private.\n private = True\n else:\n private = False\n self.private = private\n\n body = response\n if isinstance(body, etree._Element):\n body = etree.tostring(body)\n elif not isinstance(body, (bytes, str)):\n body = str(body)\n\n super(Response, self).__init__(\n response=body,\n status=status,\n headers=self._headers(headers or {}),\n mimetype=mimetype,\n content_type=content_type,\n direct_passthrough=direct_passthrough\n )", "title": "" }, { "docid": "2fc19d2dde76330ce161c0b4b75a8e84", "score": "0.5625901", "text": "def _parse_response(response):\n if response is None:\n raise APIException('Failed to read data from API.')\n try:\n return json.loads(response)\n except:\n raise APIException('Failed to parse data from API.')", "title": "" }, { "docid": "48f4cc90d87718bdaf934823c8410a42", "score": "0.5625173", "text": "def __init__(self: ResourceResult):\n self.results: List[Dict] = \"\"\n self.type: int = 0\n self.title: str = \"\"\n return", "title": "" }, { "docid": "170fad8fc38865e058d2da327e3115c1", "score": "0.5620945", "text": "def _parse_body(self, robj, response, expected_statuses):\n # If no response given, then return.\n if response is None:\n return None\n\n status, headers, data = response\n\n # Check if the server is down(status==0)\n if not status:\n m = 'Could not contact Riak Server: http://{0}:{1}!'.format(\n self._node.host, self._node.http_port)\n raise RiakError(m)\n\n # Make sure expected code came back\n self.check_http_code(status, expected_statuses)\n\n if 'x-riak-vclock' in headers:\n robj.vclock = VClock(headers['x-riak-vclock'], 'base64')\n\n # If 404(Not Found), then clear the object.\n if status == 404:\n robj.siblings = []\n return None\n # If 201 Created, we need to extract the location and set the\n # key on the object.\n elif status == 201:\n robj.key = headers['location'].strip().split('/')[-1]\n # If 300(Siblings), apply the siblings to the object\n elif status == 300:\n ctype, params = parse_header(headers['content-type'])\n if ctype == 'multipart/mixed':\n if six.PY3:\n data = bytes_to_str(data)\n boundary = re.compile('\\r?\\n--%s(?:--)?\\r?\\n' %\n re.escape(params['boundary']))\n parts = [message_from_string(p)\n for p in re.split(boundary, data)[1:-1]]\n robj.siblings = [self._parse_sibling(RiakContent(robj),\n part.items(),\n part.get_payload())\n for part in parts]\n\n # Invoke sibling-resolution logic\n if robj.resolver is not None:\n robj.resolver(robj)\n\n return robj\n else:\n raise Exception('unexpected sibling response format: {0}'.\n format(ctype))\n\n robj.siblings = [self._parse_sibling(RiakContent(robj),\n headers.items(),\n data)]\n\n return robj", "title": "" }, { "docid": "94389282f3fd8c4b20cfab7ce205fc5b", "score": "0.562015", "text": "def parse_response(self, body):\n p, u = self.getparser()\n p.feed(body)\n p.close()\n return u.close()", "title": "" }, { "docid": 
"49ead695b0dc90d4e7d63550bc424dcf", "score": "0.56131077", "text": "def from_dict(cls, d: Dict[str, Any]) -> object:\n try:\n return ResponseRPC(result=d[\"result\"], error=d[\"error\"])\n except KeyError:\n raise Exception(\n \"The RPC dictionary must have both the 'result' and 'error' keys\"\n )", "title": "" }, { "docid": "a15e6e5690668e3432fd086dc18fde65", "score": "0.560839", "text": "def __init__(self, raw, user):\n self.body = raw[\"body\"]\n self.content = raw[\"content\"]\n self.count = raw[\"count\"]\n self.created_time = raw[\"created\"]\n self.key = raw[\"key\"]\n self.notif_id = raw[\"_id\"]\n self.priority = raw[\"priority\"]\n self.read = raw[\"subject\"]\n self.reason = raw[\"reason\"]\n self.status = raw[\"status\"]\n self.subject = raw[\"subject\"]\n self.thing_id = raw[\"thingID\"]\n self.thing_type = raw[\"thingType\"]\n self.updated_time = raw[\"updated\"]\n self.user = user\n try:\n self.reply_id = raw[\"replyId\"]\n except KeyError:\n self.reply_id = None\n try:\n self.hash_key = raw[\"hash_key\"]\n self.__v = raw[\"__v\"]\n except KeyError:\n self.hash_key = None\n self.__v = None", "title": "" }, { "docid": "7b60cf62cf58ec1e4844d2cd672c7190", "score": "0.55986893", "text": "def factory(cls, response):\n valid_responses_map = {\n 'CpChanged': 'speaker.service.changed',\n 'SignInStatus': 'speaker.service.logged_in',\n 'SignOutStatus': 'speaker.service.logged_out',\n }\n\n if response.name not in valid_responses_map:\n return None\n\n return cls(valid_responses_map[response.name], response.data['cpname'])", "title": "" }, { "docid": "882ee6162776da65d5d7cedb828f9492", "score": "0.5573996", "text": "def parse (self):\n\n xmlobj = xml2obj(self.apml_content)\n\n # create a structure similar to the one \n # defined by Jon Ciancillo in his php version. 
\n return Apml(xmlobj)", "title": "" }, { "docid": "bfc9dbc343381399a01224430421ce72", "score": "0.5569944", "text": "def parse(obj):\n return InfoChainResult(\n version=obj[\"version\"],\n protocolVersion=obj[\"protocolversion\"],\n blocks=obj[\"blocks\"],\n timeOffset=obj[\"timeoffset\"],\n connections=obj[\"connections\"],\n proxy=obj[\"proxy\"],\n difficulty=obj[\"difficulty\"],\n testNet=obj[\"testnet\"],\n relayFee=obj[\"relayfee\"],\n errors=obj[\"errors\"],\n )", "title": "" }, { "docid": "0931ebb300e275b227fd1fa75345e093", "score": "0.5569687", "text": "def parse_book(self, response):\n\n book_loader = BookLoader(\n item=Book(),\n response=response\n )\n\n # Book url\n book_loader.add_value(\n 'book_url',\n response.url\n )\n\n # Book title\n book_loader.add_xpath(\n 'title',\n \"//div[@class='prodtitle']/h1/text()\"\n )\n\n # Book price\n book_loader.add_xpath(\n 'price',\n \"//div[@class='buying-priceold-val']/span/text()\"\n )\n\n # Book publishing house\n book_loader.add_xpath(\n 'publishing_house',\n \"//div[@class='publisher']/a/text()\"\n )\n\n # Book series\n book_loader.add_xpath(\n 'series_of_books',\n \"//div[@class='series']/a/text()\"\n )\n\n return book_loader.load_item()", "title": "" }, { "docid": "c0793e3b5c41d902e2e9ff3b77cf8286", "score": "0.5568101", "text": "def __init__(self, data, region):\n # Parse response data\n self.name = data['name']\n self.id = data['id']\n self.account_id = data['accountId']\n self.puuid = data['puuid']\n self.iconId = data['profileIconId']\n self.level = data['summonerLevel']\n self.region = region\n\n # This is only for later creating a Ranked Summoner object\n self._raw = data", "title": "" }, { "docid": "851ae551f5608dfc5810d283b4a086c5", "score": "0.5557714", "text": "def makeParser(self, msg=None):\n if msg:\n self.msg = msg\n\n self.parser = self.parseResponse() # make generator", "title": "" }, { "docid": "fa1b20b492f7543a824664b7b9e13e98", "score": "0.5555741", "text": "def from_dict(cls, d):\n return cls(\n name=d[\"name\"],\n word=d[\"word\"],\n misses=d[\"misses\"],\n hits=d[\"hits\"],\n host=d[\"host\"],\n guesser=d[\"guesser\"],\n state=d[\"state\"]\n )", "title": "" }, { "docid": "e5f039e68cf61d903573993f82ddc694", "score": "0.5553371", "text": "def _parse_response(self, response):\n myStr = StringIO.StringIO(response)\n\n for line in myStr:\n if string.find(line, '#__GR2PROTO__') != -1:\n break\n\n # make sure the 1st line is #__GR2PROTO__\n if string.find(line, '#__GR2PROTO__') == -1:\n raise Exception(\"Bad response: %r\" % response)\n\n resDict = {}\n\n for myS in myStr:\n myS = myS.strip()\n strList = string.split(myS, '=', 2)\n\n try:\n resDict[strList[0]] = strList[1]\n except:\n resDict[strList[0]] = ''\n\n return resDict", "title": "" }, { "docid": "3e7be474b5927c4ee06796cb00f05b20", "score": "0.55440974", "text": "def __init__(self, response, url_suffix, request_data):\n resp_json = None\n try:\n resp_json = response.json()\n except:\n resp_json = {}\n if response.status_code == 500:\n if resp_json and not resp_json.get('reason'):\n resp_json = {'reason': 'Internal Server Error', 'suggestion': 'Contact DDS support.'}\n Exception.__init__(self, 'Error {} on {}\\nReason:{}\\nSuggestion:{}'.format(\n response.status_code, url_suffix, resp_json.get('reason', resp_json.get('error', '')),\n resp_json.get('suggestion', '')\n ))\n self.response = resp_json\n self.url_suffix = url_suffix\n self.request_data = request_data\n self.status_code = response.status_code", "title": "" }, { "docid": 
"165734ffbd768caad43b22c60c7404ea", "score": "0.5536647", "text": "def from_dict(cls, dikt) -> 'InlineResponse20012':\n return deserialize_model(dikt, cls)", "title": "" }, { "docid": "76d4bccc1df939d73e0283406afe009a", "score": "0.55364615", "text": "def from_dict(cls, dikt) -> 'InlineResponse200':\n return deserialize_model(dikt, cls)", "title": "" }, { "docid": "37a23897b6ecdfb25bb3d051c162f7fd", "score": "0.55358404", "text": "def from_dict(cls, dikt) -> 'InlineResponse200':\n return util.deserialize_model(dikt, cls)", "title": "" }, { "docid": "05b0f9c3678863451b6df4e1477209d1", "score": "0.5533961", "text": "def from_xml_str(cls, _xml: str):\n xml2_obj_converter = Xml2Obj()\n el = xml2_obj_converter.ParseString(_xml)\n return cls.from_xml(el)", "title": "" }, { "docid": "98e9b23745cad062d80629c68aa60c4d", "score": "0.5527031", "text": "def _unjsonify(self, response):\n return objectify(json.loads(response))", "title": "" }, { "docid": "9387318ce5e5d05d0fa1ac7111456f20", "score": "0.55053926", "text": "def _parse_result(self, response, *, verbose=False):\n formats = parse_readme(self.FORMATFILE)\n\n dtypes = [entry['dtype'] for entry in formats.values()]\n\n rows = []\n for line in response.text.split('\\n'):\n if line.strip():\n row = []\n start = 0\n for key, entry in formats.items():\n formatter = entry['formatter']\n length = entry['length']\n value = formatter(line[start:start+length])\n row.append(value)\n start = start + length\n rows.append(row)\n\n result = Table(rows=rows, names=formats.keys(), dtype=dtypes)\n\n return result", "title": "" }, { "docid": "bc0f8db7f1dca7814abd15e53d2a86cf", "score": "0.5496888", "text": "def _parse_response(self, response, api=False):\n data = response.json()\n if 'error' in data:\n raise PyYouTubeException(response)\n if api:\n return self._parse_data(data)\n return data", "title": "" }, { "docid": "c01a7cc82da81502f6a7e22af16e9aff", "score": "0.5493553", "text": "def from_dict(cls, data):\n return from_dict(cls, data)", "title": "" }, { "docid": "2e73e61bb1610d7cb6f73948de388099", "score": "0.5487599", "text": "def parse_from_json(self, json):\n self.name = json[\"name\"]\n self.ID = json[\"id\"]\n self.type = json[\"type\"]\n self.group = json[\"group\"]\n self.params = Params(json[\"params\"])", "title": "" }, { "docid": "a3f2bdac8b60640080abcf0226a26e28", "score": "0.5480331", "text": "def __init__(self, *, raw_request=None, parsed_request=None):\n if raw_request is None and parsed_request is None:\n raise ValueError('Either \"raw_request\" or \"parsed_request\" must be set for Request.')\n\n self._raw_request = raw_request\n \"\"\"str|None: Raw request json, if given.\"\"\"\n\n self._parsed_request = parsed_request\n \"\"\"dict|None: Parsed request json, if given or after being parsed from `_raw_request`.\"\"\"\n\n self._dict = None\n \"\"\"dict|None: Parsed request json after being checked for correct type, used to skip repeated checks.\"\"\"", "title": "" }, { "docid": "82fdf129b4a05b9fc8f8b0e56fd0b5cb", "score": "0.5474289", "text": "def __init__(\n self,\n response: dict\n ):\n self.__items = read_value(\n \"items\", response, PhysicalDriveUpdate, True)\n self.__more = read_value(\n \"more\", response, bool, True)\n self.__total_count = read_value(\n \"totalCount\", response, int, True)\n self.__filtered_count = read_value(\n \"filteredCount\", response, int, True)", "title": "" } ]
20c21940fc9dd704dacf8ea88aa5ee69
Check the type of address (v4, v6, mac) and split out the address, prefix, and port. Values are None if they don't exist.
[ { "docid": "0ab101390fc832573508d0eda37e7ee3", "score": "0.692085", "text": "def _split_addr(addr_str: str) -> Tuple:\n address = possible_addr = prefix = port = possible_port = None\n\n try:\n address, prefix = addr_str.rsplit('/', maxsplit=1)\n except Exception:\n address = addr_str\n\n # is this a mac address? then stop\n if re.match(r'(?:\\S\\S\\:){5}\\S\\S', address):\n return address, prefix, port\n\n # is it an ipv4 with port or just ipv6?\n if ':' in address:\n try:\n possible_addr, possible_port = address.rsplit(':', maxsplit=1)\n _ = ipaddress.IPv4Address(possible_addr)\n address = possible_addr\n port = possible_port\n # assume it was an IPv6 address\n except Exception:\n pass\n\n return address, prefix, port", "title": "" } ]
[ { "docid": "4f797d46fe6ac632ffd5a9a1ab83eeeb", "score": "0.64934886", "text": "def parse_address(address):\n ipv6 = 0\n for i in range(12):\n if address[i] != 0:\n ipv6 = 1\n break\n if ipv6 == 1:\n return socket.inet_ntop(\n socket.AF_INET6, struct.pack('!16B', *address))\n else:\n return socket.inet_ntop(\n socket.AF_INET,\n struct.pack('!BBBB', *address[12:]))", "title": "" }, { "docid": "e88b61dadbf9a1c043830b9c6d4d296d", "score": "0.620578", "text": "def split_host_port(hostport: str):\n if not hostport:\n # wtf do you want to split?\n raise AddressError(hostport, \"missing host and port in address\")\n\n missing_port = \"missing port in address\"\n too_many_colons = \"too many colons in address\"\n host = ''\n port = ''\n\n i = hostport.rfind(':')\n # if there is no ':' in hostport or ':' is the last thing\n # hostport then throw an error into the callers face\n if i < 0 or len(hostport) == i+1:\n raise AddressError(hostport, missing_port)\n\n if '[' in hostport and ']' in hostport:\n # we are treading ipv6 zone\n end = hostport.rfind(']') # get index of ]\n if end+1 == len(hostport):\n # if index of ] is the last thing then there is no port\n raise AddressError(hostport, missing_port)\n elif end+1 == i:\n # this is what we expect\n pass\n else:\n if hostport[end+1] == ':':\n # either ']' is followed by a colon or it is\n # but its not the last one\n raise AddressError(hostport, too_many_colons)\n raise AddressError(hostport, missing_port)\n # host port is worthy ipv6 and should be stripped now.\n host, port = hostport[0 : i], hostport[i+1:]\n host = host.strip('[]')\n elif '[' in hostport or ']' in hostport:\n # contains only one of '[' ']'\n raise AddressError(hostport, \"address has only one of '[' and ']'\")\n else:\n # not string representation of ipv6 but has more than one ':'\n host, port = hostport[0 : i], hostport[i+1:]\n if ':'in host:\n raise AddressError(hostport, too_many_colons)\n \n # we've made it this far and its cool. 
we can now start the splitting.\n return host, port", "title": "" }, { "docid": "5278c4eb8334b8bea16a151741de233f", "score": "0.62054735", "text": "def parse_address(readfunc):\n address_message = readfunc(1)\n address_type, = unpack('B', address_message)\n if address_type == socks5.IPV4:\n raw_address = readfunc(4)\n address = ipaddress.IPv4Address(raw_address).compressed\n elif address_type == socks5.IPV6:\n raw_address = readfunc(16)\n address = ipaddress.IPv6Address(raw_address).compressed\n elif address_type == socks5.DOMAIN:\n n = readfunc(1)\n address_message += n\n n, = unpack('B', n)\n address = raw_address = readfunc(n)\n else:\n return address_type, None, None, address_message\n address_message += raw_address\n raw_port = readfunc(2)\n address_message += raw_port\n port, = unpack('!H', raw_port)\n return address_type, address, port, address_message", "title": "" }, { "docid": "ef220017c8c667745404e59727faef02", "score": "0.6021543", "text": "def _parse_addresses(self, data: bytes):\n result = []\n try:\n stream = io.BytesIO(data)\n while stream.tell() < len(data):\n _type = int.from_bytes(stream.read(1), byteorder='big')\n if _type == 1: # IPv4 length 6\n ip = socket.inet_ntoa(stream.read(4))\n port = int.from_bytes(stream.read(2), byteorder='big')\n result.append(f\"{ip}:{port}\")\n elif _type == 2: # IPv6 length 18\n ip = socket.inet_ntop(socket.AF_INET6, stream.read(16))\n port = int.from_bytes(stream.read(2), byteorder='big')\n result.append(f\"[{ip}]:{port}\")\n elif _type == 3: # TORv2 length 12 (deprecated)\n stream.read(12)\n elif _type == 4: # TORv3 length 37\n addr = base64.b32encode(stream.read(35)).decode('ascii').lower()\n port = int.from_bytes(stream.read(2), byteorder='big')\n result.append(f\"{addr}.onion:{port}\")\n elif _type == 5: # DNS up to 258\n hostname_len = int.from_bytes(stream.read(1), byteorder='big')\n hostname = stream.read(hostname_len).decode('ascii')\n port = int.from_bytes(stream.read(2), byteorder='big')\n result.append(f\"{hostname}:{port}\")\n else: # Stop parsing at the first unknown type\n break\n # we simply pass exceptions and return what we were able to read so far\n except Exception:\n pass\n self.addresses = result", "title": "" }, { "docid": "feed03cdcb02466e08861cf523abb169", "score": "0.5922379", "text": "def resolve_ipaddress(address:str) -> Tuple[str, int, bool]:\n\tparts = address.split(\":\")\n\tip = parts[0]\n\ttry:\n\t\tinet_pton(AF_INET, ip)\n\t\tport = int(parts[1])\n\t\treturn ip, port, port >= 0 and port <= 65535\n\n\texcept OSError as e: #illegal ip address\n\t\tstderr.write(f\"{e}: {ip}\\n\")\n\n\texcept IndexError: #missing port\n\t\tstderr.write(f\"Bad NAMESERVER format (expected IPv4:PORT, got {address}).\\n\")\n\treturn None, None, False", "title": "" }, { "docid": "b4b68e8c402b0287f642a645fa0fefd1", "score": "0.59146124", "text": "def parse_addr(addr, port=20000):\n if addr == '':\n # no address given (default: localhost IPv4 or IPv6)\n return \"\", port, 0\n elif ']:' in addr:\n # IPv6 address with port\n ip, port = addr.rsplit(':', 1)\n return ip.strip('[]'), int(port), 6\n elif ']' in addr:\n # IPv6 address without port\n return addr.strip('[]'), port, 6\n elif addr.count(':') > 1:\n # IPv6 address without port\n return addr, port, 6\n elif ':' in addr:\n # IPv4 address with port\n ip, port = addr.split(':')\n return ip, int(port), 4\n else:\n # IPv4 address without port\n return addr, port, 4", "title": "" }, { "docid": "3b23ee4f7688bf0e21cc863c4b65a5a3", "score": "0.5900799", "text": "def _get_addr_info(self, 
host, port):\r\n addresses = getaddrinfo(host, port)\r\n v4_addresses = [info for info in addresses if info[0] == AF_INET]\r\n if has_ipv6:\r\n v6_addresses = [info for info in addresses if info[0] == AF_INET6]\r\n if v6_addresses and not v4_addresses:\r\n # The only time we return a v6 address is if it's the only option\r\n return v6_addresses[0]\r\n return v4_addresses[0]", "title": "" }, { "docid": "c435997053ca249ff2617f8afd19ed31", "score": "0.5773865", "text": "def toAddress(self, data):\n #data = data#.decode()\n data = data.decode()\n ip, port = data.split(':')\n return (ip, int(port))", "title": "" }, { "docid": "f5d2473f4e2d78bfd6d3ec4b581f695f", "score": "0.57585955", "text": "def _router_address(self, data):\n args = data.split()[1:]\n try:\n self._relay_attrs['ip_v6'].extend(args)\n except KeyError:\n self._relay_attrs['ip_v6'] = list(args)", "title": "" }, { "docid": "a37e0b13854baef2575a8638bf9e0312", "score": "0.5728167", "text": "def splitaddr(line):\n p = r\"IP:(?P<srcip>[a-fx0-9.]*):(?P<srcport>\\d+) --> (?P<dstip>[a-fx0-9.]*):(?P<dstport>\\d+)\"\n\n try:\n d = re.search(p, line).groupdict()\n except:\n return {\"srcip\": \"\", \"srcport\": None, \"dstip\": \"\", \"dstport\": None}\n\n if \"x\" in line:\n d[\"srcip\"] = \".\".join(str(int(x, 16)) for x in\n wrap(d[\"srcip\"][2:].zfill(8), 2))\n d[\"dstip\"] = \".\".join(str(int(x, 16)) for x in\n wrap(d[\"dstip\"][2:].zfill(8), 2))\n d[\"srcport\"] = int(d[\"srcport\"])\n d[\"dstport\"] = int(d[\"dstport\"])\n return d", "title": "" }, { "docid": "9978c60c60cd62cb4270ddc40cdf2c44", "score": "0.57269293", "text": "def parse_address(self, address):\n postcode = self.get_postcode(address)\n street = self.get_street(address)\n paon = self.get_paon(address)\n\n return paon, street, postcode", "title": "" }, { "docid": "dbf38b5417ebded7b8f2456e24144041", "score": "0.57154655", "text": "def _ParseAddr(addr_str):\n result = re.search(r'(.+):(\\d+)', addr_str)\n if not result:\n _Usage()\n ip = result.group(1)\n port = int(result.group(2))\n return (ip, port)", "title": "" }, { "docid": "63d03d8ae6587d093566b80af99d107b", "score": "0.57064843", "text": "def get_host_and_port(address: str, default_port: int) -> tuple:\n host_port = address.split(\":\")\n host = host_port[0]\n if len(host_port) == 2:\n port = int(host_port[1])\n else:\n port = default_port\n return host, port", "title": "" }, { "docid": "4bd87cc983565f92264c332a81e3a4ac", "score": "0.56836426", "text": "def split(addrinfo):\n ...", "title": "" }, { "docid": "42dad3286bd8415708cb56a179277420", "score": "0.56821144", "text": "def parse_ip_address_or_subnet(value):\r\n address, sep, subnet = value.partition(\"/\")\r\n if sep != \"\":\r\n subnet = int(subnet)\r\n assert 0 <= subnet < 32\r\n fields = address.split(\".\")\r\n assert len(fields) == 4\r\n for field in fields:\r\n num = int(field)\r\n assert 0 <= num < 256\r\n return value", "title": "" }, { "docid": "c8b45b21fb7958ce589f2b166c2746cb", "score": "0.5669089", "text": "def parse_address(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "1880e2617d5f9c5641869f0210741560", "score": "0.56525695", "text": "def get_address(source):\n chunks = source.split(':')\n return chunks[0], int(chunks[1]) if len(chunks) > 1 else NTP_PORT", "title": "" }, { "docid": "10f61713343e018ac5ffeeb7380a435f", "score": "0.56420726", "text": "def _parse_ipv4(ip):\n\n addr, port = ip.split(':')\n return addr, port", "title": "" }, { "docid": "aaf3419e0c1c3fc2e78b44bee94046d5", "score": "0.5618106", "text": 
"def parse(self, data, offset, indent=0):\n if len(data) <= offset:\n raise ParseError(\"Field %s is missing\" % self.name)\n mode = data[offset]\n if mode == 0:\n print_field(indent, self.name, \"Address not present\")\n return offset + 1\n if mode == 1:\n if len(data) < offset + 3:\n raise ParseError(\"Field %s is missing or short\" %\n self.addr_name)\n print_field(indent, self.name, \"Group address\")\n print_field(indent, self.addr_name,\n \"%04x\" % (data[offset+1] |\n (data[offset+2] << 8)))\n return offset + 3\n if mode == 2:\n if len(data) < offset + 3:\n raise ParseError(\"Field %s is missing or short\" %\n self.addr_name)\n print_field(indent, self.name, \"16-bit\")\n print_field(indent, self.addr_name,\n \"%04x\" % (data[offset+1] |\n (data[offset+2] << 8)))\n return offset + 3\n if mode == 3:\n if len(data) < offset + 9:\n raise ParseError(\"Field %s is missing or short\" %\n self.addr_name)\n if len(data) == offset + 9:\n raise ParseError(\"Field %s is missing\" % self.ep_name)\n print_field(indent, self.name, \"64-bit\")\n print_field(indent, self.addr_name,\n \":\".join(\"%02x\" % b\n for b in data[offset+8:offset:-1]))\n print_field(indent, self.ep_name, \"%02x\" % data[offset+9])\n return offset + 10\n if mode == 0xff:\n if len(data) < offset + 3:\n raise ParseError(\"Field %s is missing or short\" %\n self.addr_name)\n print_field(indent, self.name, \"Broadcast\")\n print_field(indent, self.addr_name,\n \"%04x\" % (data[offset+1] |\n (data[offset+2] << 8)))\n return offset + 3\n raise ParseError(\"Invalid field %s (%02x)\" % (self.name,\n data[offset]))", "title": "" }, { "docid": "e2d01d23d01e86d3099258266b139320", "score": "0.5573", "text": "def parse_bind_address(address, default=('localhost', 8080)):\n if ':' in address:\n host, port = address.split(':', 1)\n port = int(port)\n elif re.match(r'^\\d+$', address):\n host = default[0]\n port = int(address)\n else:\n host = address\n port = default[1]\n return host, port", "title": "" }, { "docid": "7901436a5905976cce9373652e11be4d", "score": "0.5545493", "text": "def parse_mac_address(cls, proposed_address: str) -> str:\n uppercase = proposed_address.upper()\n cleaned = uppercase.replace(\"-\", \"\") \\\n .replace(\".\", \"\") \\\n .replace(\":\", \"\")\n result = None\n if len(cleaned) == (2 * RmNetUtils.MAC_ADDRESS_LENGTH):\n match = re.match(r\"^[A-Z0-9]+$\", cleaned)\n if match is not None:\n result = cleaned\n return result", "title": "" }, { "docid": "eb791d951c594da301ef354d6c85483e", "score": "0.55362225", "text": "def pull_and_validate_addrs():\n print(\"Pulling and validating MAC addresses\")\n\n # Throw an error on a bad MAC address or add it to the global MAC address storage\n # TODO: The validation likely won't go in this script, but we'll keep it here for now\n global mac_addrs\n for addr in run_config.mac_addrs:\n if not re.match(\"[0-9a-f]{2}([:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\", addr.lower()):\n print(\"Provided address \" + addr + \" is not a vaid MAC address.\")\n mac_addrs.append(addr.lower())", "title": "" }, { "docid": "bd58451e31a60cc3a971cba6315ba3c3", "score": "0.5531523", "text": "def parse(self, addr):\n zip = self.regex_lib.zip_regex.search(addr)\n if zip:\n self.zip = zip.group(0).strip()\n addr = addr.replace(self.zip, '')\n else:\n self.zip = ''\n\n state = self.regex_lib.state_regex.search(addr)\n if state:\n self.state = state.group(0).strip().upper()\n addr = addr[0:state.span()[0]] \n \n # State Standardization - to abbreviation\n if (self.state) and (self.state.upper() in 
self.standards.states):\n self.state = self.standards.states[self.state.upper()]\n else:\n self.state = ''\n\n\n # TODO: What if pre text?\n # What if number LIKE A701?\n number = self.regex_lib.number_regex.search(addr)\n if number:\n self.number = number.group(0).strip()\n addr = addr.replace(self.number, '')\n if '-' in self.number:\n self.number = self.number[:self.number.index('-')]\n else:\n self.number = ''\n \n pobox = self.regex_lib.po_regex.search(addr)\n if pobox:\n self.po_box_flag = True\n\n\n # TODO: Apt Regex needs to be more flexible\n apt = self.regex_lib.apt_regex.search(addr.strip())\n if apt:\n self.apartment = apt.group(0).strip()\n addr = addr.replace(apt.group(0), '')\n # search for line 2 address\n apt = self.regex_lib.secondary_str_regex.search(addr.strip())\n if apt:\n self.apartment = apt.group(0).strip()\n addr = addr.replace(apt.group(0), '')\n\n # ISSUE: If no/incorrect zip: need street/city value...\n # HACK: Cannot standardize Street direction until 100% positive\n # that City is not in street - EAST HAMPTON , EAST HANOVER, ETC\n #intersection_test = re.search(r'(?:\\s(AT|@|AND|&)\\s)', addr)\n intersection_test = self.regex_lib.intersection_test.search(addr)\n if intersection_test:\n self.intersection_flag = True\n self.street1 = addr[0:intersection_test.span()[0]]\n addr = addr.replace(intersection_test.group(0), '')\n addr = addr.replace(self.street1, '')\n self.street1 = self.street1.upper()\n\n street = self.regex_lib.street_regex.search(addr)\n if street:\n street = street.group(0).strip().split(' ')\n if self.intersection_flag:\n self.street2 = ' '.join(street)\n else:\n self.street1 = ' '.join(street).upper().strip()\n self.delta = addr", "title": "" }, { "docid": "4253537d3006eb5cccfcf3942322e5aa", "score": "0.5527876", "text": "def _ip_from_options(self, option_name_prefix):\n ip = \"\"\n for i in [1, 2, 3, 4]:\n option = option_name_prefix + str(i)\n octet = self._get_option(option)\n if octet is None or len(str(octet)) < 1:\n return None\n\n ip = ip + str(octet)\n if i < 4:\n ip = ip + \".\"\n\n return ip", "title": "" }, { "docid": "631172bc33954af43af2ae43b0155e79", "score": "0.549989", "text": "def validate(value):\n try:\n wtypes.IPv4AddressType.validate(value)\n return value\n except ValueError:\n try:\n wtypes.IPv6AddressType.validate(value)\n return value\n except ValueError as e:\n error = 'Value should be IPv4 or IPv6 format'\n raise ValueError(error) from e", "title": "" }, { "docid": "304995f9b648c6f18691ee537a11cbb8", "score": "0.54927886", "text": "def ip_obj(addr: Text) -> Tuple[Text, Text]:\n\n try:\n # Is address an ipv4 or ipv6?\n ip = ipaddress.ip_address(addr.strip())\n\n if ip.version not in (4, 6):\n raise ValueError('Unknown IP version: %s' % ip.version)\n\n return (\"ipv{}\".format(ip.version), ip.exploded)\n except ValueError:\n raise ValueError('Invalid IP address: %s' % addr)", "title": "" }, { "docid": "10c6c9fdbdda38450d64c29efba1e21a", "score": "0.54877573", "text": "def _format_address(\n address: Any, output_format: str, must_contain: Tuple[str, ...], split: bool, errors: str\n) -> Any:\n address_dict, status = _check_address(address, must_contain, True)\n outputs = _address_dict_to_string(address_dict, output_format, split)\n\n if status == \"null\":\n return (np.nan,) * len(_get_column_names(output_format, split)) + (0,)\n\n elif status == \"unknown\":\n if errors == \"raise\":\n raise ValueError(f\"unable to parse value {address}\")\n return tuple(\n np.nan if not value else value if errors == \"ignore\" else 
np.nan for value in outputs\n ) + (1,)\n\n if len(outputs) == 1 and address == outputs[0]:\n code = 3\n else:\n code = 2\n return tuple(np.nan if not value else value for value in outputs) + (code,)", "title": "" }, { "docid": "1bedde1df918fc8def509adfd269cd67", "score": "0.5468448", "text": "def ipaddr(value, options=None):\n ipv4_obj = ipv4(value, options=options)\n ipv6_obj = ipv6(value, options=options)\n if ipv4_obj is None or ipv6_obj is None:\n # an IP address can be either IPv4 either IPv6\n # therefofe if the value passed as arg is not a list, at least one of the calls above will return None\n # if one of them is none, means that we should return only one of them\n return ipv4_obj or ipv6_obj # one of them\n else:\n return ipv4_obj + ipv6_obj # extend lists", "title": "" }, { "docid": "67626eb606b0722883bec7f265b2205d", "score": "0.5459971", "text": "def _guess_address_type(address) -> str:\n # the fewest number of : in an ipv6 is for ::1. A domain or IP should never have one so checking for 2 is safe\n if address.count(\":\") > 1:\n return \"ipv6\"\n\n # IPv4 must be 4 octets and no TLD can be a number so checking for both gives us a good guess between the two\n parts = address.split(\".\")\n if len(parts) == 4:\n if parts[3].isnumeric():\n if 0 <= int(parts[3]) < 256:\n return \"ipv4\"\n\n return \"domain\"", "title": "" }, { "docid": "391ef66b3e8e6fdd29a65a3db8e71de8", "score": "0.5458938", "text": "def is_netaddr(form, value):\n items = value.split()\n if len(items) > 1:\n raise ValidationError(\n lazy_gettext(u'You have to enter a valid net address.')\n )\n items = items[0].split(':')\n if len(items) not in (1, 2):\n raise ValidationError(\n lazy_gettext(u'You have to enter a valid net address.')\n )\n elif len(items) == 2 and not items[1].isdigit():\n raise ValidationError(lazy_gettext(u'The port has to be numeric'))", "title": "" }, { "docid": "3b3b2de9ea557449c2cfc124295fa132", "score": "0.54492474", "text": "def extract_ipv4(data):\r\n results = []\r\n flattened_data = flatten_list(data)\r\n addr, mask = None, None\r\n for i, v in enumerate(flattened_data):\r\n if v == 'ip' and (i < len(flattened_data) - 1):\r\n addr = flattened_data[i+1]\r\n if addr:\r\n addr = addr.strip('\"')\r\n elif v == 'mask' and (i < len(flattened_data) - 1):\r\n mask = flattened_data[i+1]\r\n if addr and mask:\r\n if re.match(r'\\d+\\.\\d+\\.\\d+\\.\\d+', addr) and (isinstance(mask, int) or re.match(r'\\d+', mask)):\r\n results.append((addr, str(mask)))\r\n addr, mask = None, None\r\n return results", "title": "" }, { "docid": "bd1bf4816d96c2245478146178d78e72", "score": "0.54396546", "text": "def _check_address(address: Any, must_contain: Tuple[str, ...], clean: bool) -> Any:\n if address in NULL_VALUES:\n return (None, \"null\") if clean else False\n\n address = re.sub(r\"[().]\", \"\", str(address))\n\n try:\n address, _ = tag(address, TAG_MAPPING)\n\n except RepeatedLabelError:\n return (None, \"unknown\") if clean else False\n\n status = _check_status(address, must_contain)\n\n if status:\n return (address, \"success\") if clean else True\n\n return (address, \"unknown\") if clean else False", "title": "" }, { "docid": "23a0fc2440614ac90a36c766eda9b685", "score": "0.54298544", "text": "def _extract_addr_info(self, xpath_sel):\n\n text_list = [a.strip() for a in xpath_sel.xpath(\"./text()\").extract() if a.strip()]\n if not text_list:\n #sometimes they send a longer list hidden in html !\n text_list = [a.strip() for a in xpath_sel.xpath(\"./div[@class='address']/text()\").extract() if 
a.strip()]\n\n name = xpath_sel.xpath(\".//strong/text()\")[0].extract()\n\n ma = get_fresh_merchant_address()\n\n if name:\n ma[\"address_name\"] = name\n\n #try to match the whole address\n addr_text = \" \".join(text_list)\n #print \"ADDR_TEXT : \",addr_text\n res = re.search(\"(.*)(\\d{3}\\-\\d{3}\\-\\d{4})\", addr_text)\n\n if res:\n final_address = res.group(1).strip()\n ma[\"address\"] = final_address\n ma[\"phone_number\"] = res.group(2).strip()\n else:\n ma[\"address\"] = addr_text.strip()\n\n return ma", "title": "" }, { "docid": "adc026c6bed04a193f997284b7f1a319", "score": "0.5422981", "text": "def post_parse_dev(self):\n \"\"\" \n Prefix Type-some of these are 2+ words. \n need to be able to handle this. Maybe via regex with anchor in front?\n If No City is found -> educated guess by chopping off chunks?\n \"\"\"\n try:\n\n street = self.street1.upper().strip()\n # Pre-Street Type\\\n pretype = self.regex_lib.street_prefix_regex.search(street.strip())\n if pretype:\n self.street1_pretype = pretype.group(0).strip().title()\n street = street.replace(pretype.group(0), '')\n \n addr_tokens = street.split(' ')\n \n if len(addr_tokens) > 2:\n # Post-Direction\n if addr_tokens[-1] in self.standards.street_direction:\n self.street1_postdir = self.standards.street_direction[addr_tokens[-1]]\n # remove addr_tokens[-1] \n addr_tokens[-1] = self.standards.street_direction[addr_tokens[-1]]\n del addr_tokens[-1]\n\n # Post Street Type\n if addr_tokens[-1] in self.standards.cannonical_types:\n self.street1_type = self.standards.cannonical_types[addr_tokens[-1]].strip()\n self.street1_type = self.street1_type.strip()\n del addr_tokens[-1]\n\n # Pre-Direction\n if len(addr_tokens) > 1 and addr_tokens[0] in self.standards.street_direction:\n street1_predir = self.standards.street_direction[addr_tokens[0]]\n del addr_tokens[0]\n\n else: \n # Post Street Type\n if addr_tokens[-1] in self.standards.cannonical_types:\n self.street1_type = self.standards.cannonical_types[addr_tokens[-1]].strip()\n del addr_tokens[-1]\n self.street1 = ' '.join(addr_tokens).strip()\n\n # State Standardization\n if (self.state) and (self.state.upper() in self.standards.states_v2):\n self.state = self.standards.states_v2[self.state.upper()]\n # self.state = self.state.title()\n self.state = self.state\n except:\n #print '\\t\\tERROR IN POST_PARSE_DEV: %s\\n%s' % (sys.exc_info()[1], sys.exc_info()[2] ) \n return", "title": "" }, { "docid": "8e538c53fb7b1b9a7b458a200d155a34", "score": "0.54136336", "text": "def ipv4_address(self):", "title": "" }, { "docid": "15d7d72a76c646bfadc605a0412d6370", "score": "0.5404541", "text": "def unpack_address(self, b):\n bytes_read = 0\n (length, r) = unpack_uint8_t(b[bytes_read])\n bytes_read+=r\n #addr = 0\n # an embedded (non-referenced) address\n if length != 0:\n (typ, r) = unpack_uint8_t(b[bytes_read])\n bytes_read+=r\n addr = b[bytes_read:bytes_read+length]\n bytes_read+=length\n addr_id = len(self.referenced_address)\n self.referenced_address[addr_id] = addr \n # a referenced address\n else:\n (addr_id, r) = unpack_uint32_t(b[bytes_read:])\n bytes_read+=r\n try:\n addr = self.referenced_address[addr_id]\n except:\n print(\"Die: couldn't find referenced address %d\" % addr_id)\n sys.exit(-1)\n if len(addr) == 4:\n return (socket.inet_ntop(socket.AF_INET, addr), bytes_read)\n elif len(addr) == 16:\n return (socket.inet_ntop(socket.AF_INET6, addr), bytes_read)\n else:\n assert False", "title": "" }, { "docid": "37c62b93c2156c97a70f6a795b8704f7", "score": "0.53985095", "text": "def 
prefix_to_network(prefix):\n ipaddr = ipaddress.ip_interface(prefix) # turn into ipaddress object\n address = ipaddr.ip\n mask = ipaddr.netmask\n return address, mask", "title": "" }, { "docid": "d93381c8be5837c35ff84373baa4e563", "score": "0.5387671", "text": "def test_get_family_from_address():", "title": "" }, { "docid": "9e2fff88e3e138af171216592ad0ff09", "score": "0.5380194", "text": "def mac_parser(mac_address_table_output):\n mac_table_regex = '(\\d*)\\s*(([0-9a-fA-F]{4}[.-:|]?){3})\\s*(\\w*)\\s*(.*)'\n list_of_mac_address_info = []\n for item in mac_address_table_output:\n if re.search(mac_table_regex, item):\n found = re.search(mac_table_regex, item)\n if found.group(5) == 'CPU': #Ignores entries in MAC address table with PORT as CPU\n pass\n else:\n dict = {}\n dict['VLAN'] = found.group(1)\n dict['MAC_ADDRESS'] = found.group(2)\n dict['LEARNED_VIA']= found.group(4) \n dict['INTERFACE'] = found.group(5)\n list_of_mac_address_info.append(dict)\n return list_of_mac_address_info", "title": "" }, { "docid": "33051f321f54c8dc9c40a282e58eba41", "score": "0.5377643", "text": "def __init__ (self, addr):\r\n # Always stores as a 6 character string\r\n if isinstance(addr, bytes) or isinstance(addr, basestring):\r\n if len(addr) == 6:\r\n # raw\r\n pass\r\n elif len(addr) == 17 or len(addr) == 12 or addr.count(':') == 5:\r\n # hex\r\n if len(addr) == 17:\r\n if addr[2::3] != ':::::' and addr[2::3] != '-----':\r\n raise RuntimeError(\"Bad format for ethernet address\")\r\n # Address of form xx:xx:xx:xx:xx:xx\r\n # Pick out the hex digits only\r\n addr = ''.join((addr[x*3:x*3+2] for x in xrange(0,6)))\r\n elif len(addr) == 12:\r\n pass\r\n else:\r\n # Assume it's hex digits but they may not all be in two-digit\r\n # groupings (e.g., xx:x:x:xx:x:x). This actually comes up.\r\n addr = ''.join([\"%02x\" % (int(x,16),) for x in addr.split(\":\")])\r\n # We should now have 12 hex digits (xxxxxxxxxxxx).\r\n # Convert to 6 raw bytes.\r\n addr = b''.join((chr(int(addr[x*2:x*2+2], 16)) for x in range(0,6)))\r\n else:\r\n raise RuntimeError(\"Expected ethernet address string to be 6 raw \"\r\n \"bytes or some hex\")\r\n self._value = addr\r\n elif isinstance(addr, EthAddr):\r\n self._value = addr.toRaw()\r\n elif type(addr) == list or (hasattr(addr, '__len__') and len(addr) == 6\r\n and hasattr(addr, '__iter__')):\r\n self._value = b''.join( (chr(x) for x in addr) )\r\n elif addr is None:\r\n self._value = b'\\x00' * 6\r\n else:\r\n raise RuntimeError(\"Expected ethernet address to be a string of 6 raw \"\r\n \"bytes or some hex\")", "title": "" }, { "docid": "4a24bd5eb10ddfb823af38f88c7d865b", "score": "0.5375611", "text": "def parse_network(value, cols):\n brd = None\n scope = None\n if \"/\" in value: # we have a CIDR in this address\n ip, cidr = value.split(\"/\")\n else:\n ip = value\n cidr = 32\n\n if type_ == \"inet\":\n mask = cidr_to_ipv4_netmask(int(cidr))\n if \"brd\" in cols:\n brd = cols[cols.index(\"brd\") + 1]\n elif type_ == \"inet6\":\n mask = cidr\n if \"scope\" in cols:\n scope = cols[cols.index(\"scope\") + 1]\n return (ip, mask, brd, scope)", "title": "" }, { "docid": "4c40ebd1caf6f7f6210192ea74d8c884", "score": "0.5372666", "text": "def test_part2 (self):\r\n h = '\\xfe\\x80\\x00\\x00\\x00\\x00\\x00\\x00\\xba\\x8d\\x12\\xff\\xfe\\x2a\\xdd\\x6e'\r\n a = IPAddr6.from_raw(h)\r\n assert str(a) == 'fe80::ba8d:12ff:fe2a:dd6e'\r\n assert a.raw == h\r\n\r\n assert a.num == 0xfe80000000000000ba8d12fffe2add6e\r\n assert IPAddr6.from_num(a.num) == a\r\n\r\n assert a.is_multicast is False\r\n 
assert IPAddr6(\"FF02:0:0:0:0:0:0:1\").is_multicast\r\n\r\n assert IPAddr6('2001:db8:1:2::').set_mac('00:1D:BA:06:37:64') \\\r\n == '2001:db8:1:2:021d:baff:fe06:3764'\r\n\r\n assert IPAddr6('0:0:0:0:0:FFFF:222.1.41.90') == '::ffff:222.1.41.90'\r\n assert IPAddr6('::ffff:C0A8:5') == '::ffff:192.168.0.5'\r\n assert IPAddr6('::ffff:192.168.0.5') == '::ffff:c0a8:5'", "title": "" }, { "docid": "823a7aa07e6314a875e7e29f191d28e6", "score": "0.5355483", "text": "def parse(self, data, offset, indent=0):\n if len(data) < offset:\n raise ParseError(\"Field %s is missing\" % self.name)\n if len(data) < offset + 9:\n raise ParseError(\"Field %s is missing or short\" %\n self.addr_name)\n mode = data[offset]\n if mode == 3:\n print_field(indent, self.name, \"64-bit\")\n print_field(indent, self.addr_name,\n \":\".join(\"%02x\" % b\n for b in data[offset+8:offset:-1]))\n elif mode == 2:\n print_field(indent, self.name, \"16-bit\")\n print_field(indent, self.addr_name,\n \"%04x\" % (data[offset+1] |\n (data[offset+2] << 8)))\n elif mode == 1:\n print_field(indent, self.name, \"Group address\")\n print_field(indent, self.addr_name,\n \"%04x\" % (data[offset+1] |\n (data[offset+2] << 8)))\n elif mode == 0:\n print_field(indent, self.name, \"Address not present\")\n elif mode == 0xff:\n print_field(indent, self.name, \"Broadcast\")\n print_field(indent, self.addr_name,\n \"%04x\" % (data[offset+1] |\n (data[offset+2] << 8)))\n else:\n print_field(indent, self.name, \"Unknown(%02x)\" % mode)\n print_field(indent, self.addr_name,\n \" \".join(\"0x%02x\" % b\n for b in data[offset+1:offset+9]))\n return offset + 9", "title": "" }, { "docid": "501c98d8cf2d3df3a67b590b7326c90f", "score": "0.5355263", "text": "def address(self, address, delimiter = \".\"):\n def list_addr(addr):\n \"\"\"list_addr(addr)\n Takes a list of digits representing an Ipv4 address and converts\n it to a single long\"\"\"\n if len(addr) != 4:\n raise ValueError('Expected 4 octet address, got %i in (%s)' %\n (len(addr), address))\n # converts a list into a single long\n # [255, 255, 255, 255] -> 0b11111111 11111111 11111111 11111111\n _addr = sum(\n [addr[octet] << (8 * (3 - octet)) for octet in range(4)]\n )\n return _addr\n\n def string_addr(addr):\n \"\"\"string_addr(addr)\n Takes a string representing an Ipv6 address, and converts it to a\n single long\"\"\"\n return list_addr(\n map(int, addr.split(self._delimiter))\n )\n\n self._delimiter = delimiter\n if isinstance(address, str):\n self._address = string_addr(address)\n elif isinstance(address, tuple) or isinstance(address, list):\n self._address = list_addr(address)\n elif isinstance(address, int) or isinstance(address, long):\n self._address = address\n elif isinstance(address, Ipv4):\n self._address = list_addr(list(address))\n else:\n raise TypeError(\n 'Unsupported type for address initialization %s, (%s)' % (\n type(address), address)\n )", "title": "" }, { "docid": "ef62aca307931dcd1ae01992f96e5346", "score": "0.5349561", "text": "def do_ipaddr(self, line):\n args = line.split(\" \")\n valid = 1\n preferred = 1\n flags = 0\n prefix_len = 64 # always use /64, as prefix.network.prefixlen returns /128.\n\n num = len(args)\n if (num > 1):\n ipaddr = args[1]\n prefix = ipaddress.IPv6Interface(unicode(ipaddr))\n arr = prefix.ip.packed\n\n if args[0] == \"\":\n v = self.prop_get_value(SPINEL_PROP_IPV6_ADDRESS_TABLE)\n # TODO: clean up table parsing to be less hard-coded magic.\n sz = 0x1B\n addrs = [v[i:i+sz] for i in xrange(0, len(v), sz)]\n for addr in addrs:\n addr = addr[2:18]\n 
print str(ipaddress.IPv6Address(addr))\n\n elif args[0] == \"add\":\n arr += pack('B', prefix_len) \n arr += pack('<L', valid)\n arr += pack('<L', preferred)\n arr += pack('B', flags)\n value = self.prop_insert_value(SPINEL_PROP_IPV6_ADDRESS_TABLE, \n arr, str(len(arr))+'s')\n if self.wpanApi.tun_if:\n self.wpanApi.tun_if.addr_add(ipaddr)\n\n elif args[0] == \"remove\":\n value = self.prop_remove_value(SPINEL_PROP_IPV6_ADDRESS_TABLE, \n arr, str(len(arr))+'s')\n if self.wpanApi.tun_if:\n self.wpanApi.tun_if.addr_del(ipaddr)\n\n print(\"Done\")", "title": "" }, { "docid": "dbcc9006161fac6d267c5b637684444d", "score": "0.53353196", "text": "def get_address_type(self, idx: int):\n if not self.announced or len(self.addresses) <= idx:\n return None\n addrstr = self.addresses[idx]\n if \".onion:\" in addrstr:\n return 'tor'\n if addrstr[0].isdigit():\n return 'ipv4'\n if addrstr.startswith(\"[\"):\n return 'ipv6'\n return 'dns'", "title": "" }, { "docid": "37e235573a746a3b02fd4a20671953a5", "score": "0.5334579", "text": "def parse_address(self, address):\n num = len(address)\n if num == 0:\n return {}\n if isinstance(address[num-1], dict):\n map = address[num-1].copy()\n num -= 1\n else:\n map = {}\n if num == 1:\n map[\"host\"] = address[0]\n elif num == 2:\n map[\"host\"] = address[0]\n map[\"port\"] = address[1]\n elif num > 2:\n raise ValueError\n return map", "title": "" }, { "docid": "e8667f5a2fa677d57e24308cdb7dd837", "score": "0.5328319", "text": "def _process_binding_address(self, binding, is_ipv6, is_anycast, vip_exempt):\n addr = binding[\"address\"]\n if is_ipv6:\n # we need to translate the netmask6 exposed by puppet into a prefix length since ipaddress\n # library does not support this case.\n nm = self._get_ipv6_prefix_length(binding[\"netmask\"])\n else:\n nm = binding[\"netmask\"]\n\n address = ipaddress.ip_interface(f\"{addr}/{nm}\")\n\n if ((address.is_link_local) or (address.is_loopback)):\n # We skip link local and loopback addresses\n return None\n\n # Warn the user if one of the IP is an auto-config IP and skip it\n if is_ipv6 and address.exploded[27:32] == 'ff:fe':\n self.log_warning(f\"{address}: skipping SLAAC IP\")\n return None\n\n if vip_exempt:\n # FIXME\n # this is a bug in our deployment of certain servers where some service addresses have\n # an incorrect netmask and aren't actually VIPs\n # figure out the actual netmask from the prefix\n prefixq = PrefixFilterSet().search_contains(Prefix.objects.all(), \"\", str(address))\n if not prefixq:\n self.log_failure(f\"Can't find matching prefix for {address} when fixing netmask!\")\n return None\n realnetmask = max([i.prefix.prefixlen for i in prefixq])\n address = ipaddress.ip_interface(f\"{addr}/{realnetmask}\")\n self.log_info(\"VIP exempt: Overriding provided netmask\")\n\n if (is_anycast or (address.network.prefixlen in (32, 128))):\n self._handle_vip(address, is_anycast)\n return None\n\n return address", "title": "" }, { "docid": "97ec0d934c2c510915978b447ccada2f", "score": "0.5315125", "text": "def parse_address(addr_entry):\n label = get_label_from_schema(addr_entry)\n if addr_entry.po_box:\n street = safe_text(addr_entry.po_box)\n else:\n street = safe_text(addr_entry.street)\n\n # figure out neighborhood/city distinction.\n neighborhood = None\n if addr_entry.neighborhood:\n if not addr_entry.city:\n city = addr_entry.neighborhood.text\n else:\n neighborhood = addr_entry.neighborhood.text\n city = addr_entry.city.text\n else:\n city = safe_text(addr_entry.city)\n\n region = safe_text(addr_entry.region)\n 
postcode = safe_text(addr_entry.postcode)\n country = safe_text(addr_entry.country)\n return (label, street, neighborhood, city, region, postcode, country)", "title": "" }, { "docid": "a8882691a50ea88fc6cf11fa460e5ea3", "score": "0.53118026", "text": "def get_ipv4_info(**connection_info):\n ipv4_info = {}\n res = {}\n if connection_info['IPv4']['method'] == 'auto':\n ipv4_info['method'] = 'auto'\n if connection_info['IPv4']['ignore-auto-dns']:\n ipv4_info['ignore-auto-dns'] = connection_info['IPv4']['ignore-auto-dns']\n if connection_info['IPv4']['never-default']:\n ipv4_info['never-default'] = connection_info['IPv4']['never-default']\n if connection_info['IPv4']['dns']:\n # Check if dns is valid\n if check_ipv4(*connection_info['IPv4']['dns']):\n ipv4_info['dns'] = connection_info['IPv4']['dns']\n else:\n res[conststr.RET_TYPE] = conststr.FAILED_RES\n res[conststr.RET_MSG] = conststr.INVALID_DNS_ADDR\n res[conststr.RET_CODE] = -1\n elif connection_info['IPv4']['method'] == 'manual':\n ipv4_info['method'] = 'manual'\n if connection_info['IPv4']['addresses']:\n # Check if ipv4 address is valid.\n ips = [x[0] for x in connection_info['IPv4']['addresses']] + [x[2] for x in connection_info['IPv4']['addresses']]\n pfs = [x[1] for x in connection_info['IPv4']['addresses']]\n if not check_ipv4(*ips):\n res[conststr.RET_TYPE] = conststr.FAILED_RES\n res[conststr.RET_MSG] = conststr.INVALID_IPV4_ADDR\n res[conststr.RET_CODE] = -1\n elif not check_pf4(*pfs):\n res[conststr.RET_TYPE] = conststr.FAILED_RES\n res[conststr.RET_MSG] = conststr.INVALID_IPV4_PREFIX\n res[conststr.RET_CODE] = -1\n else:\n ipv4_info['addresses'] = connection_info['IPv4']['addresses']\n else:\n res[conststr.RET_TYPE] = conststr.FAILED_RES\n res[conststr.RET_MSG] = conststr.EMPTY_IPV4_ADDR\n res[conststr.RET_CODE] = -1\n if connection_info['IPv4']['dns']:\n # Check if dns is valid\n if check_ipv4(*connection_info['IPv4']['dns']):\n ipv4_info['dns'] = connection_info['IPv4']['dns']\n else:\n res[conststr.RET_TYPE] = conststr.FAILED_RES\n res[conststr.RET_MSG] = conststr.INVALID_DNS_ADDR\n res[conststr.RET_CODE] = -1\n if connection_info['IPv4']['ignore-auto-dns']:\n ipv4_info['ignore-auto-dns'] = connection_info['IPv4']['ignore-auto-dns']\n if connection_info['IPv4']['never-default']:\n ipv4_info['never-default'] = connection_info['IPv4']['never-default']\n elif connection_info['IPv4']['method'] == 'link-local':\n ipv4_info['method'] = 'link-local'\n if connection_info['IPv4']['ignore-auto-dns']:\n ipv4_info['ignore-auto-dns'] = connection_info['IPv4']['ignore-auto-dns']\n if connection_info['IPv4']['never-default']:\n ipv4_info['never-default'] = connection_info['IPv4']['never-default']\n elif connection_info['IPv4']['method'] == 'disabled':\n ipv4_info['method'] = 'disabled'\n return ipv4_info, res", "title": "" }, { "docid": "40a34061855701db58d67ae646c32de4", "score": "0.528599", "text": "def parseAddress(self, address):\n add_list = []\n for i in range(4):\n add_list.append(int(address.hex()[(i * 2): (i + 1) * 2], 16))\n add_str = (\n str(add_list[0])\n + \".\"\n + str(add_list[1])\n + \".\"\n + str(add_list[2])\n + \".\"\n + str(add_list[3])\n )\n return add_str", "title": "" }, { "docid": "3aebee88b657a52ad47daf580fd393f0", "score": "0.52794445", "text": "def splitaddr(line):\n pattern = r\"(IN|OUT): ([0-9.]*):(\\d+) --> ([0-9.]*):(\\d+) \\((\\D+)\\)\"\n keys = (\"direction\", \"srcip\", \"srcport\", \"dstip\", \"dstport\", \"proto\")\n m = re.search(pattern, line)\n try:\n return dict((k, v) for k, v in zip(keys, 
m.groups()))\n except:\n return dict((k, None) for k in keys)", "title": "" }, { "docid": "677cbd42ab236ac8a957610fa272e212", "score": "0.52793294", "text": "def _parse_host_header(self):\n host = self.host_header\n if not host:\n return None, None\n port = None\n m = host_header_re.match(host)\n if m:\n host = m.group(\"host\").strip(\"[]\")\n if m.group(\"port\"):\n port = int(m.group(\"port\"))\n return host, port", "title": "" }, { "docid": "983e37b57b7d8bd83691c5a4126cf838", "score": "0.5267389", "text": "def _parse_address(key):\n return key.split('/')[-2]", "title": "" }, { "docid": "0ad177d2b9b24a907510d914fa6e36aa", "score": "0.5260803", "text": "def get_address_components(full_address=None, short_address=None):\n raw_address = None\n address_dict = {}\n from_full_address = False\n\n if full_address:\n raw_address = full_address\n from_full_address = True\n\n elif short_address:\n raw_address = short_address\n\n if raw_address:\n if ',' not in raw_address:\n address_dict['address1'] = raw_address\n else:\n address_componets = raw_address.split(',')\n if len(address_componets) == 4:\n address_dict['address1'] = address_componets[0]\n address_dict['address2'] = address_componets[1].strip()\n address_dict['city'] = address_componets[2].strip().title()\n elif len(address_componets) == 3 and from_full_address:\n address_dict['address1'] = address_componets[0]\n address_dict['city'] = address_componets[1].strip().title()\n elif not from_full_address:\n address_dict['city'] = address_componets[0].strip().title()\n \n # Try to get zip code\n zip_code = re.search(r'(\\d{5})([-])?(\\d{4})?', raw_address)\n address_dict['zip_code'] = ''.join(match for match in zip_code.groups() if match) if zip_code else None\n\n state = re.search(r'([A-Z]{2})', raw_address)\n address_dict['state'] = state.group(0) if state else None\n\n if 'United States'.lower() in raw_address.lower() or state or zip_code:\n address_dict['country'] = 'US'\n address_dict['full_address'] = raw_address\n return address_dict", "title": "" }, { "docid": "07714853de2e2aee7fb6b7e53561c00c", "score": "0.513481", "text": "def _parse_request_ipv4(self, data: bytes) -> None:\n host = socket.inet_ntoa(data[:4])\n port = struct.unpack(\"!H\", data[4:6])[0]\n self._done_parsing(host, port)", "title": "" }, { "docid": "58f3129f17f999c0ce47323432bac75d", "score": "0.525287", "text": "def address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"address_prefixes\")", "title": "" }, { "docid": "6037eff73a9fe2443cabc05f21299a91", "score": "0.5245369", "text": "def full_address(self, separator=\", \"):\n addr = separator.join(filter(None, (self.addr_street, self.addr_line2,\n self.addr_town, self.addr_postcode)))\n if not addr.strip():\n return 'Address unknown'\n return addr", "title": "" }, { "docid": "635861155cbb74fdfa2c80e64e0bf156", "score": "0.5241696", "text": "def _structured_addr(match):\r\n\r\n res = (match.group('name'), {\r\n 'addr': match.group('addr'),\r\n 'type': match.group('type'),\r\n 'scope': match.group('scope'),\r\n })\r\n\r\n brd 
= match.group('brd')\r\n alias = match.group('alias')\r\n extra = match.group('extra')\r\n\r\n if brd:\r\n res[1]['brd'] = brd\r\n if alias:\r\n res[1]['alias'] = alias\r\n if extra:\r\n res[1]['settings'] = _dict_from_spaced_kv(extra)\r\n\r\n return res", "title": "" }, { "docid": "602e8e17afc84f74c9f02034ae7bfdb9", "score": "0.52396375", "text": "def valid_ip_addresses():\r\n temp = []\r\n\r\n for i in psutil.net_if_addrs().items():\r\n for j in i[1]:\r\n if re.match(r\"192\\.168\\..+\\..+\",j[1]):\r\n temp.append(j[1])\r\n \r\n return temp", "title": "" }, { "docid": "9327cc0ff5af097fbe212839d195a06c", "score": "0.5239225", "text": "def _prom_addr_of_element(element):\n if not element:\n # this is sciond\n return '127.0.0.1', 32040\n (addrs_selector, public_keyword, bind_keyword, port_keyword) = \\\n ('InternalAddrs','PublicOverlay','BindOverlay', 'OverlayPort') if 'InternalAddrs' in element.keys() \\\n else ('Addrs','Public','Bind', 'L4Port')\n addrs = next(iter(element[addrs_selector].values()))\n addr_type = bind_keyword if bind_keyword in addrs.keys() else public_keyword\n ip = addrs[addr_type]['Addr']\n port = addrs[addr_type][port_keyword] + PROM_PORT_OFFSET\n return ip, port", "title": "" }, { "docid": "f3e94547f9098f8ff2d8fca6f4c4e5fd", "score": "0.5236579", "text": "def urlsafe_address(address):\n addr, port, *rest = address\n\n if rest:\n # An IPv6 address needs to be surrounded by square brackets\n addr = f'[{addr}]'\n\n return addr, port", "title": "" }, { "docid": "b53e2d269f9a772227ebca3b1684bb71", "score": "0.52343327", "text": "def resolve_router():\n gws = netifaces.gateways()\n gateway_ip = gws['default'][netifaces.AF_INET][0]\n output = check_output([\"nmap\", \"-sP\", \"-n\", gateway_ip]).decode(sys.stdout.encoding)\n regmatch = re.compile(\"MAC Address: ([0-9A-F:]+) ((.*))\", re.MULTILINE)\n matches = regmatch.findall(output)\n if matches is None or len(matches) > 0:\n return matches[0][1]\n return \"Unknown\"", "title": "" }, { "docid": "d63311dfb459233f47df9cc4aff8a4e7", "score": "0.5233224", "text": "def extract_address(search_response):\n if type(search_response) is not SearchResponse:\n raise CaprotoTypeError(\"expected SearchResponse, not {!r}\"\n \"\".format(type(search_response).__name__))\n if search_response.header.parameter1 == 0xffffffff:\n # The CA spec tells us that this sentinel value means we\n # should fall back to using the address of the sender of\n # the UDP datagram.\n address = search_response.sender_address[0]\n else:\n address = search_response.ip\n return (address, search_response.port)", "title": "" }, { "docid": "5f8d6d60fcba1940db2642197e6f65cd", "score": "0.52272934", "text": "def addrstr(addr):\n sz = len(addr)\n if sz == 4: # IP\n return inet_ntoa(addr)\n elif sz == 6: # MAC\n return hexstr(addr)\n else:\n logging.warning('unexpected address length: %d' % sz)\n return hexstr(addr)", "title": "" }, { "docid": "80cd965d2ed720856372215c2b181095", "score": "0.5224687", "text": "def handleLine(ip4, ip6, names, outp, generate):\n\n #print(\"_{}_\\t_{}_\\t_{}_\".format(ip4, ip6, ' '.join(names)), file=debugOut)\n\n # at least one address has to be supplied\n if ip4 is None and ip6 is None:\n print(\"Neither IPv4 nor IPv6 address were specified for name(s) \" + ','.join(names), file=debugOut)\n return False\n\n # there must be at least one name (currently this is handled by the line-parser, so this should never happen)\n if len(names) <= 0:\n print(\"No names were specified for {}, {}\".format(ip4, ip6), file=debugOut)\n return False\n\n 
try:\n if ip4 is None:\n ip4addr = None\n else:\n ip4addr = ipaddress.IPv4Address(ip4)\n if not ip4addr in ip4_range:\n print(\"IPv4 address not in permitted range \" + str(ip4_range), file=debugOut)\n return False\n\n if ip6 is None:\n ip6addr = None\n else:\n ip6addr = ipaddress.IPv6Address(ip6)\n if not ip6addr in ip6_range:\n print(\"IPv6 address not in permitted range \" + str(ip6_range), file=debugOut)\n return False\n\n except Exception as e:\n print(\"Invalid IP address: \" + repr(e), file=debugOut)\n return False\n\n for name in names:\n if nameRegex.match(name) is None:\n print(\"Invalid name \\\"{}\\\", does not match {}\".format(name, nameRegex.pattern), file=debugOut)\n return False\n\n #TODO: check for double addresses\n #TODO: check for double DNS names\n\n if generate:\n primaryName = names[0]\n\n if not ip4addr is None:\n for name in names:\n outp['arecords'] += \"{name}.{domain}\\tA\\t{ip} ~\\n\".format(name=name, ip=ip4, domain=domain)\n ip4_r = ipaddress.IPv4Address(ip4addr.packed[::-1])\n outp['aptrs'] += \"{rip}.in-addr.arpa.\\tPTR\\t{name}.{domain} ~\\n\".format(rip=ip4_r, domain=domain, name=primaryName)\n\n if not ip6addr is None:\n for name in names:\n outp['arecords'] += \"{name}.{domain}\\tAAAA\\t{ip} ~\\n\".format(name=name, ip=ip6, domain=domain)\n ip6_r = '.'.join(ip6addr.exploded.translate({ord(\":\"): None})[::-1])\n outp['aaaaptrs'] += \"{rip}.ip6.arpa.\\tPTR\\t{name}.{domain} ~\\n\".format(rip=ip6_r, domain=domain, name=primaryName)\n\n return True", "title": "" }, { "docid": "359b536cdca42fad074bb06aa60b3037", "score": "0.5221294", "text": "def parse_address(address):\n province = None\n region1 = None\n region2 = None\n\n if not address or not isinstance(address, basestring):\n return (province, region1, region2)\n \n addresses = address.split() \n\n # province\n try:\n province = addresses[0]\n except IndexError, e:\n pass\n\n # region1\n try:\n region1 = addresses[1]\n except IndexError, e:\n pass\n\n # region2\n try:\n region2 = addresses[2]\n except IndexError, e:\n pass\n\n return (province, region1, region2)", "title": "" }, { "docid": "4dd7abf2b8256758d48a9c953782b5a8", "score": "0.52157223", "text": "def IpAddress(address, version=None):\n\n if version:\n if version == 4:\n return Ipv4Address(address)\n elif version == 6:\n return Ipv6Address(address)\n\n try:\n return Ipv4Address(address)\n except (ValueError):\n pass\n\n try:\n return Ipv6Address(address)\n except (ValueError):\n pass\n\n raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % address)", "title": "" }, { "docid": "d3d6eb686a4050d0563ed05817c449df", "score": "0.5211757", "text": "def resolver(host: str, port: str, network: str) -> tuple[list, AddrConfig]:\n config = config_inetaddr(host, port, network)\n addr_list = inet_addr_list(config.get_config(), network)\n return addr_list, config", "title": "" }, { "docid": "07ecb155d696090c85f2e9730d1ec2cd", "score": "0.5211693", "text": "def test_basics_part1 (self):\r\n a = IPAddr6('2001:0db8:85a3:0000:0000:8a2e:0370:7334')\r\n assert str(a) == '2001:db8:85a3::8a2e:370:7334','minimal repr'\r\n assert a.to_str(zero_drop=False) == \\\r\n '2001:0db8:85a3::8a2e:0370:7334', 'no zero drop'\r\n assert a.to_str(section_drop=False) == \\\r\n '2001:db8:85a3:0:0:8a2e:370:7334', 'no section drop'\r\n assert a.to_str(section_drop=False, zero_drop=False) == \\\r\n '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'full length'\r\n assert str(IPAddr6('0:0:0:0:0:0:0:1')) == '::1', 'loopback'\r\n assert str(IPAddr6('0:0:0:0:0:0:0:0')) == '::', 
'unspecified'\r\n assert str(IPAddr6('2001:db8:0:0:0:0:2:1')) == '2001:db8::2:1'\r\n assert str(IPAddr6('2001:db8:0000:1:1:1:1:1')) == '2001:db8:0:1:1:1:1:1'\r\n assert str(IPAddr6('2001:db8:0:0:1:0:0:1')) == '2001:db8::1:0:0:1'\r\n assert str(IPAddr6('1:0:0:2:0:0:0:3')) == '1:0:0:2::3'", "title": "" }, { "docid": "6e2ebe96979672f574af780d1ab37930", "score": "0.51968217", "text": "def find_macaddr2(debug=False):\n pairs=[]\n sas=[]\n for ts_sec,ts_usec,ts_nsec,dbytes,ch in self.pkts:\n ehdr=self.eth_hdr(dbytes,debug=False)\n if ehdr.sa not in sas:\n sas.append(ehdr.sa)\n if (ehdr.sa,ehdr.da) not in pairs:\n pairs.append((ehdr.sa,ehdr.da))\n for macaddr in sas:\n for pair in pairs: \n if pair[1]=='ffffffffffff':\n continue\n if macaddr not in pair:\n break\n if debug:\n print pairs\n print filter(lambda x: x[0]==macaddr,pairs)\n print filter(lambda x: x[0]!=macaddr,pairs)\n return macaddr\n return None", "title": "" }, { "docid": "e5187f7a95249385b78b6141d7abe2b6", "score": "0.51743704", "text": "def segwit_addr_decode(hrp, addr):\n hrpgot, data = bech32_decode(addr)\n if hrpgot != hrp:\n return None, None\n decoded = convertbits(data[1:], 5, 8, False)\n if decoded is None or len(decoded) < 2 or len(decoded) > 40:\n return None, None\n if data[0] > 16:\n return None, None\n if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:\n return None, None\n return data[0], decoded", "title": "" }, { "docid": "76b9b2a65ba96b8cffc168d6d8b82185", "score": "0.5166014", "text": "def extract_registration_address(self, input_dict):\n\n # columns to create address, in order\n address_components = [\n 'HOME-NO',\n 'HOME-STREET',\n 'HOME-APT',\n 'HOME-DEV',\n ]\n # create address string for usaddress.tag\n address_str = ' '.join([\n input_dict[x] for x in address_components if input_dict[x] is not None\n ])\n\n raw_dict = {\n 'RAW_ADDR1': address_str,\n # Including Raw Addr 2 as same because not as clear of a division\n 'RAW_ADDR2': address_str,\n 'RAW_CITY': input_dict['HOME-CITY'],\n 'RAW_ZIP': input_dict['HOME-ZIPCODE']\n }\n\n usaddress_dict = self.usaddress_tag(address_str)[0]\n\n if usaddress_dict:\n converted_addr = self.convert_usaddress_dict(usaddress_dict)\n\n converted_addr.update({\n 'PLACE_NAME': input_dict['HOME-CITY'],\n 'ZIP_CODE': input_dict['HOME-ZIPCODE'],\n 'VALIDATION_STATUS': '2'\n })\n converted_addr.update(raw_dict)\n else:\n converted_addr = self.constructEmptyResidentialAddress()\n converted_addr.update(raw_dict)\n converted_addr.update({\n 'STATE_NAME': input_dict['STATE'],\n 'VALIDATION_STATUS': '1'\n })\n\n return converted_addr", "title": "" }, { "docid": "657720069a71bfe3df5f3baa63d83028", "score": "0.51587725", "text": "def getIPv4Addresses():\n \n STRUCT_SIZE = 40 # 64bit environment - each name/IP pair is 40 bytes\n SIOCGIFCONF = 35090 # addr from http://pydoc.org/1.6/SOCKET.html\n BYTES = 4096 # buffer size to use\n IGNORED_IFACES = ('lo','virbr','tun','tap')\n \n sck = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n \n names = array.array('B', '\\0' * BYTES)\n outbytes = struct.unpack('iL', fcntl.ioctl(\n sck.fileno(),\n SIOCGIFCONF, \n struct.pack('iL', BYTES, names.buffer_info()[0])))[0]\n\n namestr = names.tostring()\n\n if_list = [(namestr[i:i+16].split('\\0', 1)[0],\n socket.inet_ntoa(namestr[i+20:i+24])) \n for i in range(0, outbytes, STRUCT_SIZE)] \n\n return [ifaddr for ifname,ifaddr in if_list \n if not ifname.startswith(IGNORED_IFACES) ]", "title": "" }, { "docid": "8c610b93f55f212c937e65b2da5f96f7", "score": "0.5157077", "text": "def 
serialize_network_address_values_given(self, address):\n logging.info('SER Serialize network address values given.')\n\n ip = address[2]\n if '.' in ip:\n # ipv4; unused (12 bytes) + ipv4 (4 bytes) = ipv4-mapped ipv6 address\n host = bytearray.fromhex(\"00000000000000000000ffff\") + socket.inet_aton(ip)\n elif ':' in ip and not ip.endswith('.onion'):\n # ipv6; ipv6 (16 bytes)\n host = socket.inet_pton(socket.AF_INET6, ip)\n elif ip.endswith(\".onion\"):\n # convert .onion address to its ipv6 equivalent (6 + 10 bytes)\n host = ONION_PREFIX + b32decode(ip[:-6], True)\n else:\n # the address was not in the correct format, therefore return empty bytes\n return b''\n\n return struct.pack('<I', int(address[0])) + struct.pack(\"<Q\", int(address[1])) + struct.pack('>16sH', host, int(address[3]))", "title": "" }, { "docid": "07714853de2e2aee7fb6b7e53561c00c", "score": "0.513481", "text": "def test_format_address():\n protocol = 'tcp'\n host = '127.0.0.1'\n port = 5555\n address = ZMQComm.format_address(protocol, host, port)\n result = ZMQComm.parse_address(address)\n nt.assert_equal(result['protocol'], protocol)\n nt.assert_equal(result['host'], host)\n nt.assert_equal(result['port'], port)\n nt.assert_raises(ValueError, ZMQComm.parse_address, 'INVALID')\n nt.assert_raises(ValueError, ZMQComm.parse_address, 'INVALID://')", "title": "" }, { "docid": "ae74498f8cd4e8f3b39de709e0ec2f22", "score": "0.5126181", "text": "def parse_prefix(prefix, default_length=24):\n if '/' in prefix:\n network, pfxlen = prefix.split('/')\n else:\n network = prefix\n pfxlen = default_length\n return network, int(pfxlen)", "title": "" }, { "docid": "22a261e1e31f7a9a4ec855b59207d4e6", "score": "0.5111533", "text": "def format_address(address):\n host, port = address[:2]\n if port is not None:\n return f\"[{host}]:{port}\"\n else:\n return f\"[{host}]\"", "title": "" }, { "docid": "b5738ae143286a659406d65eb8d3c5d1", "score": "0.5109787", "text": "def _split_address(self, address):\n\n if address < 0:\n raise ValueError(\"address must be non-negative!\")\n\n # The offset is contained in the lowest 'self.offset_bits' bits.\n offset_mask = (1 << self.offset_bits) - 1\n offset = address & offset_mask\n\n # The slot id is contained in the middle 'self.slot_bits' bits.\n offset_and_slot_mask = (1 << (self.slot_bits + self.offset_bits)) - 1 \n slot = (address & offset_and_slot_mask) >> self.offset_bits\n\n # The tag is contained in the remaining bits above the offset and slot id.\n tag_mask = ~offset_and_slot_mask\n tag = (address & tag_mask) >> (self.slot_bits + self.offset_bits)\n\n return tag, slot, offset", "title": "" }, { "docid": "ac4970a3e321bdbd4ba0e9b7b817c063", "score": "0.5100607", "text": "def parse_daemon_addr(self, daemon_addr):\n p = self.mk_generic_daemon_addr_pattern_regexp()\n m = p.search(daemon_addr)\n if m is None: return None,None,None\n prefix,gupid,pid = m.group(\"prefix\", \"gupid\", \"pid\")\n pid = self.safe_atoi(pid, None)\n if pid is None: return None,None,None\n return prefix,gupid,pid", "title": "" }, { "docid": "a710db806929bd7ad409cd4492021927", "score": "0.5094197", "text": "def ipv6_address(self):", "title": "" }, { "docid": "ad43bcc751db2888aa4638b42b1a0b3d", "score": "0.50940126", "text": "def convert_address(row, me, alias_map):\n if isinstance(me, str): me = me.decode('utf-8')\n address = format_phone(row['address'])\n if alias_map and row['group_id'] in alias_map:\n other = alias_map[row['group_id']]\n else:\n other = address\n \n if row['flags'] == 2:\n from_addr = other\n to_addr = me\n 
elif row['flags'] == 3:\n from_addr = me\n to_addr = other\n \n return (from_addr, to_addr)", "title": "" }, { "docid": "de96db539c8ffc85af3170d40cfc52fe", "score": "0.5090147", "text": "def _FormatInAddrExToken(self, token_data):\n protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.net_type, 'UNKNOWN')\n if token_data.net_type == 4:\n ip_address = self._FormatPackedIPv6Address(token_data.ip_address[:4])\n elif token_data.net_type == 16:\n ip_address = self._FormatPackedIPv6Address(token_data.ip_address)\n return {\n 'protocols': protocol,\n 'net_type': token_data.net_type,\n 'address': ip_address}", "title": "" }, { "docid": "b136952dd47b4a55ed419cc28814e0d7", "score": "0.5085396", "text": "def mac_address():\n # This procedure has given us problems in the past, so surround with try-except\n try:\n # Get the IP of servcinf-sql\n sql_ip = socket.gethostbyname('servcinf-sql')\n # Get the route for the servcinf-sql ip, it will look like one of these:\n #10.54.6.26 dev eth0 src 10.54.6.43 \\ cache\n #130.225.86.27 via 10.54.6.1 dev eth0 src 10.54.6.43 \\ cache\n interface_string = subprocess.check_output(\n ['ip', '-o', 'route', 'get', sql_ip]\n ).split()\n\n # The interface name e.g. \"eth0\" is the first item after \"dev\"\n ifname = interface_string[interface_string.index('dev') + 1]\n\n # Get an infostring for a socket connection of this interface name\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n info = fcntl.ioctl(sock.fileno(), 0x8927, struct.pack(b'256s', ifname[:15]))\n if sys.version < '3':\n return ':'.join(['%02x' % ord(char) for char in info[18:24]])\n else:\n return ':'.join(['%02x' % char for char in info[18:24]])\n except: # pylint: disable=bare-except\n return 'MAC ADDRESS UNKNOWN'", "title": "" }, { "docid": "e1ec2f6c8a7f2c83910515701af9f4be", "score": "0.50825006", "text": "def address_prefixes(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"address_prefixes\")", "title": "" }, { "docid": "d6cb8d1f28fa00012479a1204472e275", "score": "0.5081402", "text": "def __init__(self, address):\n super(Ipv4, self).__init__(address)", "title": "" }, { "docid": "cf1a7a59f22ac36d2582564729ca73ce", "score": "0.5075956", "text": "def parse_ip4_address(cls, proposed_value: str) -> [int]:\n result: [int] = None\n accumulate: [int] = []\n tokens: [str] = proposed_value.split(\".\")\n if len(tokens) == RmNetUtils.IP4_ADDRESS_LENGTH:\n valid: bool = True\n for this_token in tokens:\n try:\n token_parsed: int = int(this_token)\n if (token_parsed >= 0) & (token_parsed <= 255):\n accumulate.append(token_parsed)\n else:\n # Number is out of acceptable range\n valid = False\n break\n except ValueError:\n valid = False\n break\n finally:\n pass\n if valid:\n result = accumulate\n return result", "title": "" }, { "docid": "508297c665377fa39da6df5c2ce03711", "score": "0.5072287", "text": "def prepare_addresses(addresses, usage=\"header\"):\n result = []\n for address in re.split('[,;]', addresses):\n if not address:\n continue\n name, addr = parseaddr(address)\n if name and usage == \"header\":\n result.append(\"%s <%s>\" % (Header(name, 'utf8'), addr))\n else:\n result.append(addr)\n if usage == \"header\":\n return \",\".join(result)\n return result", "title": "" }, { "docid": "6c2ef96d22069e715fd67b14e51f1eb8", "score": "0.5067478", "text": "def extract_address(request):\n # First find and strip out /geocode/\n header = '/geocode/'\n if not request.startswith(header):\n return None\n request = request.partition(header)[2]\n\n splits = request.split('&')\n for s in splits:\n 
parts = s.split('=')\n if len(parts) != 2:\n continue\n tag, value = parts\n if tag == 'address':\n return value\n return None", "title": "" }, { "docid": "8788c1205270d0513ff1103e9ad79d09", "score": "0.5065744", "text": "def parse(cls, nlri_data):\n nlri_list = []\n while nlri_data:\n if nlri_data == b'\\x00\\x00':\n # Note: Fix wrong decoding to [\"0.0.0.0/0\", \"0.0.0.0/0\"]\n nlri_data = nlri_data[2:]\n continue\n if isinstance(nlri_data[0], int):\n prefix_bit_len = int(nlri_data[0])\n else:\n prefix_bit_len = ord(nlri_data[0:1])\n if prefix_bit_len % 8 == 0:\n prefix_byte_len = prefix_bit_len // 8\n else:\n prefix_byte_len = prefix_bit_len // 8 + 1\n offset = prefix_byte_len + 1\n prefix_bit = nlri_data[1:offset]\n # append zero\n zero_len = (128 - prefix_bit_len) // 8\n for i in range(0, zero_len):\n prefix_bit += b'\\x00'\n\n prefix_addr = str(netaddr.IPAddress(int(binascii.b2a_hex(prefix_bit), 16))) + '/%s' % prefix_bit_len\n nlri_list.append(prefix_addr)\n nlri_data = nlri_data[offset:]\n\n return nlri_list", "title": "" }, { "docid": "5a352bfa33d3cc7bcc044ef5d3f9608d", "score": "0.50614583", "text": "def pack_address(host, port):\n ip = ipaddress.ip_address(host)\n if ip.version == 4:\n msg = pack('B', socks5.IPV4)\n elif ip.version == 6:\n msg = pack('B', socks5.IPV6)\n else:\n assert False\n msg += ip.packed + pack('!H', port)\n return msg", "title": "" }, { "docid": "03746db2effbfcef85658f1bc59bf2ce", "score": "0.5059381", "text": "def reassemble_addresses(seq):\n reassembled = []\n prev = ''\n\n for s in seq:\n if prev.isdigit():\n try:\n end, port = s.split('_')\n except ValueError:\n end, port = '', ''\n\n if s.isdigit():\n reassembled[-1] += '.{}'.format(s)\n elif end.isdigit() and port.isdigit():\n reassembled[-1] += '.{}:{}'.format(end, port)\n else:\n reassembled.append(s)\n else:\n reassembled.append(s)\n\n prev = s\n\n return reassembled", "title": "" }, { "docid": "bdfb28d3f7e70f4b8663f6e546a1dcff", "score": "0.5057183", "text": "def addr_tuple(self) -> tuple[str, int]:\n return self.ip_addr, self.port", "title": "" }, { "docid": "a2c17786cccbedd3edb4c69ee969a4ca", "score": "0.5055029", "text": "def _parse_addresses(self, addresses):\r\n\r\n # start off with an empty list\r\n address_list = []\r\n\r\n # we could be getting a list or a string\r\n if addresses:\r\n if isinstance(addresses, (tuple, list)):\r\n for address in addresses:\r\n address_list += address.split(';')\r\n else:\r\n address_list = addresses.split(';')\r\n\r\n # return the final list, removing blanks\r\n return [address for address in address_list if len(address.strip()) > 0]", "title": "" }, { "docid": "f97134d43cb399c26a3721489e7831e6", "score": "0.504879", "text": "def _splitnport(host, defport=-1):\n host, delim, port = host.rpartition(':')\n if not delim:\n host = port\n elif port:\n try:\n nport = int(port)\n except ValueError:\n nport = None\n return host, nport\n return host, defport", "title": "" }, { "docid": "e3a24fab6f0ec3aaf92a9371f0a1d1a6", "score": "0.5047713", "text": "def destination_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"destination_address_prefixes\")", "title": "" }, { "docid": "b3166c44cb94e3672aa40b0cd86fa48c", "score": "0.5047621", "text": "def decode_addr(v):\n if len(v) not in [0, 20]:\n raise Exception(\"Serialized addresses must be empty or 20 bytes long!\")\n return encode_hex(v)", "title": "" }, { "docid": "480b592465bd307ffed8195103ac99b5", "score": "0.5047279", "text": "def 
get_interface_addresses(version=AF_INET):\n\n for interface in netifaces.interfaces():\n try:\n addresses = netifaces.ifaddresses(interface)\n except ValueError:\n # some interfaces are given that are invalid, we encountered one called ppp0\n yield Interface(interface, None, None, None)\n else:\n if version == AF_INET:\n for option in addresses.get(netifaces.AF_INET, []):\n try:\n yield Interface(interface, option.get(\"addr\"), option.get(\"netmask\"), option.get(\"broadcast\"))\n except TypeError:\n # some interfaces have no netmask configured, causing a TypeError when\n # trying to unpack _l_netmask\n pass\n elif version == AF_INET6:\n for option in addresses.get(netifaces.AF_INET6, []):\n try:\n yield Interface(interface, option.get(\"addr\").split(\"%\")[0], option.get(\"netmask\"), option.get(\"broadcast\"), version=AF_INET6)\n except TypeError:\n # some interfaces have no netmask configured, causing a TypeError when\n # trying to unpack _l_netmask\n pass\n else:\n logger.warning(\"Unknown version %s\", version)", "title": "" } ]
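Several of the passages above implement the same host:port splitting idea (parse_addr, get_host_and_port, parse_bind_address, _splitnport). As an illustration only, and not taken from any single passage, here is a minimal self-contained Python sketch of that pattern; the helper name and the default port are assumptions made for the example:

    def split_host_port(address, default_port=8080):
        # Split 'host', 'host:port' or '[ipv6]:port' into (host, port).
        # Illustrative sketch only; names and defaults are invented.
        if address.startswith("["):            # bracketed IPv6, e.g. [::1]:80
            host, _, rest = address[1:].partition("]")
            port = int(rest[1:]) if rest.startswith(":") else default_port
        elif address.count(":") > 1:           # bare IPv6, cannot carry a port
            host, port = address, default_port
        elif ":" in address:                   # IPv4 or hostname with port
            host, _, port = address.partition(":")
            port = int(port)
        else:                                  # host only, fall back to default
            host, port = address, default_port
        return host, port

    # Example: split_host_port("[::1]:8443") -> ("::1", 8443)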
e7f79e2f79009c5fe91885bf8e3476a1
Get the route location and customer location of a customer.
[ { "docid": "826d0525eebbe19e2e0d8df2fd11fe00", "score": "0.5834146", "text": "def cust_loc(self, sol, cust):\n cust_ind = [] # [route_loc, cust_loc]\n for i, rt in enumerate(sol):\n if cust in rt:\n cust_ind.append(i)\n cust_ind.append(rt.index(cust))\n return cust_ind\n\n print('Costomer not in the solution: ', cust)", "title": "" } ]
[ { "docid": "a5ec4d134d11f24bc1d5418d50483a79", "score": "0.6389137", "text": "def GetLocation():\n\t\tif not clc.LOCATION: Account.GetAccounts()\n\t\treturn(clc.LOCATION)", "title": "" }, { "docid": "08bd0dd35f42aa342ea747da89233e48", "score": "0.62559897", "text": "def customer_info(self):\n return self.customer", "title": "" }, { "docid": "148173b5292c89e425810162710b72c4", "score": "0.62468135", "text": "def get_location(self):\r\n location = \"{0}, {1}\".format(self.city, self.country.name)\r\n return location", "title": "" }, { "docid": "0b05c52ce7fc7854d679f03dd7bfb006", "score": "0.6193476", "text": "def get_location(self):", "title": "" }, { "docid": "1be2c9bcaa6a2e5ad63056282361ac1b", "score": "0.6121398", "text": "def get_location(self):\n\n return self.location", "title": "" }, { "docid": "4403353a44fb1729d7e1a9d0c2ab0918", "score": "0.6064329", "text": "def get_customer(self):\r\n return self._customer", "title": "" }, { "docid": "68fd1ad0a99aad9bb865f073fdf85433", "score": "0.6042594", "text": "def get_location(self):\n\t\treturn self.location", "title": "" }, { "docid": "68fd1ad0a99aad9bb865f073fdf85433", "score": "0.6042594", "text": "def get_location(self):\n\t\treturn self.location", "title": "" }, { "docid": "b525fb4fccdb6a578beccac751bdf629", "score": "0.6031382", "text": "def get_customer(self):\n return self._customer", "title": "" }, { "docid": "b525fb4fccdb6a578beccac751bdf629", "score": "0.6031382", "text": "def get_customer(self):\n return self._customer", "title": "" }, { "docid": "b525fb4fccdb6a578beccac751bdf629", "score": "0.6031382", "text": "def get_customer(self):\n return self._customer", "title": "" }, { "docid": "0f6ca7ec0364bd40909daca12671ce10", "score": "0.5978856", "text": "def location(self):\n return self.route_nodes[self.location_index]", "title": "" }, { "docid": "242cb8af1bde79858f69a8b42127de28", "score": "0.5896137", "text": "def get_location(self, request):\n return request.user.location", "title": "" }, { "docid": "51291b76f390d82dc83b6d6edf2c5dfa", "score": "0.58836234", "text": "def location(self):\n return self.execute(Command.GET_LOCATION)['value']", "title": "" }, { "docid": "96598c4267c245e617b937407c490d62", "score": "0.587921", "text": "def _get_location(self):\n return self.__location", "title": "" }, { "docid": "3ad46edfde5ad3baa5c77e5677e98284", "score": "0.5868467", "text": "def customer(self):\n return self.__customer", "title": "" }, { "docid": "33e87ecf1e2ff1e7d41cae8b231fd87f", "score": "0.5807464", "text": "def get_distance_seller_customer(self):\n # Optional\n # Hint: you can use the haversine_distance logic coded in olist/utils.py\n # import data\n data = self.data\n matching_table = Olist().get_matching_table()\n # Since one zip code can map to multiple (lat, lng), take the first one\n geo = data['geolocation']\n geo = geo.groupby('geolocation_zip_code_prefix',\n as_index=False).first()\n # Select sellers and customers\n sellers = data['sellers']\n customers = data['customers']\n # Merge geo_location for sellers\n sellers_mask_columns = ['seller_id', 'seller_zip_code_prefix',\n 'seller_city', 'seller_state',\n 'geolocation_lat', 'geolocation_lng']\n sellers_geo = sellers.merge(geo,\n how='left',\n left_on='seller_zip_code_prefix',\n right_on='geolocation_zip_code_prefix')[sellers_mask_columns]\n # Merge geo_location for customers\n customers_mask_columns = ['customer_id', 'customer_zip_code_prefix',\n 'customer_city', 'customer_state',\n 'geolocation_lat', 'geolocation_lng']\n customers_geo = customers.merge(geo,\n 
how='left',\n left_on='customer_zip_code_prefix',\n right_on='geolocation_zip_code_prefix')[customers_mask_columns]\n # Use the matching table and merge customers and sellers\n matching_geo = matching_table.merge(sellers_geo,\n on='seller_id')\\\n .merge(customers_geo,\n on='customer_id',\n suffixes=('_seller',\n '_customer'))\n # Remove na()\n matching_geo = matching_geo.dropna()\n matching_geo.loc[:, 'distance_seller_customer'] =\\\n matching_geo.apply(lambda row:\n haversine_distance(row['geolocation_lng_seller'],\n row['geolocation_lat_seller'],\n row['geolocation_lng_customer'],\n row['geolocation_lat_customer']),\n axis=1)\n # Since an order can have multiple sellers,\n # return the average of the distance per order\n order_distance =\\\n matching_geo.groupby('order_id',\n as_index=False).agg({'distance_seller_customer':\n 'mean'})\n return order_distance", "title": "" }, { "docid": "91b4d4da2cfc3a35c7267722e42815ee", "score": "0.57253265", "text": "def get_customer_data(self):\n\t\tcustomer = {}\n\t\tsignals.customer_data_query.send(sender=type(self), instance=self, customer=customer)\n\t\treturn customer", "title": "" }, { "docid": "13979a97d538c6d5b27491686549c109", "score": "0.571447", "text": "def get_location(self):\n response = self._send_command(b'w', 8)\n lat_deg = response[0]\n lat_min = response[1]\n lat_sec = response[2]\n lat_north = (response[3] == 0)\n lat = lat_deg + lat_min / 60.0 + lat_sec / 3600.0\n if not lat_north:\n lat = -lat\n lon_deg = response[4]\n lon_min = response[5]\n lon_sec = response[6]\n lon_east = (response[7] == 0)\n lon = lon_deg + lon_min / 60.0 + lon_sec / 3600.0\n if not lon_east:\n lon = -lon\n return (lat, lon)", "title": "" }, { "docid": "282369f2c345281ac131d099264529d2", "score": "0.56632286", "text": "def get_location(self, locator):\n import fiona\n with fiona.open(locator.get_building_geometry()) as shp:\n longitude = shp.crs['lon_0']\n latitude = shp.crs['lat_0']\n return latitude, longitude", "title": "" }, { "docid": "deef5534758c602df8a262d449050b66", "score": "0.564373", "text": "def get_location(self):\n return self._location", "title": "" }, { "docid": "b778e3d51bdb9b2def92dfaafdccfd57", "score": "0.5641305", "text": "def get_customer_details(self):\n url = API_ROOT + API_CUSTOMER.format(email=self.username)\n response = self._get_request(url)\n response.raise_for_status()\n if response.text:\n j = response.json()\n return Customer().from_json_dictionary(j)\n return None", "title": "" }, { "docid": "5333cb544b398449274ccdf00bb749dd", "score": "0.5619865", "text": "def getLocation(self):\n\n return self.location", "title": "" }, { "docid": "7f4f28fda64b5224f003c04e947bd650", "score": "0.56105465", "text": "def customer_reference(self):\n return self.__customer_reference", "title": "" }, { "docid": "7f4f28fda64b5224f003c04e947bd650", "score": "0.56105465", "text": "def customer_reference(self):\n return self.__customer_reference", "title": "" }, { "docid": "97bb0bfd1591574f3ef5c01bf5a10098", "score": "0.55995584", "text": "def getGeoCoordinates(self):\n url = location_point + apiKey + \\\n \"&format=json&language=en-IN&postalKey={}%3AIN\".format(\n (self.pin_code))\n response = requests.get(url)\n if response.status_code == 200:\n data = response.json()\n self.lon = data[\"location\"][\"longitude\"]\n self.lat = data[\"location\"][\"latitude\"]\n city = data[\"location\"][\"city\"]\n local = data[\"location\"][\"locale\"]\n place = \" \".join([local[i].strip() for i in local if local[i]])\n self.place_id = 
data[\"location\"][\"placeId\"]\n return city, place\n else:\n raise RequestError(response.status_code, response.json())", "title": "" }, { "docid": "378127c261a24a6b9117c5a0201e5f94", "score": "0.5555975", "text": "def getLoc(self):\n return self.location", "title": "" }, { "docid": "769114c2f53f6279a0823692380d5b7c", "score": "0.5543031", "text": "def location(self):\n return self._data[\"LOCATION\"]", "title": "" }, { "docid": "a0ba106a2a1f6b2d5ab59d9dffde6861", "score": "0.5540326", "text": "def getLocation(self):\n return self._location", "title": "" }, { "docid": "a0ba106a2a1f6b2d5ab59d9dffde6861", "score": "0.5540326", "text": "def getLocation(self):\n return self._location", "title": "" }, { "docid": "a0ba106a2a1f6b2d5ab59d9dffde6861", "score": "0.5540326", "text": "def getLocation(self):\n return self._location", "title": "" }, { "docid": "a0ba106a2a1f6b2d5ab59d9dffde6861", "score": "0.5540326", "text": "def getLocation(self):\n return self._location", "title": "" }, { "docid": "a0ba106a2a1f6b2d5ab59d9dffde6861", "score": "0.5540326", "text": "def getLocation(self):\n return self._location", "title": "" }, { "docid": "a56695f2331799863ff26aa830f7d0cb", "score": "0.5538692", "text": "def get_location_cell(sm_devices_list, user_cell):\n location = None\n for device in sm_devices_list:\n if device['phoneNumber'] == user_cell:\n pprint(device)\n location = device['location']\n return location", "title": "" }, { "docid": "022be113e2d04b9a43d6c90acbeceea9", "score": "0.55151623", "text": "def get_location():\r\n ip_request = requests.get('https://get.geojs.io/v1/ip.json')\r\n my_ip = ip_request.json()['ip']\r\n geo_request = requests.get('https://get.geojs.io/v1/ip/geo/' +my_ip + '.json')\r\n geo_data = geo_request.json()\r\n geo = geo_data['city']\r\n return geo", "title": "" }, { "docid": "685eb836347224364032bac6d925e170", "score": "0.54943347", "text": "def getLocation(self):\n return self._Location", "title": "" }, { "docid": "685eb836347224364032bac6d925e170", "score": "0.54943347", "text": "def getLocation(self):\n return self._Location", "title": "" }, { "docid": "35b6652b0cb26858e8ab4a5c4085737b", "score": "0.5493288", "text": "def city(self, record):\n address = record['commerce_customer_address']\n return {'city': address['locality']}", "title": "" }, { "docid": "35b6652b0cb26858e8ab4a5c4085737b", "score": "0.5493288", "text": "def city(self, record):\n address = record['commerce_customer_address']\n return {'city': address['locality']}", "title": "" }, { "docid": "af097126f7d78a94f61f742753d9a784", "score": "0.5463095", "text": "def geolocation(self):\n \n if geo:\n try: \n location = self.geoloc.reverse(str(self.latitude)+\", \"+str(self.longitude))\n return str(location)\n except:\n return \"Address for coordinates not found\"\n else:\n return \"Geopy is not installed, cannot get address for coordinates.\"", "title": "" }, { "docid": "4ae762f5c25a7cbd7ed47e5aed06fc44", "score": "0.5425713", "text": "def geoLocation(self):\n if self._stone_geoLocation_present:\n return self._stone_geoLocation_value\n else:\n return None", "title": "" }, { "docid": "3d693983fe43722fcff5b8f80b0443f4", "score": "0.54076356", "text": "def location(self):\n return self.location_value", "title": "" }, { "docid": "79541d6f7681a37152de87aff094c6d4", "score": "0.54058564", "text": "def get_customer(self, customer_id, params=None):\n return self._get('customers/' + customer_id, params=params)", "title": "" }, { "docid": "3d3bbad0d358f01417f6f578cb9f3e0d", "score": "0.5364921", "text": "def 
get_customer_id(self):\n return self.customer_id", "title": "" }, { "docid": "da85740c4013d1e85af94bcda4b8fac6", "score": "0.53478515", "text": "def location(self) -> Sequence['outputs.LocationResponse']:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "9f3000a2c465e0ccb0a7d2135de5d14f", "score": "0.532473", "text": "def get_location(self):\n\t\ttry:\n\t\t\t### Fetch location using freegeoip API ###\n\t\t\t# store location URL. Uses IP fetched by get_ip() in variable\n\t\t\tlocation_req_url = (\"http://api.ipstack.com/\" + str(self.get_ip()) +\n\t\t\t\t\t\t\t\t\"?access_key=\" + LOCATION_API_TOKEN + \"&output=json&legacy=1\")\n\t\t\treq = get(location_req_url)\t\t\t\t\t\t\t# fetch data from URL\n\t\t\tlocation_obj = loads(req.text)\t\t\t\t\t\t# convert fetched data to object\n\t\t\tif self.latitude != location_obj['latitude']:\t\t# change latitude variable if device has moved.\n\t\t\t\tself.latitude = location_obj['latitude']\n\t\t\tif self.longitude != location_obj['longitude']:\t\t# change latitude variable if device has moved.\n\t\t\t\tself.longitude = location_obj['longitude']\n\t\t\tlocation_tmp = \"%s, %s\" % \\\n\t\t\t\t\t(location_obj['city'], location_obj['region_code']) # get current location and store in tmp variable\n\t\t\tif self.location != location_tmp:\t\t\t\t\t# update weather information\n\t\t\t\tself.location = location_tmp\n\t\t\t\tself.location_label.config(text=location_tmp)\n\t\texcept Exception as exc:\n\t\t\tprint_exc()\n\t\t\treturn \"Error: %s. Cannot get location.\" % exc", "title": "" }, { "docid": "660d534255c1ee6567b2838b91f9e0ec", "score": "0.53116834", "text": "def get_lat_and_lng(self):\n self.lat = self.location['geometry']['location']['lat']\n self.lng = self.location['geometry']['location']['lng']\n self.address = self.geocoder.reverse(str(self.lat)+\", \"+str(self.lng))\n return [self.lat, self.lng]", "title": "" }, { "docid": "2f4a5efee2556f716996f56271812d9b", "score": "0.5303388", "text": "def GetLocations():\n\t\tr = clc.v1.API.Call('post','Account/GetLocations',{})\n\t\tif r['Success'] != True: \n\t\t\tif clc.args: clc.v1.output.Status('ERROR',3,'Error calling %s. Status code %s. %s' % ('Account/GetLocations',r['StatusCode'],r['Message']))\n\t\t\traise Exception('Error calling %s. Status code %s. 
%s' % ('Account/GetLocations',r['StatusCode'],r['Message']))\n\t\telif int(r['StatusCode']) == 0: \n\t\t\tclc.LOCATIONS = [x['Alias'] for x in r['Locations']]\n\t\t\treturn(r['Locations'])", "title": "" }, { "docid": "21cb0621fb259770a8c61200f86c0fbc", "score": "0.52936625", "text": "def locations(self):\n return self.addresses.keys()", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.5278212", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "14bfaa7df84eabccabb6c1a5fce7ebf6", "score": "0.52580065", "text": "def customer_distance(self, data):\n customer_lat = math.radians(float(data['latitude']))\n customer_long = math.radians(float(data['longitude']))\n del_long = math.fabs(customer_long - self.icom_off_lon)\n\n \"\"\" Great circle distance formula can be found here: \n https://en.wikipedia.org/wiki/Great-circle_distance \"\"\"\n\n eq1 = (math.sin(customer_lat) * math.sin(self.icom_off_lat))\n eq2 = math.cos(customer_lat) * math.cos(self.icom_off_lat) * math.cos(del_long)\n\n del_sig = math.acos(eq1 + eq2)\n distance = self.radius_earth * del_sig\n return distance", "title": "" }, { "docid": "ff5549ea746484dcdcbef5f2c82e8479", "score": "0.52402705", "text": "def find_closest_location(self):\n location = \"\"\n min_distance = None\n config = self.get_plugin_config()\n base_coords = config[\"latitude\"], config[\"longitude\"]\n for temp in self.location_list:\n location_coords = float(self.location_list[temp].get('lat')), float(self.location_list[temp].get('long'))\n distance = self.haversine(base_coords, location_coords)\n self.log( temp + \" is \" + str(distance))\n if min_distance is None or min_distance > distance:\n location = temp\n min_distance = distance\n self.location_list[location]['distance'] = distance\n self.log(\"Closest point \" + location + \" \" + self.location_list[location].get('name') + \" being in \" + str(self.location_list[location].get('distance')) + \"km\")\n return location", "title": "" }, { "docid": "02347caada20725b34727e8136f4c9f7", "score": "0.5229773", "text": "def get_location(self):\r\n response = self.connection.make_request('GET', self.name,\r\n query_args='location')\r\n body = response.read()\r\n if response.status == 200:\r\n rs = ResultSet(self)\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n return rs.LocationConstraint\r\n else:\r\n raise S3ResponseError(response.status, response.reason, body)", "title": "" }, { "docid": "a83c4e3eabf7679d8aa709a6511f5d27", "score": "0.5226422", "text": "def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.5217581", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "57305b5c06a3ddaef20d52dafec0cde2", "score": "0.52087766", "text": "def location(self):\n return self._location", "title": "" } ]
e1765654b9adf837458da050bd051eaf
Wrapper function for the telnet interface. It uses the underlying Python telnet libraries. Reads until one of a list of regular expressions matches.
[ { "docid": "4489f58e627d6910de71e6b7564d008d", "score": "0.0", "text": "def loc_expect(self, output: any) -> object:\n print(\"Telnet expect with {}\".format(output))\n (i, obj, res) = self.telnet_con.expect([output.encode('ascii')], 5)\n print(\"i inside is {} \".format(i))\n return i", "title": "" } ]
[ { "docid": "1e83f6c81f96a3a34cfe616ec39ddc25", "score": "0.639549", "text": "def _telnet_read(self):\n return self.tn.read_until(\"\\n\", self.timeout).rstrip('\\n') # Telnet reply, with termination chars removed", "title": "" }, { "docid": "4d5010c53f8ddd74b9249bb012d1d113", "score": "0.63211936", "text": "def test_read_until(self):\n want = [b'xxxmatchyyy']\n telnet = test_telnet(want)\n data = telnet.read_until(b'match')\n self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))\n\n reads = [b'x' * 50, b'match', b'y' * 50]\n expect = b''.join(reads[:-1])\n telnet = test_telnet(reads)\n data = telnet.read_until(b'match')\n self.assertEqual(data, expect)", "title": "" }, { "docid": "dec78c9a8edcd508d9ffb50b710f0cdc", "score": "0.6051021", "text": "async def _read_until(self, pattern_list: List[str], re_flags) -> str:\n if pattern_list is None:\n raise ValueError(\"Pattern list can't be None\")\n\n output = \"\"\n self._logger.debug(\"Host %s: Read until pattern list: %r\", self.host, pattern_list)\n while True:\n buf = await self._io_connection.read()\n output += buf\n\n for pattern in pattern_list:\n if re.search(pattern, output, flags=re_flags):\n self._logger.debug(\n \"Host %s: find pattern [%r] in buffer: %r\", self.host, pattern, output,\n )\n output = self._normalize_linefeeds(output)\n return output", "title": "" }, { "docid": "a5a205c55265663a8eee9d9a7b963471", "score": "0.58326966", "text": "def test_read_all(self):\n reads = [b'x' * 500, b'y' * 500, b'z' * 500]\n expect = b''.join(reads)\n telnet = test_telnet(reads)\n data = telnet.read_all()\n self.assertEqual(data, expect)\n return", "title": "" }, { "docid": "ca06cd9eaf288d10814b0352faba988e", "score": "0.5810383", "text": "def read_until_regex(\n self, regex: bytes, max_bytes: Optional[int] = None\n ) -> Awaitable[bytes]:\n future = self._start_read()\n self._read_regex = re.compile(regex)\n self._read_max_bytes = max_bytes\n try:\n self._try_inline_read()\n except UnsatisfiableReadError as e:\n # Handle this the same way as in _handle_events.\n gen_log.info(\"Unsatisfiable read, closing connection: %s\" % e)\n self.close(exc_info=e)\n return future\n except:\n # Ensure that the future doesn't log an error because its\n # failure was never examined.\n future.add_done_callback(lambda f: f.exception())\n raise\n return future", "title": "" }, { "docid": "0d34ac446193c01c2f6d3a6c9fd09f38", "score": "0.56899154", "text": "def telnet_query(tn, timeout, orig_message):\n # Expected message pattern\n message_pattern = re.compile('(?:scpi>)*\\S*\\s+(.*)')\n\n # Checks that the telnet message is a string\n if type(orig_message) != str:\n raise TypeError\n tn.write(orig_message + \"\\r\\n\") # Writes a telnet message with termination characters\n r_str = tn.read_until(\"\\n\", timeout).rstrip('\\n') # Telnet reply, with termination chars removed\n check = tn.read_until(\"\\n\", timeout).rstrip('\\n') # Status line\n message = re.match(message_pattern, r_str)\n\n if 'OK' not in check:\n #Have another go\n print 'Retrying ', orig_message\n tn.write(orig_message + \"\\r\\n\") # Writes a telnet message with termination characters\n r_str = tn.read_until(\"\\n\", timeout).rstrip('\\n') # Telnet reply, with termination chars removed\n check = tn.read_until(\"\\n\", timeout).rstrip('\\n') # Status line\n message = re.match(message_pattern, r_str)\n if 'OK' not in check:\n print \"Original message = \", orig_message\n print 'Returned command = ', r_str\n print 'Bad status STATUS = ', check\n if message is 
not None:\n print 'Return = ', message.group(1)\n raise ValueError(''.join(('Bad status on communication with ITechBL12HI: ', orig_message)))\n return message.group(1), check", "title": "" }, { "docid": "7cd8ae5bfdb562f93a8cc7ad228c2203", "score": "0.5561093", "text": "def read_stop(self, command, regex: str, timeout=5):\n _LOGGER.debug('Serial::read_stop:: Sending: {}'.format(command))\n _LOGGER.debug('Serial::read_stop:: Expecting regex: {}'.format(regex))\n # compile regex\n _re = re.compile(regex)\n # send the command\n self.send_command(command)\n # return variable\n ret_val = b''\n # Initialize timeout\n timeout_start = datetime.now()\n found = False\n try:\n while not found:\n self._check_timeout(timeout, timeout_start)\n # grab response\n line = self.read_line()\n ret_val += line\n # set found lv\n found = _re.search(str(line))\n except TimeoutException:\n _LOGGER.debug(\"Serial::read_stop:: Timeout raised.\")\n if timeout != 5:\n raise TimeoutException\n finally:\n self.reset_input_buffer()\n return ret_val", "title": "" }, { "docid": "60155dd69a5e8617b4d4e32eb852934b", "score": "0.52934337", "text": "def wrap_read(telnet_obj):\n try:\n data = telnet_obj.read_very_eager().decode('utf-8')\n except Exception:\n data = \"\"\n\n return data", "title": "" }, { "docid": "396a2da29d63b5d77a2c2deb37a0af0e", "score": "0.5292741", "text": "def _read_eager(self, func_name):\n want = b'x' * 100\n telnet = test_telnet([want])\n func = getattr(telnet, func_name)\n telnet.sock.block = True\n self.assertEqual(b'', func())\n telnet.sock.block = False\n data = b''\n while True:\n try:\n data += func()\n except EOFError:\n break\n self.assertEqual(data, want)", "title": "" }, { "docid": "e77c17281417dc7d034f365fb0564402", "score": "0.5222662", "text": "def read(self):\n recv = b''\n read_timeout = self.read_timeout\n chrono = self.chrono\n chrono.reset() # Reset the timer\n chrono.start() # Start timer\n while (chrono.read() < read_timeout):\n inp = self.serial.read(1) # Read a character from the input\n if inp == MSG_CHAR_1: # check it matches\n recv += inp # if it does add it to receive string\n inp = self.serial.read(1) # read the next character\n if inp == MSG_CHAR_2: # check it's what's expected\n recv += inp # att it to the receive string\n recv += self.serial.read(30) # read the remaining 30 bytes\n self._verify(recv) # verify the checksum\n chrono.stop() # Stop the timer\n return PlantowerReading(recv) # convert to reading object\n # If the character isn't what we are expecting loop until timeout\n chrono.stop() # Stop the timer (in case the while loop timed out)\n\n raise PlantowerException(\"No message received\")", "title": "" }, { "docid": "b6693f102a61879f2144c269bf8de808", "score": "0.5218396", "text": "def test_read_some(self):\n # test 'at least one byte'\n telnet = test_telnet([b'x' * 500])\n data = telnet.read_some()\n self.assertTrue(len(data) >= 1)\n # test EOF\n telnet = test_telnet()\n data = telnet.read_some()\n self.assertEqual(b'', data)", "title": "" }, { "docid": "be1c171a3c50b7268b50ad626ef51f07", "score": "0.5152896", "text": "def readline(self, timeout=None):\n while True:\n if b\"\\r\\n\" in self._buffer:\n data, tmp, self._buffer = self._buffer.partition(b\"\\r\\n\")\n return data.decode(self._codec)\n\n events = self.selector_r.select(timeout=timeout) # We ignore the event, only one socket\n try:\n data = os.read(sock, 4096)\n if data == \"\":\n return None # disconnected\n except OSError:\n return None # disconnect\n self._buffer += data", "title": "" }, { "docid": 
"babee497222a763c6a7684a7804f34b7", "score": "0.51489544", "text": "def _telnet_query(self, message):\n self._telnet_write(message)\n return self._telnet_read()", "title": "" }, { "docid": "65103301021d39b5cad393e3412f48b6", "score": "0.5119769", "text": "def find (self, patterns, timeout=0) :\n\n def escape (txt) :\n pat = re.compile(r'\\x1b[^m]*m')\n return pat.sub ('', txt)\n\n _debug = False\n\n with self.rlock :\n\n try :\n start = time.time () # startup timestamp\n ret = [] # array of read lines\n patts = [] # compiled patterns\n data = self.cache # initial data to check\n self.cache = \"\"\n\n if not data : # empty cache?\n data = self.read (timeout=_POLLDELAY)\n\n # pre-compile the given pattern, to speed up matching\n for pattern in patterns :\n patts.append (re.compile (pattern, re.MULTILINE | re.DOTALL))\n\n # we wait forever -- there are two ways out though: data matches\n # a pattern, or timeout passes\n while True :\n\n # skip non-lines\n if not data :\n data += self.read (timeout=_POLLDELAY)\n\n if _debug : print \">>%s<<\" % data\n\n escaped = escape (data)\n if _debug : print 'data ==%s==' % data\n if _debug : print 'escaped ==%s==' % escaped\n\n # check current data for any matching pattern\n for n in range (0, len(patts)) :\n\n escaped = data\n # escaped = escape (data)\n # print '-- 1 --%s--' % data\n # print '-- 2 --%s--' % escaped\n\n match = patts[n].search (escaped)\n if _debug : print \"==%s==\" % patterns[n]\n if _debug : print match\n\n if match :\n # a pattern matched the current data: return a tuple of\n # pattern index and matching data. The remainder of the\n # data is cached.\n ret = escaped[0:match.end()]\n self.cache = escaped[match.end():]\n\n if _debug : print \"~~match!~~ %s\" % escaped[match.start():match.end()]\n if _debug : print \"~~match!~~ %s\" % (len(escaped))\n if _debug : print \"~~match!~~ %s\" % (str(match.span()))\n if _debug : print \"~~match!~~ %s\" % (ret)\n\n return (n, ret.replace('\\r', ''))\n\n # if a timeout is given, and actually passed, return\n # a non-match and a copy of the data we looked at\n if timeout == 0 :\n return (None, str(escaped))\n\n if timeout > 0 :\n now = time.time ()\n if (now-start) > timeout :\n self.cache = escaped\n return (None, str(escaped))\n\n # no match yet, still time -- read more data\n data += self.read (timeout=_POLLDELAY)\n\n except se.NoSuccess as e :\n raise ptye.translate_exception (e, \"(%s)\" % data)", "title": "" }, { "docid": "537bfad70732666ed491201bc2c0989c", "score": "0.50606513", "text": "def parse(inport):\n return read(inport)", "title": "" }, { "docid": "30929cde11d054ca35a5da27ac547b06", "score": "0.50342655", "text": "def telnet_cmds(self, cmds):\n self.telnet_client()\n try:\n std_output = self._execute_cmds(cmds)\n except:\n printd(\"%s: execute telnet commands not successful\\n\" % self._ipaddr)\n std_output = None\n finally:\n self.client.close()\n return std_output", "title": "" }, { "docid": "8afb5024394165ebbc5147230fb2c63c", "score": "0.5023784", "text": "def read(self):\n with serial.Serial(**self.serial_settings) as serial_handle:\n telegram = []\n\n while True:\n line = serial_handle.readline()\n line = line.decode('ascii')\n\n # Telegrams need to be complete because the values belong to a\n # particular reading and can also be related to eachother.\n if not telegram and not is_start_of_telegram(line):\n continue\n\n telegram.append(line)\n\n if is_end_of_telegram(line):\n yield self.telegram_parser.parse(telegram)\n telegram = []", "title": "" }, { "docid": 
"7398e59a4f8e3b90fff72b60294eb800", "score": "0.49608758", "text": "def listen(directory,server,port=23,pw=False,test=False):\n\tHOST = server\n\tif directory[-1] == \"/\":\n\t\tdirectory = directory[:-1]\n\n\t# LOGIN DECISIONS WILL BE MADE AT A LATER TIME\n\n\ttn = telnetlib.Telnet(HOST,port)\n\tif pw:\n\t\tuser = raw_input(\"Enter your remote account: \")\n\t\tpassword = getpass.getpass()\n\t\n\t\n\t\ttn.read_until(\"login: \")\n\t\ttn.write(user + \"\\n\")\n\t\tif password:\n\t\t tn.read_until(\"Password: \")\n\t\t tn.write(password + \"\\n\")\n\t\n\t# so begins the ideally infinite loop\n\tgo = True\n\tprint \"I'm Listening.\"\n\t#current = tn.read_all()\n\t#print current\n\tEOM = unichr(3)\n\tBOM = unichr(2)\n\ttrig = unichr(4)\n\twhile go:\n\t\t#print time.time()\n\t\t# read until the end of the message\n\t\ttn.read_until(BOM)# now it is the start of the message!\n\t\t# now we have filtered off all the pre message schlock.\n\t\tob = BOM+tn.read_until(EOM)\n\t\t# add a timestamp, and pass the file to the raw data file\n\t\t\n\t\tprint ob[0:100]+\"...\" # this is for giggles right now, hopefully it is good\n\n\t\t# oh, we will need to check this ob, to see if it is useful...\n\n\t\tts = time.time() # I am saving in EPOCH!!! VICTORY!\n\t\traw = open(directory+\"/raw_data.dat\",'a')\n\t\traw.write(\"\\n\"+str(ts)+\"\\n\"+ob.strip()) # time before ob!\n\t\traw.close()\n\t\tif test:\n\t\t\t# then don't try to analyze the data... it will make angry.\n\t\t\tcontinue\n\n\t\t# edit the runtime file!\n\t\trt = open(directory+\"/.runtime\",'w')\n\t\trt.write(str(ts))\n\t\trt.close()\n\n\t\ttrans_ob = vCT12.read({'time':ts,'code':[0],'rest':ob.strip()}) #CT12 only!!\n\t\tif not trans_ob:\n\t\t\t# well, that was not a good ob.\n\t\t\tprint \"Badly Formatted Observation\",ts\n\t\t\tcontinue\n\t\tdat = open(directory+\"/ceil.dat\",'a')\n\t\tvl = \"\" # text holer for the values\n\t\tfor v in trans_ob['v'][:trans_ob['l']]:\n\t\t\t# copy the values to a comma seperated list\n\t\t\t# however, this goes through a thousand, even if there are only 250 obs... sorry\n\t\t\tvl += str(v)+\",\"\n\t\tdat.write(\"\\n\"+str(ts)+\",\"+str(trans_ob['h'])+\",\"+trans_ob['c']+\",\"+vl[:-1])\n\t\tdat.close()\n\t\t\"\"\"\n\t\t# now, we should check the controls... but meh...\n\t\t#meh indeed. Just kill it if it needs to die...\thopefully\n\t\tcommand = signal.signal(signal.SIGALARM,read_raw)\n\t\tsignal.alarm(1) # check for one second\n\t\tif command == \"kill\":\n\t\t\tgo = False\n\t\t\"\"\"\n\t\t# well, now it is truly infinite... so, just hope for death... 
\n\t\t# or restart the serial port\n\n\ttn.write(\"exit\\n\")\n\n\tprint tn.read_all()", "title": "" }, { "docid": "e5654b0c312650bd0478818c3d92f775", "score": "0.49270147", "text": "def main(host, port=None, encoding='cp437'):\n # pylint: disable=R0914,R0912,R0915\n # Too many local variables\n # Too many branches\n # Too many statements\n import telnetlib\n from functools import partial\n from x84.bbs import getsession, getterminal, echo, getch, from_cp437, telnet\n import logging\n log = logging.getLogger()\n\n assert encoding in ('utf8', 'cp437')\n session, term = getsession(), getterminal()\n session.activity = 'connecting to %s' % (host,)\n port = int(port) if port is not None else 23\n telnet_client = telnetlib.Telnet()\n telnet_client.set_option_negotiation_callback(partial(\n telnet.callback_cmdopt, env_term=session.env['TERM'], height=term.height, width=term.width))\n echo(u\"\\r\\n\\r\\nEscape character is 'ctrl-^.'\")\n if not session.user.get('expert', False):\n getch(3)\n echo(u'\\r\\nTrying %s:%s... ' % (host, port,))\n # pylint: disable=W0703\n # Catching too general exception Exception\n try:\n telnet_client.open(host, port)\n except Exception as err:\n echo(term.bold_red('\\r\\n%s\\r\\n' % (err,)))\n echo(u'\\r\\n press any key ..')\n getch()\n return\n\n echo(u'\\r\\n... ')\n inp = session.read_event('input', timeout=0)\n echo(u'\\r\\nConnected to %s.' % (host,))\n session.activity = 'connected to %s' % (host,)\n carriage_returned = False\n with term.fullscreen():\n while True:\n if encoding == 'cp437':\n try:\n unistring = from_cp437(\n telnet_client.read_very_eager().decode('iso8859-1'))\n except EOFError:\n break\n else:\n unistring = telnet_client.read_very_eager().decode('utf8')\n if 0 != len(unistring):\n echo(unistring)\n if inp is not None:\n if inp == chr(30): # ctrl-^\n telnet_client.close()\n echo(u'\\r\\n' + term.clear_el + term.normal)\n break\n elif not carriage_returned and inp in (b'\\x0d', b'\\x0a'):\n telnet_client.write(b'\\x0d')\n log.debug('send {!r}'.format(b'\\x0d'))\n carriage_returned = True\n elif carriage_returned and inp in (b'\\x0a', b'\\x00'):\n carriage_returned = False\n elif inp is not None:\n telnet_client.write(inp)\n log.debug('send {!r}'.format(inp))\n carriage_returned = False\n inp = session.read_event('input', timeout=KEY_POLL)\n echo(u'\\r\\nConnection closed.\\r\\n')\n echo(u''.join(('\\r\\n\\r\\n', term.clear_el, term.normal, 'press any key')))\n echo(u'\\x1b[r') # unset 'set scrolling region', sometimes set by BBS's\n session.flush_event('input')\n getch()\n return", "title": "" }, { "docid": "cb4be84722988c66850cd8238f4f397f", "score": "0.49031314", "text": "def process(self, ip, port):\n telnet = Telnet()\n sock = socket(AF_INET, SOCK_STREAM)\n sock.settimeout(self.timeout)\n\n # Wrap the socket in SSL\n ssl_socket = wrap_socket(sock)\n ssl_socket.connect((ip, port))\n telnet.sock = ssl_socket\n \n tmp = telnet.read_some()\n \n data = ''\n while tmp != '':\n data += tmp\n sleep(0.2)\n tmp = telnet.read_very_eager()\n \n if data != '':\n banner = {\n 'data': data,\n 'opts': {}\n }\n try:\n banner['opts']['pem'] = get_server_certificate((ip, port))\n except:\n pass\n\n try:\n tmp = has_heartbleed(ip, port)\n if tmp['data']:\n banner['opts']['heartbleed'] = tmp['data']\n banner['opts']['vulns'] = tmp['vulns']\n except:\n pass\n return banner\n \n return None", "title": "" }, { "docid": "6ac312fe244a45b5676c4b565ac892a3", "score": "0.48950082", "text": "def expect(self, matches, timeout=20):\n self.before = ''\n self.match = 
None\n\n if isinstance(matches, str):\n matches = {'0': matches}\n\n elif isinstance(matches, list):\n tmp = {}\n ix = 0\n for match in matches:\n tmp[ix] = match\n ix += 1\n matches = tmp\n\n regexes = {}\n for key, match in matches.items():\n regexes[key] = re.compile(match)\n\n while True:\n c = self.transport.read(timeout=timeout)\n if c is None:\n break\n self.before += c\n for key, regex in regexes.items():\n m = regex.search(self.before)\n if m:\n log.debug(\" expect, matched pattern: '%s' %s\" % (key, regex))\n self.match = m.group()\n if log.isEnabledFor(log.DEBUG):\n tmp = self.match.replace(\"\\n\", \"\\\\n\").replace(\"\\r\", \"\\\\r\")\n log.debug(\" expect, matched text : %s\" % tmp)\n\n tmp = self.before[:m.end()] # Everthing up to matched text\n if log.isEnabledFor(log.DEBUG):\n tmp = tmp.replace(\"\\n\", \"\\\\n\").replace(\"\\r\", \"\\\\r\")\n log.debug(\" expect, self.before : %s\" % tmp)\n\n tmp = self.before[m.end():]\n if len(tmp):\n # There are received data after our match, return the extra data\n if log.isEnabledFor(log.DEBUG):\n tmp = tmp.replace(\"\\n\", \"\\\\n\").replace(\"\\r\", \"\\\\r\")\n log.debug(\" expect, returned to buffer: '%s'\" % tmp)\n self.transport.unread(self.before[m.end():]) # text after match is returned to transport\n\n return key\n raise CommException(1, \" expect, timeout, self.before: %s\" % self.before)", "title": "" }, { "docid": "d87aff9093290b0cbf0f331d7efd4610", "score": "0.48785874", "text": "def _make_regex_block_reader(self, start_re=None, end_re=None):\n if start_re is None: start_re = self._speaker_pattern\n\n def pbr(stream):\n return read_regexp_block(stream, start_re, end_re=end_re)\n\n return pbr", "title": "" }, { "docid": "2871572ddccb6838e8ce8446243e9435", "score": "0.4875379", "text": "def telnet_connect(host, port):\n try:\n return telnetlib.Telnet(host, port, 0.1)\n except:\n return None", "title": "" }, { "docid": "eb2c824ded439ebca03bbb0bf77447b9", "score": "0.48708892", "text": "def read_until(self, read_until_string, timeout=None):\n raise NotImplementedError", "title": "" }, { "docid": "2ec0e17f7ce477ef399ffd383aca06c0", "score": "0.4852301", "text": "def telnet (ip,user,passwd):\n try:\n t = pexpect.spawn('telnet {}'.format(ip))\n t.expect('User Name:', 5)\n except pexpect.TIMEOUT:\n return 1\n t.sendline(user)\n t.expect('Password:')\n t.sendline(passwd)\n t.expect('#')\n t.sendline('terminal datadump')\n t.expect('#')\n return t", "title": "" }, { "docid": "20e75bff33d788737450cb3c9d5a3ee8", "score": "0.48295444", "text": "def read_banner(self, telnet):\n banner = ''\n # Read telnet banner\n for i in range(0, 5):\n banner += str(telnet.read_very_eager())\n time.sleep(0.2)\n logging.debug('Banner: '+banner)\n\n\n # Need a new line char to print telnet prompt login\n if str(banner).replace(\"b''b''b'\",\"\").replace(\"'b''b''\",\"\") == '':\n # Send new line\n telnet.write(('').encode('ascii'))\n\n # Get telnet banner\n for i in range(0, 10):\n banner += str(telnet.read_very_eager())\n time.sleep(0.2)\n\n return banner", "title": "" }, { "docid": "2abbf52d64d3f1243807240b79d7a6c0", "score": "0.48186484", "text": "def _read(self, n):\n result = ''\n while len(result) < n:\n rx = self.sock.recv(n - len(result))\n if not rx:\n raise SocketFail('End of input')\n result = result + rx\n return result", "title": "" }, { 
"docid": "a5f3b5bf543fd91945dde0aff6763571", "score": "0.480771", "text": "def loc_read_very_eager(self) -> bytes:\n print(\"Telnet Read Very eager\")\n out = self.telnet_con.read_very_eager()\n return out", "title": "" }, { "docid": "125bba9e200c34555b6b354884f3fdc6", "score": "0.4807634", "text": "def _consume_until(line_iter, end_re):\n ret = []\n for line in line_iter:\n if end_re.search(line):\n break\n ret.append(line)\n return ret", "title": "" }, { "docid": "d2d8ffe56d3a65e60cd90952a6d6af02", "score": "0.47940785", "text": "def _read_response( self, ref, timeout_ms=None, callback=None, wait_buff=None ):\n\t\t#self._rbuf.clear() # Clear\n\t\tt = time.ticks_ms()\n\t\ts = self.uart.readline()\n\t\twhile (timeout_ms==None) or ( time.ticks_diff( time.ticks_ms(), t ) < timeout_ms ) :\n\t\t\tself.dmesg( s, prefix='<<--' )\n\t\t\tif self.is_empty_buff(s):\n\t\t\t\t# Tru tu read next line -> restart processing loop\n\t\t\t\ts = self.uart.readline()\n\t\t\t\tcontinue\n\n\t\t\t# Want a specific response?\n\t\t\tif wait_buff and (wait_buff in s):\n\t\t\t\treturn True\n\t\t\t# Wait for standard buffer responses (Error/OK)\n\t\t\tif s == OK_BUFF:\n\t\t\t\treturn True # OK received\n\t\t\tif s == ERROR_BUFF:\n\t\t\t\traise CommandError( ref )\n\t\t\t# process Unsollicited_Result_Code\n\t\t\ts_urc = self._process_URC( s ) # May returns NULL, ERROR_BUFF or OK_BUFF\n\t\t\tif s_urc:\n\t\t\t\t# s_URC returned OK_BUFF it means that it managed somthing\n\t\t\t\tif s_urc == OK_BUFF:\n\t\t\t\t\t# Abord processing here and prepare to process the next read\n\t\t\t\t\ts = self.uart.readline()\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\t# process the response as the next buffer line\n\t\t\t\t\ts = s_urc\n\t\t\t\t\tcontinue\n\t\t\t# if callback --> call it\n\t\t\tif callback:\n\t\t\t\tcallback( s )\n\t\t\telse:\n\t\t\t\t# Accumulate response in the buffer\n\t\t\t\tpass\n\t\t\t# Process Next buffer line\n\t\t\ts = self.uart.readline()\n\t\treturn False # Timeout received", "title": "" }, { "docid": "1cf9fbc0600effb5ef00da0126d5880c", "score": "0.47702745", "text": "def listen_to_device(self):\n while self._is_open:\n try:\n msg = self.serialport.readline()\n if msg != b'':\n msg = msg.decode().strip()\n return msg\n\n except Exception as e:\n self.logger.exception(SerialDeviceReadError(e))", "title": "" }, { "docid": "84000000d51fec112b1eeb99f7e67d9e", "score": "0.47523087", "text": "def _readline(self, fh, timeout=None):\n if timeout is None:\n timeout = self.timeout\n\n # standard blocking readline if no timeout is set\n if not timeout:\n return fh.readline()\n\n line = ''\n fhArray = [fh]\n while not line.endswith('\\n'):\n # wait until there is something to read, with a timeout\n (readlist, _, _) = select_timeout(timeout, fhArray)\n if not readlist:\n # if we have some partial data, return that first!\n # we'll be called again, and the next time can raise the error\n if line:\n return line\n elif self.boot_phase:\n raise TimeoutError('waiting for victim to boot')\n else:\n raise TimeoutError('waiting for test output from victim')\n # read a single character, to avoid blocking\n # FIXME: there must be a better way to do nonblocking IO!\n c = fh.read(1)\n # can see EOF if fh is a telnet socket that was closed in the meantime\n if c == '':\n raise EOFError('read from sub-process returned EOF')\n line += c\n return line", "title": "" }, { "docid": "bf95caa4a01c86c8f761d81cd249a379", "score": "0.47422114", "text": "def read(inport):\n def read_ahead(token):\n if token == '(':\n L = []\n while True:\n token = 
inport.next_token()\n if token == ')':\n return L\n else:\n L.append(read_ahead(token))\n elif token == ')':\n raise SyntaxError('unexpected )')\n elif token in quotes:\n return [quotes[token], read(inport)]\n elif token in eof_object:\n raise SyntaxError('unexpected EOF in list')\n else:\n return atom(token)\n # body of read:\n token1 = inport.next_token()\n return eof_object if token1 is eof_object else read_ahead(token1)", "title": "" }, { "docid": "6345027a76a338443c9ea0ebaccb384f", "score": "0.47417018", "text": "def readNetworkLoop ( self ):\n\n # Sometimes messages are cut off mid-line by read(); store the leftover\n leftover = ''\n self.conn.settimeout( 1.0 )\n\n while True:\n # Try reading data\n try:\n rawIn = self.conn.recv ( 1024 )\n networkIn = rawIn.decode( \"utf-8\", \"replace\" )\n except socket.timeout as timeout:\n continue\n \n # Data may contain many lines; each line is processed seperately\n for line in networkIn.splitlines( True ):\n\n if line[-2:] == '\\r\\n':\n # strip trailing newlines, prepend leftover line, and dispatch the event\n line = line.rstrip( '\\r\\n' )\n line = leftover + line\n leftover = ''\n out = \"< \" + line\n try:\n print ( out.encode('ascii', 'replace') )\n except UnicodeDecodeError as e:\n print ( out.encode() )\n\n data = self.parseLine ( line )\n self.dispatch ( data )\n\n else:\n leftover = line", "title": "" }, { "docid": "0b265c7667bd132f757671745f4467a5", "score": "0.47263974", "text": "def retrlines(self, cmd, callback = None):\r\n if callback is None: callback = print_line\r\n resp = self.sendcmd('TYPE A')\r\n conn = self.transfercmd(cmd)\r\n fp = conn.makefile('rb')\r\n while 1:\r\n line = fp.readline()\r\n if self.debugging > 2: print ('*retr*', repr(line))\r\n if not line:\r\n break\r\n if line[-2:] == CRLF:\r\n line = line[:-2]\r\n elif line[-1:] == '\\n':\r\n line = line[:-1]\r\n callback(line)\r\n fp.close()\r\n conn.close()\r\n return self.voidresp()", "title": "" }, { "docid": "4f38fcda96c01c0ea93e5ca5e8426a87", "score": "0.47173205", "text": "def re_rsearch(pattern, text, chunk_size=...):\n ...", "title": "" }, { "docid": "c4e0ff6b01cdb45f5d9fe25b33f8b99f", "score": "0.4716639", "text": "def test_tcp_port(host, port=23, timeout=5, check_result=False,\n expected_result=''):\n try:\n t = telnetlib.Telnet(host, port, timeout)\n if check_result:\n result = t.read_some()\n t.close()\n return result.startswith(expected_result)\n except (socket.timeout, socket.error):\n return False\n\n t.close()\n return True", "title": "" }, { "docid": "b7599a6539564a1ef639736a2fdc4ec4", "score": "0.47109556", "text": "def readline(self):\n\t\tex = None\n\t\twhile True:\n\t\t\ta = sys.stdin.readline().rstrip('\\n')\n\t\t\t# handle exceptions\n\t\t\tm = self.exception_re.match(a)\n\t\t\tif m:\n\t\t\t\tif m.group(1) == '!':\n\t\t\t\t\tself.log('W','exception from device: {0}'.format(m.group(2)))\n\t\t\t\t\tex = Rts2Exception(m.group(2))\n\t\t\t\telif m.group(1) == '&':\n\t\t\t\t\traise Rts2NotActive()\n\t\t\telif ex:\n\t\t\t\traise ex\n\t\t\telse:\n\t\t\t\treturn a", "title": "" }, { "docid": "c76885293721f40e122cc6e5e30325c7", "score": "0.4683225", "text": "def read(pipe, funcs):\n\tfor line in iter(pipe.readline, b''):\n\t\tfor func in funcs:\n\t\t\tfunc(line.decode(\"utf-8\"))\n\tpipe.close()", "title": "" }, { "docid": "98723b721073f648e18f9c2e95ad998a", "score": "0.4681667", "text": "def __init__(self,ipaddr=None,ipPort=5025):\n if ipaddr == None: ipaddr = \"192.168.100.13\"\n if ipPort == None: ipPort = 5025\n \n self.atn = 
telnetlib.Telnet(ipaddr,ipPort,timeout=10)", "title": "" }, { "docid": "476737d6523100409f8408f33edec936", "score": "0.4674914", "text": "def apc_telnet_common_action(cls, telnet, check_str, action):\n telnet.read_until(check_str.encode())\n telnet.write(action.encode() + b'\\r\\n')", "title": "" }, { "docid": "467f59a2651b2dd75b6ef8a22786027c", "score": "0.4668259", "text": "def _request(self, request):\n\n logger.warning(\"Connecting the rig at: {}:{}\".format(self.hostname,\n self.port))\n con = telnetlib.Telnet(self.hostname, self.port)\n con.write(('%s\\n' % request).encode('ascii'))\n response = con.read_some().decode('ascii').strip()\n con.write('c\\n'.encode('ascii'))\n return response", "title": "" }, { "docid": "ec7806fac767c72f0712368b2d73f368", "score": "0.4655152", "text": "def _read_deleg(t, env, funct=None, response=NFS4_OK):\n c = env.c1\n count = c.cb_server.opcounts[OP_CB_RECALL]\n c.init_connection('pynfs%i_%s' % (os.getpid(), t.code), cb_ident=0)\n _get_deleg(t, c, c.homedir + [t.code], funct, response)\n _cause_recall(t, env)\n _verify_cb_occurred(t, c, count)", "title": "" }, { "docid": "edac4c59a50f8c86f22dde190b111ebc", "score": "0.46480367", "text": "def run(self):\n while not self.stoprequest.isSet():\n if self.port :\n rcv = self.port.read(256)\n else :\n rcv = \"\"\n #begin parse:\n i = 0\n #print(\"DEBUG: len rcv=\" + \"%d\"%len(rcv))\n if len(rcv)>1:\n while i < len(rcv):\n self.parse_buffer(rcv[i])\n i += 1", "title": "" }, { "docid": "5aa510e483cb8762053cd09c3089fc04", "score": "0.46444026", "text": "def read_list(self, addrlist):\n # Check addresses.\n if any(addr / 0x100000 for addr in addrlist): # any address is out of range\n raise ValueError('Some addresses are wrong.')\n\n if any(addr < 0x20 for addr in addrlist):\n raise NotImplementedError # no sequential reads for link interface addresses.\n\n return retry_on_timeout(self.__class__._read_vme)(self, addrlist)", "title": "" }, { "docid": "9348f67122c8dc4e197f632db1b54e75", "score": "0.46362394", "text": "def readlines(self, sizehint=None, eol=LF):\r\n if self.timeout is None:\r\n raise ValueError(\"Serial port MUST have enabled timeout for this function!\")\r\n leneol = len(eol)\r\n lines = []\r\n while True:\r\n line = self.readline(eol=eol)\r\n if line:\r\n lines.append(line)\r\n if line[-leneol:] != eol: # was the line received with a timeout?\r\n break\r\n else:\r\n break\r\n return lines", "title": "" }, { "docid": "2f88af4a392dfb58986afa1637397e3f", "score": "0.46285746", "text": "def read(self, length = None, end = None, decode = True, lines = 1, reply = None, \n\t\t\treply_retryAttempts = 0, reply_retryDelay = 0, reply_retryPrintError = False):\n\n\t\t\tif (not self.isOpen()):\n\t\t\t\twarnings.warn(f\"Serial port has not been opened yet for {self.__repr__()}\\n Make sure that ports are available and then launch this application again\", Warning, stacklevel = 2)\n\t\t\t\treturn\n\n\t\t\tif (reply is not None):\n\t\t\t\tif (not isinstance(reply, bytes)):\n\t\t\t\t\treply = reply.encode(\"utf-8\")\n\n\t\t\tif (end is None):\n\t\t\t\tif (length is None):\n\t\t\t\t\tlength = 1\n\t\t\t\tmessage = self.device.read(length)\n\t\t\telif (end == \"\\n\"):\n\t\t\t\tif (lines <= 1):\n\t\t\t\t\tif (length is None):\n\t\t\t\t\t\tlength = -1\n\t\t\t\t\tmessage = self.device.readline(length)\n\t\t\t\telse:\n\t\t\t\t\tmessage = self.device.readlines(lines)\n\t\t\telse:\n\t\t\t\tmessage = b\"\"\n\n\t\t\t\tif (not isinstance(end, bytes)):\n\t\t\t\t\tend = end.encode(\"utf-8\")\n\t\t\t\tif (length is 
None):\n\t\t\t\t\tlength = 1\n\n\t\t\t\tlinesRead = 0\n\t\t\t\twhile True:\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif (not self.isOpen()):\n\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t\tvalue = self.device.read(length)\n\t\t\t\t\t\tmessage += value\n\t\t\t\t\t\tif (end in value):\n\t\t\t\t\t\t\tlinesRead += 1\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\tif (linesRead >= lines):\n\t\t\t\t\t\tbreak\n\n\t\t\tif (reply is not None):\n\t\t\t\ttry:\n\t\t\t\t\tself.device.write(reply)\n\t\t\t\texcept Exception as error_1:\n\t\t\t\t\tif (reply_retryPrintError):\n\t\t\t\t\t\ttraceback.print_exception(type(error_1), error_1, error_1.__traceback__)\n\t\t\t\t\tattempts = 0\n\t\t\t\t\twhile (attempts > reply_retryAttempts):\n\t\t\t\t\t\ttry: \n\t\t\t\t\t\t\tself.device.write(reply)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept Exception as error_2:\n\t\t\t\t\t\t\tif (reply_retryPrintError):\n\t\t\t\t\t\t\t\ttraceback.print_exception(type(error_2), error_2, error_2.__traceback__)\n\t\t\t\t\t\t\ttime.sleep(reply_retryDelay / 1000)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn False\n\n\t\t\tif (decode):\n\t\t\t\tmessage = message.decode(\"utf-8\")\n\n\t\t\treturn message", "title": "" }, { "docid": "1e0e60a85e92584301b8f0230f3c8972", "score": "0.46230733", "text": "async def _read_tcp_data(self, msg, func=str):\n self.writer.write(msg.encode())\n data = await self.reader.readline()\n row = data.decode().strip()\n if \" is \" in row:\n return func(row.split(\" is \")[-1])\n return func(row.split(\" \")[-1])", "title": "" }, { "docid": "72d2894d00cb776e841695381f7d793a", "score": "0.46205077", "text": "def test_readlineSizeInDelimiter(self):\n d = self.s.readline(size=28)\n d.addCallback(self._cbGotData, \"I was angry with my friend:\\r\")\n d.addCallback(lambda _: self.s.readline())\n d.addCallback(self._cbGotData, \"\\nI told my wrath, my wrath did end.\\r\\n\")", "title": "" }, { "docid": "950d5be354967e079b1d18434a205cfb", "score": "0.4616929", "text": "def Readout(self):\n buf = ''\n out = []\n while True:\n if not self.blocking:\n data = self.tn.read_eager().decode()\n if data == '':\n return None\n buf += data\n else:\n buf += self.tn.read_some().decode()\n lines = buf.splitlines()\n if len(lines) > 1:\n for buf in lines[:-1]:\n if buf:\n out.append(buf)\n buf = lines[-1]\n if buf == '> ':\n return out", "title": "" }, { "docid": "d01733c330ac73a9a55ece247a1a9097", "score": "0.46053046", "text": "def WaitForString(inf, pattern, timeout=0, max_len=0, eat_to_eol=True,\n reset_on_activity=False):\n if timeout:\n end_time = time.time() + timeout\n else:\n end_time = 0\n\n if _DEBUG_READ:\n Trace('WaitForString: \"%s\", %.1f' % (pattern, timeout))\n\n buf = array.array('B') # unsigned char array\n eating = False\n while True:\n if end_time:\n remaining = end_time - time.time()\n if remaining <= 0:\n Trace('Timeout expired after %.1f seconds' % timeout)\n return None\n else:\n remaining = None\n\n if IsDataAvailable(inf, remaining):\n if reset_on_activity and timeout:\n end_time = time.time() + timeout\n\n buf.fromfile(inf, 1)\n if _DEBUG_READ:\n c = buf.tostring()[-1:]\n ci = ord(c)\n if ci < 0x20: c = '.'\n if _DEBUG_READ > 1:\n print 'read [%c] 0x%02x' % (c, ci)\n\n if not eating:\n if buf.tostring().endswith(pattern):\n if eat_to_eol:\n if _DEBUG_READ > 1:\n Trace('Matched; eating to EOL')\n eating = True\n else:\n ShowTimeout(timeout, end_time)\n return buf\n if _DEBUG_READ > 2:\n print '/%s/ ? 
\"%s\"' % (pattern, buf.tostring())\n else:\n if buf.tostring()[-1:] == '\\n':\n ShowTimeout(timeout, end_time)\n return buf\n\n if max_len and len(buf) >= max_len: return None", "title": "" }, { "docid": "3dd5167c7f8d504f759a14c9a2553a2c", "score": "0.4581808", "text": "def retrlines(self, cmd, callback = None):\r\n if callback is None: callback = print_line\r\n resp = self.sendcmd('TYPE A')\r\n with self.transfercmd(cmd) as conn, \\\r\n conn.makefile('r', encoding=self.encoding) as fp:\r\n while 1:\r\n line = fp.readline()\r\n if self.debugging > 2: print('*retr*', repr(line))\r\n if not line:\r\n break\r\n if line[-2:] == CRLF:\r\n line = line[:-2]\r\n elif line[-1:] == '\\n':\r\n line = line[:-1]\r\n callback(line)\r\n return self.voidresp()", "title": "" }, { "docid": "deb2c50ea6fcc10e8336fb56fd979288", "score": "0.45759445", "text": "def _read(\n self,\n reader: Callable[[], bytes],\n count: int,\n end_indicator_checker: Callable[[bytes], bool],\n suppress_end_en: bool,\n termination_char: Optional[int],\n termination_char_en: bool,\n timeout_exception: Type[Exception],\n ) -> Tuple[bytes, StatusCode]:\n # NOTE: Some interfaces return not only a single byte but a complete\n # block for each read therefore we must handle the case that the\n # termination character is in the middle of the block or that the\n # maximum number of bytes is exceeded\n\n # Turn the termination_char store as an int in VISA attribute in a byte\n term_char = (\n int_to_byte(termination_char) if termination_char is not None else b\"\"\n )\n\n finish_time = None if self.timeout is None else (time.time() + self.timeout)\n out = bytearray()\n while True:\n try:\n current = reader()\n except timeout_exception:\n return out, StatusCode.error_timeout\n\n if current:\n out.extend(current)\n end_indicator_received = end_indicator_checker(current)\n if end_indicator_received:\n if not suppress_end_en:\n # RULE 6.1.1\n return bytes(out), StatusCode.success\n else:\n if termination_char_en and (term_char in current):\n # RULE 6.1.2\n # Return everything up to and including the termination\n # character\n return (\n bytes(out[: out.index(term_char) + 1]),\n StatusCode.success_termination_character_read,\n )\n elif len(out) >= count:\n # RULE 6.1.3\n # Return at most the number of bytes requested\n return (bytes(out[:count]), StatusCode.success_max_count_read)\n\n if finish_time and time.time() > finish_time:\n return bytes(out), StatusCode.error_timeout", "title": "" }, { "docid": "11da6cf0410a78b9d5e6f69a22cfffa3", "score": "0.45630312", "text": "def handle_read(self):\n delimiter = b'\\r\\n\\r\\n'\n rsp_str = u''\n try:\n recv_buf = self.recv(5 * 1024 * 1024)\n if recv_buf == b'':\n raise Exception(\"_AsyncNetworkManager : remote server close\")\n self.rsp_buf += recv_buf\n loc = self.rsp_buf.find(delimiter)\n while loc >= 0:\n rsp_binary = self.rsp_buf[0:loc]\n loc += len(delimiter)\n self.rsp_buf = self.rsp_buf[loc:]\n\n rsp_str = binary2str(rsp_binary)\n\n self.handler_ctx.recv_func(rsp_str)\n loc = self.rsp_buf.find(delimiter)\n except Exception as e:\n if isinstance(e, IOError) and e.errno == 10035:\n return\n traceback.print_exc()\n err = sys.exc_info()[1]\n self.handler_ctx.error_func(str(err))\n print(rsp_str)\n return", "title": "" }, { "docid": "6ad91c9db3e4d2d955a6c9b907931eb3", "score": "0.4562119", "text": "def get_one_fw_telnet(self, port: int, parrent_ip='192.168.0.1') -> None:\r\n self.logger.info(f\"Try telnet connect to port {port}\")\r\n tn_subswitch = {}\r\n TPLINK = {}\r\n instances_devices = {}\r\n 
newsw_ip = f'192.168.0.{100 + int(port)}'\r\n real_ip = ''\r\n threads_tn = []\r\n for ip in self.base_ip, newsw_ip:\r\n tn_b_subswitch = TN_act()\r\n TPLINK_b = DEVICES_TPLINK()\r\n instances_devices[ip] = {'tn': tn_b_subswitch,\r\n 'dev': TPLINK_b}\r\n tn_thread = Thread(name=ip, target=tn_b_subswitch.telnet_connect, args=(\r\n ip, TPLINK_b.connect_data['port'], TPLINK_b.connect_data['user'],\r\n TPLINK_b.connect_data['password']), kwargs={\"timeout_tn\": 30})\r\n tn_thread.start()\r\n threads_tn.append(tn_thread)\r\n\r\n for thread in threads_tn:\r\n thread.join()\r\n\r\n for ip, inst_dev in instances_devices.items():\r\n if 'OK' in (inst_dev['tn']).status_connect:\r\n if ip == newsw_ip:\r\n self.actual_table_devices['main_table'].setdefault(port, {})['actual_firmware'] = 'Error'\r\n self.logger.info(f\"Telnet connect to old ip {ip} from port {port} - OK\")\r\n self.logger.error(f\"Error! Firmware update is not successful. Ip {ip} is old. Port: {port}\")\r\n return\r\n else:\r\n tn_subswitch = inst_dev['tn']\r\n TPLINK = inst_dev['dev']\r\n if tn_subswitch == {}:\r\n self.logger.error(f\"Error connect to ip{ip} from port {port}.\")\r\n self.logger.error(f\"Error connect ip {self.base_ip}: \"\r\n f\"{(instances_devices[self.base_ip]['tn']).status_connect}\")\r\n self.logger.error(f\"Error connect ip {newsw_ip} : {(instances_devices[newsw_ip]['tn']).status_connect}\")\r\n self.actual_table_devices['main_table'][port]['actual_firmware'] = 'Error'\r\n return\r\n\r\n self.logger.info(f\"Try to get actual fw on port {port}\")\r\n actual_firmware = tn_subswitch.get_actual_fw()\r\n if actual_firmware != '':\r\n actual_firmware = actual_firmware.decode('utf-8')\r\n actual_firmware = actual_firmware.replace('\\r', '')\r\n actual_firmware = actual_firmware.replace('-', '')\r\n actual_firmware = actual_firmware.lstrip()\r\n self.actual_table_devices['main_table'][port]['actual_firmware'] = actual_firmware\r\n self.actual_table_devices['main_table'][port]['firmware_up'] = 'Ok'\r\n else:\r\n self.actual_table_devices['main_table'][port]['actual_firmware'] = actual_firmware\r\n self.actual_table_devices['main_table'][port]['firmware_up'] = 'Error'", "title": "" }, { "docid": "c535466f2c592ceb4e25d4b1f4434307", "score": "0.45528466", "text": "def re_reader(item_re_str, fd, size, fname, output_tail=False, read_buffer_size=8192):\n import re\n item_re = re.compile(item_re_str)\n buf = b\"\"\n tot = 0\n while True:\n if size:\n r = fd.read(min(read_buffer_size, size - tot))\n else:\n r = fd.read(read_buffer_size)\n tot += len(r)\n buf += r\n\n m = item_re.match(buf)\n while m:\n yield m.groups()\n buf = buf[m.end():]\n m = item_re.match(buf)\n\n if not len(r) or (size!=None and tot >= size):\n if size != None and tot < size:\n raise DataError(\"Truncated input: \"\n \"Expected {0} bytes, got {1}\"\n .format(size, tot), fname)\n if len(buf):\n if output_tail:\n yield [buf]\n else:\n print(\"Couldn't match the last {0} bytes in {1}. 
\"\n \"Some bytes may be missing from input.\".format(len(buf), fname))\n break", "title": "" }, { "docid": "c849de0488eb3acf5dcef5c9ad0ce4e0", "score": "0.4539722", "text": "def read_trn(\n trn: Union[TextIO, str],\n warn: bool = True,\n processes: int = 0,\n chunk_size: int = config.DEFT_CHUNK_SIZE,\n) -> List[Tuple[str, List[str]]]:\n return list(read_trn_iter(trn, warn, processes, chunk_size))", "title": "" }, { "docid": "c62fd6c1a71c1f0bc2b0782131bfda69", "score": "0.44870275", "text": "def _readline(self) -> bytes:\r\n stamp = time.monotonic()\r\n while b\"\\r\\n\" not in self._buffer:\r\n avail = self._available()\r\n if avail:\r\n if self._sock_type == SOCK_STREAM:\r\n self._buffer += _the_interface.socket_read(self._socknum, avail)[1]\r\n elif self._sock_type == SOCK_DGRAM:\r\n self._buffer += _the_interface.read_udp(self._socknum, avail)[1]\r\n if (\r\n self._timeout\r\n and not avail\r\n and 0 < self._timeout < time.monotonic() - stamp\r\n ):\r\n self.close()\r\n raise RuntimeError(\"Didn't receive response, failing out...\")\r\n firstline, self._buffer = self._buffer.split(b\"\\r\\n\", 1)\r\n gc.collect()\r\n return firstline", "title": "" }, { "docid": "c85371905051ae3969874005341c4d79", "score": "0.44857273", "text": "def readline(self, timeout=None):\n # we break this up into multiple short reads to allow keyboard \n # interrupts\n start = time.time()\n ret = \"\"\n while True:\n if timeout is not None:\n remaining = start + timeout - time.time()\n if remaining <= 0:\n return \"\"\n else:\n remaining = 1\n \n try:\n return self.get(timeout=min(0.1, remaining))\n except Empty:\n pass", "title": "" }, { "docid": "0b911cd8905f0dc918bdfb5024e60056", "score": "0.44764334", "text": "async def read(self, reader: StreamReaderProtocol) -> ProxyProtocolResult:\n data = bytearray()\n while True:\n try:\n return self._parse(data)\n except ProxyProtocolWantRead as want_read:\n try:\n data += await self._handle_want(reader, want_read)\n except (EOFError, ConnectionResetError) as exc:\n return ProxyProtocolResultUnknown(exc)", "title": "" }, { "docid": "db7b1ad8e44e9227d19041fe31d6791a", "score": "0.44759342", "text": "def passive_read(before_read: Tuple[int, str], handle: Callable[[Callable], None],\n after_read: Tuple[int, str]) -> 1:\n\n nonlocal passive_client\n if not passive_client:\n reply(425, 'Enter passive mode first')\n else:\n reply(*before_read)\n def get_chunk(n): return Server.read_n_till_close(passive_client, n)\n handle(get_chunk)\n passive_client.close()\n passive_client = None\n reply(*after_read)", "title": "" }, { "docid": "f6ea828a694a392629a3da5a7e7d587f", "score": "0.44675878", "text": "def read(self, n):\n data = self.do_read(n)\n try:\n return data.decode(self.encoding, self.errors)\n except ValueError:\n # XXX Sigh. 
decode() doesn't handle incomplete strings well.\n # Use the retry strategy from codecs.StreamReader.\n for i in range(9):\n more = self.do_read(1)\n if not more:\n raise\n data += more\n try:\n return data.decode(self.encoding, self.errors)\n except ValueError:\n pass\n raise", "title": "" }, { "docid": "1ff7d317b98155413085486e1ad1cbdd", "score": "0.4452837", "text": "def test_expect(self):\n want = [b'x' * 10, b'match', b'y' * 10]\n telnet = test_telnet(want)\n (_,_,data) = telnet.expect([b'match'])\n self.assertEqual(data, b''.join(want[:-1]))", "title": "" }, { "docid": "4d79fd6af7101ce56529395301815301", "score": "0.44484237", "text": "def readline(self):\n data = \"\"\n while True:\n try:\n iota = self.read(1)\n except socket.timeout:\n continue\n if not iota:\n break\n else:\n data += iota\n if iota in \"\\n\":\n break\n return data", "title": "" }, { "docid": "04944fc0343176d19771b2173758622d", "score": "0.4446869", "text": "def ReadUdpThreadFunc(self): # Should be called from thread\n\n self.isDataReceived = False # Initially nothing received\n\n while True:\n data = self.ReceiveData() # Blocks (in thread) until data is returned (OR MAYBE UNTIL SOME TIMEOUT AS WELL)\n self.dataRX = data # Populate AFTER new data is received\n self.isDataReceived = True\n # When it reaches here, data received is available", "title": "" }, { "docid": "07c7cd15c9d9c8cd852184dd37b67514", "score": "0.44435966", "text": "def connect(self):\n try:\n self.telnet = telnetlib.Telnet(self.IP, self.Query)\n except telnetlib.socket.error:\n raise TS3Error(10, 'Can not open a link on the port or IP')\n output = self.telnet.read_until('TS3', self.Timeout)\n if output.endswith('TS3') == False:\n raise TS3Error(20, 'This is not a Teamspeak 3 Server')\n else:\n return True", "title": "" }, { "docid": "539b835f20b15a330ddf3b7b8abd1bf8", "score": "0.44412723", "text": "def reader(self):\n try:\n while self._reader_alive:\n # read all that is there or wait for one byte\n # data = self.serial.read(self.serial.in_waiting or 1)\n data = self.serial.readline()\n if data:\n # text = self.rx_decoder.decode(data)\n try:\n if chr(data[-1]) == '\\n':\n text, length = self.rx_decoder(data[:-1]) # get rid of newline\n # text = ''.join([chr(c) for c in data])\n self.textStream.emit(text)\n else:\n print('PAnIc:', data, data[-1], chr(data[-1]) == '\\n')\n except e:\n print('parse error:', e)\n pass\n\n except serial.SerialException:\n self.alive = False\n # self.console.cancel()\n raise # XXX handle instead of re-raise?", "title": "" }, { "docid": "bb87ed811169d87dab95e8e5132adbce", "score": "0.44402897", "text": "def wait_for(self, wait_pattern, timeout=5.0, verbose=False,\n remove_matching=[], **kwargs):\n\n with self.lock:\n gen = self.read_timeout(timeout, **kwargs)\n ret = []\n for line, groups in _wait_for(gen, wait_pattern, verbose=verbose,\n remove_matching=remove_matching):\n ret.append(line)\n if groups is not None:\n return ret, groups\n\n return False", "title": "" }, { "docid": "6d266f8136425e285f27fb7b790d363a", "score": "0.44401294", "text": "def tail_file_until(self, filename, start_pattern, callback, copy_pattern='', stop_pattern='', time_to_wait=None,\n read_from=10, conn=None):\n log.debug('Tailing File {0} for pattern \"{1}\" for {2}'.format(filename, stop_pattern, str(time_to_wait)))\n\n if not stop_pattern:\n cmd = \"\"\"sh -c '{0} -f -n {1} {2} | awk \"/{3}/ {{print; exit;}}\" ' \"\"\".format(\n self.cmd.tail(), read_from, filename, start_pattern)\n else:\n cmd = (\"\"\"sh -H -c ''{0} -f -n {1} {2} |\n awk 
\"{{ if (/{3}/ && !triggered) {{ triggered=1; print; }}\n else {{ if (triggered && /{4}/) {{ print; if (/{5}/) {{ exit; }} }} }} }}\"' \"\"\".\n format(self.cmd.tail(self.gnu_path), read_from, filename, start_pattern,\n copy_pattern, stop_pattern))\n\n with conn:\n return conn.send_cmd(cmd).expect_prompt(timeout=time_to_wait)", "title": "" }, { "docid": "d58b4669dd09a559b5471ed0afe3e147", "score": "0.44291583", "text": "def next(self, timeout=0):\n try:\n waitin, _waitout, _waiterror = select.select((self.streamSock,), (), (), timeout)\n if not waitin: return\n else:\n gpsd_response = self.streamSock.makefile() # '.makefile(buffering=4096)' In strictly Python3\n self.response = gpsd_response.readline()\n return self.response\n\n except OSError as error:\n sys.stderr.write('The readline OSError in GPSDSocket.next is this: ', error)", "title": "" }, { "docid": "366a363d40a545099a584bf9455d47b6", "score": "0.44086805", "text": "def readlines(self, sizehint=None, timeout=1):\n\t\tlines = []\n\t\twhile 1:\n\t\t\tline = self.readline(timeout=timeout)\n\t\t\tif line:\n\t\t\t\tlines.append(line)\n\t\t\tif not line or line[-1:] != '\\n':\n\t\t\t\tbreak\n\t\treturn lines", "title": "" }, { "docid": "ebb1a8184018293398ed69e44cfd1dc8", "score": "0.4406158", "text": "def _readline(self):\n logging.info('%s: reading line', self.port)\n if len(self._lines) > 1:\n return self._lines.pop(0)\n\n tail = ''\n if len(self._lines):\n tail = self._lines.pop()\n\n try:\n tail += self._read()\n except socket.error:\n logging.exception('%s: No new data', self.port)\n time.sleep(0.1)\n\n self._lines += LINESEPX.split(tail)\n if len(self._lines) > 1:\n return self._lines.pop(0)", "title": "" }, { "docid": "adf68260209766b38b4784ef11d7fae2", "score": "0.44004425", "text": "def pastegrep(expression, cb=save_paste, interval=20, endpoint='http://pastebin.com/ajax/realtime_data.php'):\n while True:\n fetch(expression, endpoint)\n sleep(interval)", "title": "" }, { "docid": "d11899be7dd0212eda87f3df6b980a5c", "score": "0.43954945", "text": "def telnet(connection_string, command, password, command_args):\n user, _, host = parse_connection_string(connection_string)\n\n try:\n executor = TelnetExecutor(host, user, password)\n except ValueError as err:\n click.echo(err)\n return None\n\n res = json_repr(*executor.execute(command, parameters=command_args))\n click.echo(res)", "title": "" }, { "docid": "a475f186188c3d1430d7f815ebcfeb5c", "score": "0.43857557", "text": "def getline(self):\n if not hasattr(self, 'net_cmd_lines'):\n self.net_cmd_lines = []\n\n if self.net_cmd_lines:\n return self.net_cmd_lines.pop(0)\n\n partial = getattr(self, 'net_cmd_partial', '')\n fd = self.fp.fileno()\n while not self.net_cmd_lines:\n data_available = select([fd], [], [], self.timeout())[0]\n if data_available:\n buf = os.read(fd, 1024)\n if buf == '':\n if self.debug():\n carp('Unexpected EOF on command channel')\n\n self.close()\n return None\n\n # Prepend the last data read and then break into lines.\n buf = partial + buf\n buf = re.split(r'\\015?\\012', buf)\n partial = buf.pop(len(buf) - 1)\n self.net_cmd_lines.extend([x + '\\n' for x in buf])\n else:\n if self.debug():\n carp('Timeout')\n\n return None\n\n self.net_cmd_partial = partial\n return self.net_cmd_lines.pop(0)", "title": "" }, { "docid": "179343a31a7cedfff6128f2b2ef281e3", "score": "0.4385227", "text": "def read_until_regex(stream, regex, ignore_eof=False):\n name = pypdfBytes(\"\")\n\n while True:\n tok = stream.read(16)\n\n if not tok:\n # stream has truncated prematurely\n 
if ignore_eof:\n return name\n raise PdfStreamError(\"Stream has ended unexpectedly\")\n m__ = regex.search(tok)\n if m__ is not None:\n name += tok[: m__.start()]\n stream.seek(m__.start() - len(tok), 1)\n break\n name += tok\n\n return name", "title": "" }, { "docid": "2f2121fe924f06c00bcb9bfdcf3cf046", "score": "0.43745318", "text": "def read(self, header: Text):\n if header not in self.in_headers:\n raise ValueError('Invalid header')\n try:\n data = self.arduino.readline()\n data = data.decode('utf-8').strip()\n\n for i in range(50):\n if data == '':\n raise TimeoutError('Reached end of buffer')\n elif header not in data:\n time.sleep(0.05)\n data = self.arduino.readline()\n data = data.decode('utf-8').strip()\n else:\n self.log.info('RECV:' + data)\n return data\n raise TimeoutError('Not found in last 10 messages')\n except TimeoutError as e:\n raise e\n except Exception as e:\n print('err: ' + str(e))", "title": "" }, { "docid": "35f94105f97ee5643b4d3c15d6b05fdb", "score": "0.43598694", "text": "def _reader(self):\n # Thread will wait at this read until it connects.\n # Connection should occur as soon as _write_pipe has connected.\n read_pipe = open(READ_NAME, 'r')\n message = ''\n pipe_ok = True\n while pipe_ok:\n line = read_pipe.readline()\n # Stop timer as soon as we get first line of response.\n stop_time = time.time()\n while pipe_ok and line != '\\n':\n message += line\n line = read_pipe.readline()\n if line == '':\n # No data in read_pipe indicates that the pipe is broken\n # (Audacity may have crashed).\n PipeClient.reader_pipe_broken.set()\n pipe_ok = False\n if self.timer:\n xtime = (stop_time - self._start_time) * 1000\n message += 'Execution time: {0:.2f}ms'.format(xtime)\n self.reply = message\n PipeClient.reply_ready.set()\n message = ''\n read_pipe.close()", "title": "" }, { "docid": "e12bcbc348cf841dffdb52d447e80140", "score": "0.4357424", "text": "def expect(tty, exp_strings, ignore=\"\", timeout=TIMEOUT, raise_timeout=True,\n raise_os=True):\n\n string = \"\";\n while True:\n [read, write, error] = select.select([tty], [], [], timeout);\n if tty in read:\n try:\n char = os.read(tty, 1);\n if not char in ignore:\n string += char;\n\n except OSError:\n if raise_os:\n raise;\n else:\n return string;\n\n for exp_string in exp_strings:\n if string.find(exp_string) >= 0:\n return string;\n else:\n if raise_timeout:\n raise TimeOutError(\"Timeout while reading\", repr(string));\n else:\n return string;", "title": "" }, { "docid": "feaec0704249cc283694e130e18b5d41", "score": "0.4356364", "text": "def re_findall(self, pattern, flags=0):\n \n ret = re.findall(pattern, self.data, flags)\n \n if debug:\n print(\"re_findall: return %d slices\" % (len(ret)))\n \n return ret", "title": "" }, { "docid": "e8303d6c481a86ff46fd729221cc363d", "score": "0.43491787", "text": "def readlines(self, sizehint=None, timeout=1):\r\n lines = []\r\n while 1:\r\n line = self.readline(timeout=timeout)\r\n if line:\r\n lines.append(line)\r\n if not line or line[-1:] != '\\n':\r\n break\r\n return lines", "title": "" }, { "docid": "a91a429a1e17d2b48de166f2bff029aa", "score": "0.43418136", "text": "def readline(self, bts=16,*args, **kwargs):\n msg = self.connection_file.readline()\n logger.debug(\"Retrieved %r from %s\", msg, self.shortname)\n return msg", "title": "" }, { "docid": "a3f94261b23fb6fba64e21b496a1ee2e", "score": "0.43413782", "text": "def read(self):\n\n self.tmp_parser = Parser(self, self.stream.getStream())\n\n while True:\n try:\n data = self.stream.getStream().recv(DEFAULT_BYTES)\n 
if data is not None:\n self.tmp_parser.parser(data)\n time.sleep(0.5)\n except BluetoothError, e:\n print e\n time.sleep(0.5)\n continue\n # for b in data:\n # print '0x%s, ' % b.encode('hex'),\n # print \"\"", "title": "" }, { "docid": "e99bcd0bcb7ce2c5719b0e6b9e660156", "score": "0.43328848", "text": "def testManyReaddeleg(t, env, funct=_recall, response=NFS4_OK):\n # XXX needs to use _get_deleg\n count = 100 # Number of read delegations to grab\n c = env.c1\n c.init_connection('pynfs%i_%s' % (os.getpid(), t.code), cb_ident=0)\n cbids = []\n fh, stateid = c.create_confirm(t.code, access=OPEN4_SHARE_ACCESS_READ,\n deny=OPEN4_SHARE_DENY_NONE)\n for i in range(count):\n c.init_connection('pynfs%i_%s_%i' % (os.getpid(), t.code, i), cb_ident=0)\n fh, stateid = c.open_confirm(t.code, access=OPEN4_SHARE_ACCESS_READ,\n deny=OPEN4_SHARE_DENY_NONE)\n \n # Get a read delegation\n res = c.open_file(t.code, access=OPEN4_SHARE_ACCESS_READ,\n set_recall=True,\n recall_funct=funct, recall_return=response)\n fh, stateid = c.confirm(t.code, res)\n deleg_info = res.resarray[-2].switch.switch.delegation\n if deleg_info.delegation_type == OPEN_DELEGATE_READ:\n cbids.append(c.cbid)\n if not cbids:\n t.pass_warn(\"Could not get any read delegations\")\n print \"Got %i out of %i read delegations\" % (len(cbids), count)\n # Cause them to be recalled\n fh2, stateid2 = _cause_recall(t, env)\n miss_count = 0\n for id in cbids:\n res = c.cb_server.get_recall_res(id)\n if res is None:\n miss_count += 1\n else:\n check(res, msg=\"DELEGRETURN for cb_id=%i\" % id)\n if miss_count:\n t.pass_warn(\"Recall never occurred for %i of %i read delegations\" %\n (miss_count, len(cbids)))", "title": "" }, { "docid": "0dfcaa294bec44d00a11cc0507238393", "score": "0.4330003", "text": "def __init__(self, ipaddress, port=5555, timeout=1, limit=-20):\n self.timeout = timeout # timeout for the telnet comms\n self.ipaddress = ipaddress\n self.port = port\n self.tn = itechbl12hi_common.telnet_setup(ipaddress, port, timeout)\n self.device_id = self.get_device_id() # gets the device of the telnet device, makes sure its the right one\n self.turn_off_RF() # turn off the RF output\n self.limit = limit # set the RF output limit\n # self.set_output_power_limit(limit) # set the RF output limit\n\n print(\"Opened connection to RF source \" + self.device_id) # tells the user the device has been connected to", "title": "" }, { "docid": "6750528904c8a1473bdf39c4931a81d1", "score": "0.43215543", "text": "def telnet_client(self, disp = False):\n #object field\n self.client = pexpect.spawn(\"telnet %s\" % self._ipaddr)\n if disp:\n self.client.logfile = sys.stdout\n else:\n self.client.logfile = None\n index = self.client.expect([\".*ogin:\", \"refused$\", pexpect.TIMEOUT, pexpect.EOF])\n if index != 0:\n printd(\"%s: cannot telnet connect to server\\n\" % self._ipaddr)\n return None\n self.client.send(self._username + \"\\r\")\n self.client.expect(r\".*assword:.*\")\n self.client.send(self._password + \"\\r\")\n index = self.client.expect([self.prompt, r\".*invalid.*\", pexpect.TIMEOUT])\n if index != 0:\n printd(\"%s: cannot create telnet client with credential\\n\" % self._ipaddr)\n return None\n return self.client", "title": "" }, { "docid": "7050e514f718221f0ffabe9ec5aa85c5", "score": "0.43204072", "text": "def create_tel_parser(self, file_handle):\r\n parser = DostaAbcdjmDclTelemeteredParser(\r\n file_handle, self.exception_callback)\r\n return parser", "title": "" }, { "docid": "4aeca568b4bc0f697b90667df73071af", "score": "0.43163288", "text": "def telnet(self, host, username=\"stoke\", password=\"stoke\"):\n self.username = username\n self.password = password\n self.ses = pexpect.spawn(\"telnet -E -8 \" + host)\n try:\n self.ses.expect(login_prompt_regex)\n self.login()\n except pexpect.TIMEOUT:\n misc.testerror(\"Problem connecting in cisco_telnet()\")\n return False\n self.ses.delaybeforesend = 0\n self.init_vars(host)\n return True", "title": "" }, { "docid": "c6e6ab2aee7fab9989c6b010e3bea817", "score": "0.43152368", "text": "def _parseTail(self):\n return self", "title": "" }, { "docid": "c20883565699daa4990ee1c324dcb813", "score": "0.43144414", "text": "def read(self, n):\n if self.buf:\n assert not self.atcr\n data = self.buf\n self.buf = \"\"\n else:\n data = self.do_read(n)\n\n # The following whole ugly mess is because we need to keep track of\n # exactly which line separators we have seen for self.newlines,\n # grumble, grumble. This has an interesting corner-case.\n #\n # Consider a file consisting of exactly one line ending with '\\r'.\n # The first time you read(), you will not know whether it is a\n # CR separator or half of a CRLF separator. Neither will be marked\n # as seen, since you are waiting for your next read to determine\n # what you have seen. But there's no more to read ...\n \n if self.atcr:\n if data.startswith(\"\\n\"):\n data = data[1:]\n self.CRLF = True\n if not data:\n data = self.do_read(n)\n else:\n self.CR = True\n self.atcr = False\n \n for i in range(len(data)):\n if data[i] == '\\n':\n if i > 0 and data[i-1] == '\\r':\n self.CRLF = True\n else:\n self.NL = True\n elif data[i] == '\\r':\n if i < len(data)-1 and data[i+1] != '\\n':\n self.CR = True\n \n if \"\\r\" in data:\n self.atcr = data.endswith(\"\\r\")\n data = replace_crlf_with_lf(data)\n \n return data", "title": "" }, { "docid": "d67aded5c93750c341cd4683c09a8f14", "score": "0.43138763", "text": "def try_read_until(fp, pattern, chunk_size=16384):\n assert len(pattern) > 0 and isinstance(pattern, bytes)\n\n chunk_size = max(chunk_size, len(pattern))\n\n buff = []\n tail = b''\n data = b''\n\n old_pos = fp.tell()\n\n while True:\n chunk = fp.read(chunk_size)\n if not chunk:\n break\n\n split_pos = (tail + chunk).find(pattern)\n\n if split_pos >= 0:\n split_pos += len(pattern) # Include pattern into result\n buff.append( chunk[:split_pos-len(tail)] )\n data = b''.join(buff)\n break\n else:\n tail = chunk[-len(pattern):] # Memorize last len(pattern) positions to match at next iteration\n buff.append( chunk )\n\n fp.seek(old_pos + len(data))\n return data or None", "title": "" }, { "docid": "7ac8497cd7162279fd5ef411496e69a7", "score": "0.43082702", "text": "def get_telnet(self):\n if self.telnet_enabled and self.telnet is None:\n from adapter.telnet import TelnetConsole, TelnetException\n try:\n self.telnet = TelnetConsole(self)\n except Exception:\n self.logger.warning(\"Cannot connect to telnet. 
You may not be able to simulate phonecalls and locations.\")\n return self.telnet", "title": "" }, { "docid": "434f587598486a701a15fc72bb4a67e0", "score": "0.4305032", "text": "def readline(self, *args, **kw):\n result = self.stream.readline(*args, **kw)\n self.status = self.proc.poll()\n self.check_status()\n return result", "title": "" }, { "docid": "068d507a02c88f48aa828663aaf14b1d", "score": "0.43031156", "text": "def find_all(self, data: bytes) -> List[Match]:\n pass", "title": "" }, { "docid": "d154c48b5de34321e662a73e7dfa0b58", "score": "0.429365", "text": "def parse(text):\n for parser in parsers:\n try:\n return parser(text).line()\n except Exception as e:\n print e.message\n pass", "title": "" }, { "docid": "3c91f2f84008c1f34d84033f54218169", "score": "0.42868263", "text": "def test_readline(self):\n d = self.s.readline()\n d.addCallback(self._cbGotData, 'I was angry with my friend:\\r\\n')\n return d", "title": "" }, { "docid": "84f00f4509c9d71f9e277d27bbfb8cc3", "score": "0.42801675", "text": "def _listen(self):\n self.socket.listen(5)\n lora = LoRa(mode=LoRa.LORA)\n s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n s.setblocking(False)\n _thread.start_new_thread(self._receive_from_lopy, (s,))\n\n while True:\n (client, address) = self.socket.accept()\n client.settimeout(60)\n _thread.start_new_thread(self._handle_client, (client, address, s))", "title": "" }, { "docid": "aa783728715cbe145df376a0f89edb87", "score": "0.4270651", "text": "def read_until(self, marker):\n\n if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):\n raise TypeError(pretty_message(\n '''\n marker must be a byte string or compiled regex object, not %s\n ''',\n type_name(marker)\n ))\n\n output = b''\n\n is_regex = isinstance(marker, Pattern)\n\n while True:\n if len(self._decrypted_bytes) > 0:\n chunk = self._decrypted_bytes\n self._decrypted_bytes = b''\n else:\n if self._ssl is None:\n self._raise_closed()\n to_read = libssl.SSL_pending(self._ssl) or 8192\n chunk = self.read(to_read)\n\n offset = len(output)\n output += chunk\n\n if is_regex:\n match = marker.search(output)\n if match is not None:\n end = match.end()\n break\n else:\n # If the marker was not found last time, we have to start\n # at a position where the marker would have its final char\n # in the newly read chunk\n start = max(0, offset - len(marker) - 1)\n match = output.find(marker, start)\n if match != -1:\n end = match + len(marker)\n break\n\n self._decrypted_bytes = output[end:] + self._decrypted_bytes\n return output[0:end]", "title": "" }, { "docid": "c652009c474b62a7ac264d639a8162ef", "score": "0.42615974", "text": "def read_timeout(self, timeout=5.0, delim='\\r\\n', verbose=False):\n channel = self._channel\n if channel is None:\n raise PPCommChannelClosed()\n\n with self.lock:\n t0 = time.time()\n buf = ''\n\n def check_timeout():\n if timeout is None:\n return True\n return ((time.time() - t0) <= timeout)\n\n while channel.recv_ready() or check_timeout():\n if channel.recv_ready():\n if six.PY3:\n buf += channel.recv(1024).decode('ascii')\n else:\n buf += channel.recv(1024)\n\n lines = buf.split(delim)\n if not buf.endswith(delim):\n buf = lines[-1]\n lines = lines[:-1]\n else:\n buf = ''\n\n for line in lines:\n vlog(verbose, '<- %s' % line)\n yield line.rstrip()\n\n else:\n time.sleep(0.01)\n\n if channel.recv_stderr_ready():\n line = channel.recv_stderr(1024)\n vlog(verbose, '<stderr- %s' % line)\n\n if not check_timeout():\n raise TimeoutError('Elapsed %.2f s' % (time.time() - t0))", "title": "" } ]
0216413026baa5891f5309f4e1972205
Formats event values using the helper.
[ { "docid": "43c9d59e8073cde2d1018052190f9cf9", "score": "0.62962395", "text": "def FormatEventValues(self, output_mediator, event_values):\n if not self._winevt_resources_helper:\n self._winevt_resources_helper = output_mediator.GetWinevtResourcesHelper()\n\n message_string = None\n provider_identifier = event_values.get('provider_identifier', None)\n source_name = event_values.get('source_name', None)\n message_identifier = event_values.get('message_identifier', None)\n event_version = event_values.get('event_version', None)\n if (provider_identifier or source_name) and message_identifier:\n message_string_template = self._winevt_resources_helper.GetMessageString(\n provider_identifier, source_name, message_identifier, event_version)\n if message_string_template:\n string_values = [\n string or '' for string in event_values.get('strings', [])]\n\n try:\n message_string = message_string_template.format(*string_values)\n except (IndexError, TypeError) as exception:\n logger.error((\n 'Unable to format message: 0x{0:08x} of provider: {1:s} '\n 'template: \"{2:s}\" and strings: \"{3:s}\" with error: '\n '{4!s}').format(\n message_identifier, provider_identifier or '',\n message_string_template, ', '.join(string_values), exception))\n # Unable to create the message string.\n # TODO: consider returning the unformatted message string.\n\n event_values['message_string'] = message_string", "title": "" } ]
[ { "docid": "d1973e8892563aa058b554c01322cbf0", "score": "0.61856985", "text": "def format_events(self, event):\n date = dateparser.parse(event['start']).strftime('%m/%d')\n title = event['title']\n venue = event['venue']\n id = event['id']\n\n formatted = u\"\"\"\n {} @ {} on {}\n \"\"\".format(date, title, venue)\n\n return html.unescape(formatted)", "title": "" }, { "docid": "4fa15522528bfdf45ac5c532d6954eab", "score": "0.584638", "text": "def _format_record(self, record: DAY_RECORD) -> Collection:\n\n return record.date, record.end_date, record.value", "title": "" }, { "docid": "ac1b75e951da8e1c6c24663066b1dc4a", "score": "0.5775876", "text": "def formats(self):\r\n\t\tpass", "title": "" }, { "docid": "c07e24ebe81a3ce4f93971c3b0019ca4", "score": "0.57217467", "text": "def format(record, **overrides):\n try:\n event = {}\n event.update(process_attributes)\n event.update(thread_attributes.__dict__)\n event.update(record)\n event.update(overrides)\n for k,v in event.items():\n try:\n json.dumps(v)\n except:\n event[k] = str(v)\n return json.dumps(event)\n except:\n return traceback.format_exc()", "title": "" }, { "docid": "2e432ae2ad950f730377e48998ff5c69", "score": "0.5697494", "text": "def GetFormattedField(\n self, output_mediator, field_name, event, event_data, event_data_stream,\n event_tag):\n if field_name in self._event_tag_field_names:\n return self._FormatTag(output_mediator, event_tag)\n\n callback_function = self._callback_functions.get(field_name, None)\n if callback_function:\n output_value = callback_function(\n output_mediator, event, event_data, event_data_stream)\n elif field_name in self._event_data_stream_field_names:\n output_value = getattr(event_data_stream, field_name, None)\n else:\n output_value = getattr(event_data, field_name, None)\n\n if output_value is not None and not isinstance(output_value, str):\n output_value = '{0!s}'.format(output_value)\n\n return output_value", "title": "" }, { "docid": "5ce72b0cabf7a17ed7ff8b583d56efc8", "score": "0.56809556", "text": "def _formatDataValue(self):\n return {'value': self._formatValue(),\n 'type': self.value_types.get(self.type, self.type)\n }", "title": "" }, { "docid": "ca64a849e0f6c693b8b9415261e75f14", "score": "0.56702495", "text": "def get_formatted_value(values):\n if isinstance(values, list):\n value_dict = {}\n for item in values:\n value_dict[item[0]] = item[1]\n formatted = value_dict\n else:\n formatted = values\n\n return formatted", "title": "" }, { "docid": "e3e2d0ac2aa5c5ff3e79c95e4338a2bf", "score": "0.5658007", "text": "def _FormatTag(self, output_mediator, event_tag):\n if not event_tag:\n return '-'\n\n return ' '.join(event_tag.labels)", "title": "" }, { "docid": "974340d3ca8fd728e86efa1e1ff30a3f", "score": "0.56384236", "text": "def __format_helper(self, hour: datetime, values: list) -> dict: \n return {\n hour.strftime(\"%H-%d/%m/%Y\"):[\n {'value': val[1], 'type': val[0]}\n for val in values\n ]\n }", "title": "" }, { "docid": "88ed95c171abafc79d25b13f713c9b75", "score": "0.56295335", "text": "def _format_test_value(self, form):\n raw_value = self._get_test_value(form)\n formatters = {\n 'str': str,\n 'json': json.loads,\n 'int': int,\n 'float': float,\n 'email': self._parse_email,\n }\n format_val = formatters.get(self.data_format, 'str')\n return format_val(raw_value)", "title": "" }, { "docid": "53d42a3dc49e6b2b68e839d5ae76ad56", "score": "0.5610097", "text": "def __init__(self):\n event_data_stream = events.EventDataStream()\n\n super(FieldFormattingHelper, self).__init__()\n 
self._callback_functions = {}\n self._event_data_stream_field_names = event_data_stream.GetAttributeNames()\n self._event_tag_field_names = []\n\n for field_name, callback_name in self._FIELD_FORMAT_CALLBACKS.items():\n if callback_name == '_FormatTag':\n self._event_tag_field_names.append(field_name)\n else:\n self._callback_functions[field_name] = getattr(\n self, callback_name, None)", "title": "" }, { "docid": "a7b5be5ca307eb56e60f8bbe906813e7", "score": "0.55889684", "text": "def format(payload):", "title": "" }, { "docid": "8a806cd9cab3b4dbf039ee4a0af49e33", "score": "0.5503407", "text": "def wrapper_format_input(self):\n try:\n if self.formula[0] == '{':\n return self.format_system_of_equations()\n else:\n self.formula = self.format_vertical_slashes()\n self.formula = self.format_trigon_funcs()\n return [{'formula':self.formula, 'x_range':self.x_range}]\n except Exception as error:\n print(str(error))", "title": "" }, { "docid": "c02f6acaaaf28b49905de076f3c7f7d5", "score": "0.5463876", "text": "def convert(self, *args, **kwargs):\n # Have the keyword arguments default to empty strings, in the event\n # of missing keys for string formatting\n outdict = defaultdict(str, **kwargs)\n outformat = self.format\n extras = []\n\n # Convert raw timestamps into a datetime object\n if 'ts' in outdict:\n try:\n outdict['ts'] = datetime.fromtimestamp(float(outdict['ts']))\n outdict['ts'] = outdict['ts'].strftime(self.timeformat)\n except TypeError:\n pass\n except KeyError:\n pass\n except ValueError:\n pass\n\n if \"starttime\" in outdict and isinstance(outdict[\"starttime\"], datetime):\n outdict['starttime'] = outdict['starttime'].strftime(self.timeformat)\n if \"endtime\" in outdict and isinstance(outdict[\"endtime\"], datetime):\n outdict['endtime'] = outdict['endtime'].strftime(self.timeformat)\n if 'dt' in outdict and isinstance(outdict[\"dt\"], datetime):\n outdict['dt'] = outdict['dt'].strftime(self.timeformat)\n\n # Create directional arrows\n if 'dir_arrow' not in outdict:\n if outdict.get('direction') == 'cs':\n outdict['dir_arrow'] = '->'\n elif outdict.get('direction') == 'sc':\n outdict['dir_arrow'] = '<-'\n else:\n outdict['dir_arrow'] = '--'\n\n # Convert Nones into empty strings.\n # If --extra flag used, generate string representing otherwise hidden\n # fields.\n for key, val in sorted(outdict.items()):\n if val is None:\n val = ''\n outdict[key] = val\n if self.extra:\n if key not in self.format_fields:\n extras.append(\"%s=%s\" % (key, val))\n\n # Dump the args into a 'data' field\n outdict['data'] = self.delimiter.join(map(str, args))\n\n # Create an optional 'extra' field\n if self.extra:\n if 'extra' not in self.format_fields:\n outformat = outformat[:-1] + \" [ %(extra)s ]\\n\"\n outdict['extra'] = ', '.join(extras)\n\n # Convert the output dictionary into a string that is dumped to the\n # output location.\n output = outformat % outdict\n return output", "title": "" }, { "docid": "833fe50a89c152ebd4853aa17ee9b3f4", "score": "0.54514605", "text": "def get_formatted_value(self, v):\n subjects = [f.widget.subject for f in self.fields]\n values = v.split('|')\n zipped = zip(subjects, values)\n html = '<ul style=\"padding-left: 1em;\">'\n for x, y in zipped:\n html += '<li style=\"list-style-type: disc; list-style-position:outside; width: 160px;\">%s: %s</li>' % (x, y)\n html += \"</ul>\"\n\n return html", "title": "" }, { "docid": "c44da1dbeea6a5a63726cf4d83c343a1", "score": "0.5447525", "text": "def format(self, fake_func):\n return fake_func(*self.args)", "title": "" }, { 
"docid": "791609fab83c95f90ac4c78ed4c4fa26", "score": "0.5410502", "text": "def GetFormattedEvent(self, event, event_data, event_data_stream, event_tag):\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=event.timestamp)\n date_time_string = date_time.CopyToDateTimeStringISO8601()\n\n lines_of_text = [\n '+-' * 40,\n '[Timestamp]:',\n ' {0:s}'.format(date_time_string)]\n\n path_specification = getattr(event_data_stream, 'path_spec', None)\n if not path_specification:\n # Note that support for event_data.pathspec is kept for backwards\n # compatibility.\n path_specification = getattr(event_data, 'pathspec', None)\n\n if path_specification:\n lines_of_text.extend([\n '',\n '[Pathspec]:'])\n lines_of_text.extend([\n ' {0:s}'.format(line)\n for line in path_specification.comparable.split('\\n')])\n\n # Remove additional empty line.\n lines_of_text.pop()\n\n reserved_attributes = [\n '',\n '[Reserved attributes]:']\n additional_attributes = [\n '',\n '[Additional attributes]:']\n\n event_attributes = list(event_data.GetAttributes())\n if event_data_stream:\n event_attributes.extend(event_data_stream.GetAttributes())\n\n for attribute_name, attribute_value in sorted(event_attributes):\n # Some parsers have written bytes values to storage.\n if isinstance(attribute_value, bytes):\n attribute_value = attribute_value.decode('utf-8', 'replace')\n logger.warning(\n 'Found bytes value for attribute \"{0:s}\" for data type: '\n '{1!s}. Value was converted to UTF-8: \"{2:s}\"'.format(\n attribute_name, event_data.data_type, attribute_value))\n\n # Note that support for event_data.pathspec is kept for backwards\n # compatibility. The current value is event_data_stream.path_spec.\n if attribute_name in ('path_spec', 'pathspec'):\n continue\n\n attribute_string = ' {{{0!s}}} {1!s}'.format(\n attribute_name, attribute_value)\n\n if attribute_name in definitions.RESERVED_VARIABLE_NAMES:\n reserved_attributes.append(attribute_string)\n else:\n additional_attributes.append(attribute_string)\n\n lines_of_text.extend(reserved_attributes)\n lines_of_text.extend(additional_attributes)\n\n if event_tag:\n labels = [\n '\\'{0:s}\\''.format(label) for label in event_tag.labels]\n lines_of_text.extend([\n '',\n '[Tag]:',\n ' {{labels}} [{0:s}]'.format(', '.join(labels))])\n\n lines_of_text.append('')\n\n return '\\n'.join(lines_of_text)", "title": "" }, { "docid": "856c10ce5cd304201b8eed06fd933858", "score": "0.5396154", "text": "def _format_value(self, value):\n # Use renamed format_name() for Django versions >= 1.10.\n if hasattr(self, 'format_value'):\n return super(DateTimePicker, self).format_value(value)\n # Use old _format_name() for Django versions < 1.10.\n else:\n return super(DateTimePicker, self)._format_value(value)", "title": "" }, { "docid": "dd504ed8acd9376144b8e3f17a7ea7f1", "score": "0.53701323", "text": "def format(self, value):\n \n # recalculate values\n if self._is_dirty:\n self.initialize()\n \n # apply formatting\n return self.formatter.format(value)", "title": "" }, { "docid": "9523f3d64b370f0323a856ebad657125", "score": "0.5366524", "text": "def change_format(key_val):\n\tdevice_id = key_val[0][0][0]\n\tbase_timestamp = key_val[0][0][1]\n\tmean_sd = key_val[1]\n\treturn (device_id, (base_timestamp, mean_sd))", "title": "" }, { "docid": "3b1d36306317cfc350144137615a3e82", "score": "0.536188", "text": "def _format(self, data):\n return str(float(data))", "title": "" }, { "docid": "37d3c4ad50f674907f68eb4457f38168", "score": "0.5346742", "text": "def 
_set_format_fields(self, value):\n self.format_fields = value", "title": "" }, { "docid": "57d17b38679d6c7d96f1008636d494e5", "score": "0.5340107", "text": "def __call__(self, x, *args, **kwds):\n if x in self.skip_values:\n return ''\n else:\n return self.formatter(x, *args, **kwds)", "title": "" }, { "docid": "96cf951357fa4cd39bcda50ee6cc6cd1", "score": "0.533292", "text": "def _format(self, data: AnyStr) -> Any:\n\n return data", "title": "" }, { "docid": "ccaebb174885135890465bdca8f00144", "score": "0.53001463", "text": "def _format_record(self, record: DAY_RECORD) -> Collection:\n pass", "title": "" }, { "docid": "9788b12d071c059e15d201518f47a9df", "score": "0.52893513", "text": "def _fmt(self, timestamp):\n fn_read_fmt = {\"date\": timestamp.strftime(\"%Y%m%d%H%M%S\"),\n \"sat\": self.sat.upper(),\n \"product\": self.product.upper()}\n fn_write_fmt = None\n sf_read_fmt = None\n sf_write_fmt = sf_read_fmt\n\n return fn_read_fmt, sf_read_fmt, fn_write_fmt, sf_write_fmt", "title": "" }, { "docid": "aee3144b4cfeced1eaf9f22506ba6663", "score": "0.5281656", "text": "def f_format(self, s: str, *args, **kwargs):\n\n kws = SmartDict(self, kwargs)\n fmt = Formatter()\n\n return fmt.vformat(s, args, kws)", "title": "" }, { "docid": "f9ba84cd070cb1018a1583b19cae68ab", "score": "0.52814704", "text": "def format(**args):\n pass", "title": "" }, { "docid": "28629420b856cd3f3fa741c085a70226", "score": "0.5270362", "text": "def _formatValue(self):\n if self.type == 'wikibase-item':\n value = {'entity-type': 'item',\n 'numeric-id': self.getTarget().getID(numeric=True)}\n elif self.type in ('string', 'url', 'commonsMedia', 'monolingualtext'):\n value = self.getTarget()\n elif self.type in ('globe-coordinate', 'time', 'quantity'):\n value = self.getTarget().toWikibase()\n else:\n raise NotImplementedError('%s datatype is not supported yet.'\n % self.type)\n return value", "title": "" }, { "docid": "79eeb25f36eef3051e6d9ba8eae04b63", "score": "0.52612555", "text": "def _fmt(self, timestamp):\n fn_read_fmt = {\"date\": timestamp.strftime(\"%Y%m%d%H%M%S\"),\n \"sat\": self.sat, \"product\": self.product.upper()}\n fn_write_fmt = None\n sf_read_fmt = None\n sf_write_fmt = sf_read_fmt\n\n return fn_read_fmt, sf_read_fmt, fn_write_fmt, sf_write_fmt", "title": "" }, { "docid": "3a88b727d589f8f3b3e47399d9b92b7e", "score": "0.5218302", "text": "def format(self):\n raise NotImplementedError", "title": "" }, { "docid": "8849a9705fc22d519a31268e555d211a", "score": "0.5216213", "text": "def formatLog(self, key, value):\n return \"%s\\n\" % str(value)", "title": "" }, { "docid": "7cac2f1e2a343c14b1f641e2c54d9be0", "score": "0.5215168", "text": "def _fmt(self, timestamp):\n fn_read_fmt = {\"date\": timestamp.strftime(\"%Y%m%d%H%M%S\"),\n \"sat\": self.sat, \"product\": self.product}\n fn_write_fmt = None\n sf_read_fmt = None\n sf_write_fmt = sf_read_fmt\n\n return fn_read_fmt, sf_read_fmt, fn_write_fmt, sf_write_fmt", "title": "" }, { "docid": "a4e74999e8380308a35792de87039419", "score": "0.5190796", "text": "def _format(self, value, label, cast=None, null='(None)', empty='(Empty)', places=2, map=None, max_length=100, truncate='...'):\n if value 
is None:\n value = null % {'label': label}\n if cast is not None:\n value = cast(value)\n \n if isinstance(value, (datetime.datetime, datetime.time, datetime.date)):\n if isinstance(value, datetime.datetime):\n result_repr = capfirst(dateformat.format(value, settings.DATE_FORMAT))\n elif isinstance(value, datetime.time):\n result_repr = capfirst(dateformat.time_format(value, settings.TIME_FORMAT))\n else:\n result_repr = capfirst(dateformat.format(value, settings.DATE_FORMAT))\n elif isinstance(value, bool):\n BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}\n result_repr = E.IMG(src=\"%sadmin/img/icon-%s.gif\" % (settings.ADMIN_MEDIA_PREFIX, BOOLEAN_MAPPING[value]), alt=\"%s\" % value)\n elif isinstance(value, (float, Decimal)):\n result_repr = (u'%%.%sf' % places) % value\n elif map:\n result_repr = map.get(value, '--')\n elif isinstance(value, (SafeUnicode, SafeString)):\n try:\n return etree.fromstring(value)\n except etree.XMLSyntaxError:\n result_repr = value\n else:\n result_repr = unicode(value)\n \n if empty and result_repr == '':\n result_repr = empty % {'label': label} \n \n if not isinstance(result_repr, (SafeUnicode, SafeString)) and max_length and len(result_repr) > max_length:\n result_repr = E.ABBR(result_repr[:max_length-len(truncate)] + truncate, title=result_repr)\n return result_repr", "title": "" }, { "docid": "2d6773d6dd0855060db980d6618342b0", "score": "0.51813334", "text": "def update_formatted(self, val, pos):\n\n y, x = pos\n self.formatted[y][x].value = val", "title": "" }, { "docid": "e86328d7af456940870c590b68ff950a", "score": "0.5162175", "text": "def parse_event(event):\n\n ts_dt, type_, data = event\n ts = ts_dt.strftime(\"%H:%M:%SZ\")\n if type_ == \"added\":\n return (\n f\"{ts} added {bytes(data.hash)[::-1].hex()}\"\n f\" with feerate {data.fee/data.vsize:.2f} sat/vB\"\n f\" ({data.fee} sat, {data.vsize} vbytes)\"\n )\n\n if type_ == \"removed\":\n return (\n f\"{ts} removed {bytes(data.hash)[::-1].hex()}\"\n f\" with feerate {data.fee/data.vsize:.2f} sat/vB\"\n f\" ({data.fee} sat, {data.vsize} vbytes)\"\n f\" received {ts_dt.timestamp()-data.entry_time:.1f} seconds ago\"\n f\": {data.reason.decode('UTF-8')}\"\n )\n\n if type_ == \"rejected\":\n return (\n f\"{ts} rejected {bytes(data.hash)[::-1].hex()}\"\n f\": {data.reason.decode('UTF-8')}\"\n )\n\n if type_ == \"replaced\":\n return (\n f\"{ts} replaced {bytes(data.replaced_hash)[::-1].hex()}\"\n f\" with feerate {data.replaced_fee/data.replaced_vsize:.2f} sat/vB\"\n f\" received {ts_dt.timestamp()-data.replaced_entry_time:.1f} seconds ago\"\n f\" ({data.replaced_fee} sat, {data.replaced_vsize} vbytes)\"\n f\" with {bytes(data.replacement_hash)[::-1].hex()}\"\n f\" with feerate {data.replacement_fee/data.replacement_vsize:.2f} sat/vB\"\n f\" ({data.replacement_fee} sat, {data.replacement_vsize} vbytes)\"\n )\n\n raise NotImplementedError(\"Unsupported event type: {type_}\")", "title": "" }, { "docid": "2b5b9cfef36e516fbbca68139f0959ee", "score": "0.51424795", "text": "def _format(self, match):", "title": "" }, { "docid": "236de6fd1533068f32c55d13d8e150b1", "score": "0.5112352", "text": "def format_field(self, field):\n warnings.warn(\"Formatting by field is deprecated.\", DeprecationWarning)\n\n t, x = self.cat.simple(field)\n logging.debug(\"field:\", field)\n agent_address, feed_and_field = field.split(\".feeds.\")\n feed_tag, field = feed_and_field.split(\".\")\n\n json_body = []\n\n for _x, _t in zip(x, t):\n fields = {field: _x}\n json_body.append(\n {\n \"measurement\": 
agent_address,\n \"time\": timestamp2influxtime(_t),\n \"fields\": fields,\n \"tags\": {\n \"feed\": feed_tag\n }\n\n }\n )\n\n # print(\"payload: {}\".format(json_body))\n\n return json_body", "title": "" }, { "docid": "1099e7fed0e39dc73dd08cb301257a47", "score": "0.5098753", "text": "def formatted_val(self, key, val):\n if not hasattr(val, \"delfick_error_format\"):\n return val\n else:\n try:\n return val.delfick_error_format(key)\n except Exception as error:\n return \"<|Failed to format val for exception: val={0}, error={1}|>\".format(val, error)", "title": "" }, { "docid": "1a0d90dab59149a7ef8d9782d23d1b8b", "score": "0.50867456", "text": "def _format_logs(self, logs):\n str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]\n s = ', '.join(str_logs)\n return s", "title": "" }, { "docid": "0c7b389dd859ed43d1ec2f46e35da1d7", "score": "0.5086249", "text": "def format_input(self,invals: AdcInput) -> bytes:\n concat = lambda s, f: ''.join([f % x for x in s])\n retval = ''\n retval += concat(invals.mag_meas, '%3.2f,')\n retval += concat(invals.euler_angle, '%3.2f,')\n retval += concat(invals.sun_measure, '%3.2f,')\n retval += concat(invals.epoch, '%02.0f,')\n retval += concat(invals.lla, '%3.2f,')\n retval += concat([invals.s_flag], '%1.0f,')\n retval = retval[:-1] #remove the trailing comma\n retval += os.linesep\n return retval.encode('utf-8')", "title": "" }, { "docid": "2c9bd372fe37032e35ebc60c0a6055d8", "score": "0.5075521", "text": "def encode(self):\n record = \"%s|%s|%s|%s|\" % (self.date, self.type, self.dest, self.desc)\n for account, delta in self.deltas.iteritems():\n if delta != 0.0:\n record += \"%s=%.2f,\" % (account, float(delta))\n if record[:-1] == \",\":\n record = record[:-1] # remove trailing comma\n record += \"|%s|%s\" % (self.id, self.uid)\n return record", "title": "" }, { "docid": "1eac21066ef32d41680a8afe1ee61721", "score": "0.5075433", "text": "def _convert_event(self, event):\n\n doc = {\"h\": event.hostname,\n \"ts\": event.timestamp}\n if event.args:\n doc[\"fields\"] = event.args\n return doc", "title": "" }, { "docid": "08fc83352e547d585eeae2c444b17e2e", "score": "0.506357", "text": "def formatted_value(self):\n return OnnxTranslator.Parameter.format_value(self.value)", "title": "" }, { "docid": "68cccdabe7c7b6bcd543418c7b80b9f3", "score": "0.50635076", "text": "def format_value(value, sf=3):\n if isinstance(value, str):\n return format_name(value)\n\n elif isinstance(value, list) or isinstance(value, np.ndarray):\n value = list(value)\n for i in range(len(value)):\n vv = format_value(value[i])\n value[i] = vv\n return \"[\" + \", \".join(value) + \"]\"\n\n elif value is None:\n return \"N/A\"\n\n else:\n fmt_str = \"{0:.%ig}\" % sf\n return fmt_str.format(value)", "title": "" }, { "docid": "278208bc8e62ac86d65e6bed8d659cea", "score": "0.50577164", "text": "def 
_format_record(self, record: DAY_RECORD\n ) -> Iterable[Union[int, decimal.Decimal]]:\n\n return record.date.year, record.date.month, record.value", "title": "" }, { "docid": "4ca6a26a1675f160de18c721a5c1f4ae", "score": "0.5050367", "text": "def format(self, record):\n\n cols = [record.seqid, record.source, record.type, record.start,\n record.end, record.score, record.strand, record.phase,\n self.format_attributes(record)]\n\n # If any of the columns are None value, repalce them with '.'\n cols = [col if col is not None else '.' for col in cols]\n\n # Convert all the columns to strings\n cols = [str(col) for col in cols]\n\n return '\\t'.join(cols)", "title": "" }, { "docid": "3fc6075868c2634b607112e638a6064b", "score": "0.5049101", "text": "def _dummy_formatter(value):\n if value is None: return None\n return str(value)", "title": "" }, { "docid": "cb13ca730c9c668310e02943e8642924", "score": "0.5047358", "text": "def format_value(self, value):\n if value is not None and not isinstance(value, (tuple, list)):\n value = value.split(',')\n return super().format_value(value)", "title": "" }, { "docid": "569da905061b90b607ef7e934674c2de", "score": "0.5045615", "text": "def normalize_field_function(self, value: dict) -> str:\r\n annotation, *other = value.items()\r\n if other:\r\n raise ValueError(\"Multiple annotations returned!\")\r\n self.annotations.update(**value)\r\n field_name, _ = annotation\r\n return field_name", "title": "" }, { "docid": "cbc2784bb1b6674384ef3573aa29d07f", "score": "0.5040373", "text": "def _rawEventToDescription(self, event):\n timestamp, value, eventType, index = event\n if eventType == Gamepad.EVENT_CODE_BUTTON:\n if index in self.buttonNames:\n button = self.buttonNames[index]\n else:\n button = str(index)\n if value == 0:\n return '%010u: Button %s released' % (timestamp, button)\n elif value == 1:\n return '%010u: button %s pressed' % (timestamp, button)\n else:\n return '%010u: button %s state %i' % (timestamp, button, value)\n elif eventType == Gamepad.EVENT_CODE_AXIS:\n if index in self.axisNames:\n axis = self.axisNames[index]\n else:\n axis = str(index)\n position = value / Gamepad.MAX_AXIS\n return '%010u: Axis %s at %+06.1f %%' % (timestamp, axis, position * 100)\n elif eventType == Gamepad.EVENT_CODE_INIT_BUTTON:\n if index in self.buttonNames:\n button = self.buttonNames[index]\n else:\n button = str(index)\n if value == 0:\n return '%010u: Button %s initially released' % (timestamp, button)\n elif value == 1:\n return '%010u: button %s initially pressed' % (timestamp, button)\n else:\n return '%010u: button %s initially state %i' % (timestamp, button, value)\n elif eventType == Gamepad.EVENT_CODE_INIT_AXIS:\n if index in self.axisNames:\n axis = self.axisNames[index]\n else:\n axis = str(index)\n position = value / Gamepad.MAX_AXIS\n return '%010u: Axis %s initially at %+06.1f %%' % (timestamp, axis, position * 100)\n else:\n return '%010u: Unknown event %u, Index %u, Value %i' % (timestamp, eventType, index, value)", "title": "" }, { "docid": "9ee18739750c6665795221539552ec65", "score": "0.5036978", "text": "def to_html(self, **kwargs) -> str:\n\n return self.value.strftime(\"%H:%M:%S.%f\")", "title": "" }, { "docid": "0e949ae16375f055c1e9e6aa442e8f99", "score": "0.5034431", "text": "def _make_format_line(self):\n fstring = ['\\tformat']\n for key in sorted(self.format):\n value = self.format[key]\n if key == 'datatype':\n # Datatype must come first!\n fstring.insert(1, \"%s=%s\" % (key, value))\n elif key in ('interleave', ):\n # IGNORE the 
interleaving -- not implemented\n continue\n else:\n if key == 'symbols':\n value = '\"%s\"' % \"\".join(sorted([\n s for s in self.symbols if not self.is_missing_or_gap(s)\n ]))\n fstring.append(\"%s=%s\" % (key, value))\n return \" \".join(fstring) + \";\"", "title": "" }, { "docid": "b853b4fd7927309e35b2ee5b3824769a", "score": "0.50254", "text": "def __str__(self):\n return 'T={0:.2f}, {1}'.format(\n self.timestamp, self.handler.__name__)", "title": "" }, { "docid": "74403db49d54dba91f12794ed39fdd1a", "score": "0.5012767", "text": "def _format_record(self, record: DAY_RECORD\n ) -> Tuple[datetime.date, decimal.Decimal, decimal.Decimal]:\n\n daily_value = (1 + round(record.value * 1 / 100, 8))\n\n return (record.date, record.value, daily_value)", "title": "" }, { "docid": "583874b76d9ef57343523d5d03f2c09e", "score": "0.50127536", "text": "def formatData(self):\n #manipulate data and return, data will be the parameters passed into the template\n data = None\n return data", "title": "" }, { "docid": "5b8b769e90aeeb9a05ad32f2cb05c770", "score": "0.5011792", "text": "def format_to_string(self, pretty=False):\n trace = {}\n trace['traceEvents'] = self._metadata + self._events\n if pretty:\n return json.dumps(trace, indent=4, separators=(',', ': '))\n else:\n return json.dumps(trace, separators=(',', ':'))", "title": "" }, { "docid": "17d064c116384dcafd5f1777ebc1e767", "score": "0.50111884", "text": "def _format(self, data):\n return \"'{0}'\".format(str(data))", "title": "" }, { "docid": "961b05531c98d58e55894781ceaab58c", "score": "0.5007684", "text": "def __formatTag__(value):\n if '.' 
in value:\n value = value[value.index('.')+1:]\n return value", "title": "" }, { "docid": "4d484240270a8486a181efa367c03a46", "score": "0.5005684", "text": "def prepare_value(self, value):\n if isinstance(value, (list, tuple)):\n return '\\n'.join(duration_string(v) for v in value)\n else:\n return value", "title": "" }, { "docid": "e1438c796f026e9995c1df6a52b13ce4", "score": "0.5004489", "text": "def format_sse(data: str, event=None) -> str:\n msg = f'data: {data}\\n\\n'\n if event is not None:\n msg = f'event: {event}\\n{msg}'\n return msg", "title": "" }, { "docid": "f2ff58d33fdf22f2e45c36f3a3f7f51e", "score": "0.5002583", "text": "def _FormatEntryItems(self, entry_items):\n lines = []\n for index, entry_item in enumerate(entry_items):\n value_string = self._FormatIntegerAsHexadecimal8(entry_item.object_offset)\n line = self._FormatValue(\n f' Entry item: {index:d} object offset', value_string)\n lines.append(line)\n\n value_string = self._FormatIntegerAsHexadecimal8(entry_item.hash)\n line = self._FormatValue(\n f' Entry item: {index:d} hash', value_string)\n lines.append(line)\n\n return ''.join(lines)", "title": "" }, { "docid": "c59975f057ecb6cdfe056b69fb33b6d5", "score": "0.5001884", "text": "def transform_times(event):\n if isinstance(event, dict):\n retval = {}\n for key, value in event.items():\n if key == 'times' and len(value) == 2:\n retval[key] = [transform_time(t) for t in value]\n else:\n retval[key] = transform_times(value)\n else:\n retval = event\n return retval", "title": "" }, { "docid": "63508b9cad30d70479576b5047e075a8", "score": "0.4993644", "text": "def format_spec(spec, normalize=False):\n from_spec = deepcopy(spec)\n newspec = {}\n\n def _format(value):\n if normalize:\n return 'VALUE'\n else:\n if isinstance(value, datetime):\n return value.isoformat()\n elif isinstance(value, unicode):\n return value.encode('utf-8')\n else:\n return str(value)\n\n def format_value(value):\n if isinstance(value, list):\n newlist = []\n for item in value:\n newvalue = format_value(item)\n # When normalizing, do not add a value to a list if it's already in\n # This way all plain value lists will remain the same\n if normalize is False or (normalize is True and newvalue not in newlist):\n newlist.append(newvalue)\n return newlist\n elif isinstance(value, dict):\n newdict = {}\n for itemkey, item in value.items():\n newdict[itemkey] = format_value(item)\n return newdict\n else:\n return _format(value)\n\n for key, value in from_spec.items():\n newspec[key] = format_value(value)\n\n return newspec", "title": "" }, { "docid": "ed9d2333e3650fcf13d521e47c2616c3", "score": "0.49779424", "text": "def _FormatMessage(\n self, output_mediator, event, event_data, event_data_stream):\n message_formatter = output_mediator.GetMessageFormatter(\n event_data.data_type)\n if not message_formatter:\n logger.warning(\n 'Using default message formatter for data type: {0:s}'.format(\n event_data.data_type))\n message_formatter = self._DEFAULT_MESSAGE_FORMATTER\n\n event_values = event_data.CopyToDict()\n message_formatter.FormatEventValues(output_mediator, event_values)\n\n return message_formatter.GetMessage(event_values)", "title": "" }, { "docid": "377111b8dbebf8914e0c2c16a4564803", "score": "0.49712902", "text": "def format (*values):\n\tself=__module__\n\treturn u\"\\n\".join(_format(values))", "title": "" }, { "docid": "73101043f5e652acac73dc066d8dee36", "score": "0.49686646", "text": "def GetFieldValues(\n self, output_mediator, event, event_data, event_data_stream, event_tag):", "title": "" }, 
{ "docid": "0bb2d94f9fb07bfa1db68d7e9a98b931", "score": "0.49638608", "text": "def getEventArgsPrototypeStringDict(self, obj):\n d = self.getEventArgsDict(obj)\n d2 = {}\n for l in d:\n if len(d[l]) > 0:\n d2[l] = \", \".join([\"{} {}\".format(x[1], x[0]) for x in d[l]])\n else:\n # If event has no arguments parameter string should be empty\n d2[l] = \"\"\n return d2", "title": "" }, { "docid": "74e9b6ca4bf00d8e2f228aeeca5ab46f", "score": "0.49539793", "text": "def format():\n raise NotImplementedError(\"Abstract class\")", "title": "" }, { "docid": "03a9181a593fcb6d1c01ea3a33f0876c", "score": "0.49444556", "text": "def format(self, record):\n return json.dumps(Log.extract_record_data(record), separators=(\",\", \":\"))", "title": "" }, { "docid": "19ce048287269f9626b51e35c54f5d49", "score": "0.49373907", "text": "def format(myFormatString, dictOrValues):\n\t\tif hasattr(dictOrValues, '__iter__'):\n\t\t\tfor myDict in dictOrValues:\n\t\t\t\tmyDict['value'] = myFormatString % myDict\n\t\t\t\tyield myDict\n\t\telse:\n\t\t\tyield myFormatString % dictOrValues", "title": "" }, { "docid": "23e79263e5616c015d2a45de879e79f1", "score": "0.49267864", "text": "def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n formatter = logging.Formatter(log_fmt, \"%Y-%m-%d %H:%M:%S\")\n return formatter.format(record)", "title": "" }, { "docid": "d5c7c6c5966b1277d103f249abc0e028", "score": "0.49228436", "text": "def format_add_inner_func(f, format, entry, channel_index=0):\n old_f = format[entry][channel_index][1]\n format[entry][channel_index] = [format[entry][channel_index][0], lambda x: old_f(f(x))]\n return format", "title": "" }, { "docid": "e45cd38380ab2d0e24b898a75715e596", "score": "0.49158877", "text": "def get_format(track_info=None):\n\n new_format = '{\"datetime\": \"%(asctime)s\",' \\\n '\"name\": \"%(name)s\",' \\\n '\"level\": \"%(levelname)s\",' \\\n '\"log\": %(message)s,'\n\n new_format += '\"track\": %s}' % (json.dumps(track_info if track_info else helpers.get_track()))\n\n return new_format", "title": "" }, { "docid": "8fa49c7c4aa7d78671af9bd2d8b890f5", "score": "0.49016258", "text": "def __format_and_validate(self):\n\n return self.__format()", "title": "" }, { "docid": "ee87bef6d14b1c8cfec9fdda54013ce7", "score": "0.49015257", "text": "def _fmt(self, field):\n name = field[0]\n base, chars = self._fsplit(field[1])\n count = int(field[2]) if len(field) > 2 else 1\n proc = field[3] if len(field) > 3 else None\n return name, base, chars, count, proc", "title": "" }, { "docid": "8ed7951f78732c6c8cf66464ff5b8287", "score": "0.4900498", "text": "def _FormatTime(self, output_mediator, event, event_data, event_data_stream):\n # For now check if event.timestamp is set, to mimic existing behavior of\n # using --:--:-- for 0 timestamp values.\n if not event.timestamp:\n return '--:--:--'\n\n date_time = event.date_time\n if not date_time or date_time.is_local_time:\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=event.timestamp)\n\n year, month, day_of_month, hours, minutes, seconds = (\n date_time.GetDateWithTimeOfDay())\n\n if output_mediator.time_zone != pytz.UTC:\n try:\n datetime_object = datetime.datetime(\n year, month, day_of_month, hours, minutes, seconds,\n tzinfo=pytz.UTC)\n\n datetime_object = datetime_object.astimezone(output_mediator.time_zone)\n\n hours, minutes, seconds = (\n datetime_object.hour, datetime_object.minute,\n datetime_object.second)\n\n except (OSError, OverflowError, TypeError, ValueError):\n hours, minutes, seconds = (None, 
None, None)\n\n if None in (hours, minutes, seconds):\n self._ReportEventError(event, event_data, (\n 'unable to copy timestamp: {0!s} to a human readable time. '\n 'Defaulting to: \"--:--:--\"').format(event.timestamp))\n return '--:--:--'\n\n return '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)", "title": "" }, { "docid": "b32f7b51d901ac9094fb6b0ca3e18560", "score": "0.48993587", "text": "def format(self, record):\n if isinstance(record, SimpleLogRecord):\n return _encode(record.format())\n return super(JsonFormatter, self).format(record)", "title": "" }, { "docid": "54f89356401848557e892d92f67089d2", "score": "0.4895909", "text": "def generate_events_from_value(value, handler, skip=False):\n t = SerdeUtil.get_type(value)\n if t == SerdeUtil.FIELD_VALUE_TYPE.BINARY:\n if not skip:\n handler.binary_value(value)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.BOOLEAN:\n if not skip:\n handler.boolean_value(value)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.DOUBLE:\n if not skip:\n handler.double_value(value)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.INTEGER:\n if not skip:\n handler.integer_value(value)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.LONG:\n if not skip:\n handler.long_value(value)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.STRING:\n if not skip:\n handler.string_value(value)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.TIMESTAMP:\n if not skip:\n handler.timestamp_value(value)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.NUMBER:\n if not skip:\n handler.number_value(value)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.JSON_NULL:\n if not skip:\n handler.json_null_value()\n elif t == SerdeUtil.FIELD_VALUE_TYPE.NULL:\n if not skip:\n handler.null_value()\n elif t == SerdeUtil.FIELD_VALUE_TYPE.EMPTY:\n if not skip:\n handler.empty_value()\n elif t == SerdeUtil.FIELD_VALUE_TYPE.MAP:\n if skip:\n return\n num_elements = len(value)\n handler.start_map(num_elements)\n if handler.stop():\n return\n for key in value:\n skip_field = handler.start_map_field(key)\n if handler.stop():\n return\n Nson.generate_events_from_value(value[key],\n handler,\n skip_field)\n if handler.stop():\n return\n handler.end_map_field(key)\n if handler.stop():\n return\n handler.end_map(num_elements)\n elif t == SerdeUtil.FIELD_VALUE_TYPE.ARRAY:\n if skip:\n return\n num_elements = len(value)\n handler.start_array(num_elements)\n if handler.stop():\n return\n index = 0\n for item in value:\n skip = handler.start_array_field(index)\n index += 1\n if handler.stop():\n return\n Nson.generate_events_from_value(item, handler, skip)\n if handler.stop():\n return\n handler.end_array_field(index)\n if handler.stop():\n return\n handler.end_array(num_elements)\n else:\n raise IllegalArgumentException(\n 'Unknown value type code: ' + str(t))", "title": "" }, { "docid": "5e474ee42f173d48ca231eaf4f16f6a9", "score": "0.4887734", "text": "def str_formats(self):\n templates = {}\n for field in self._fields:\n name, base, chars, count, proc = self._fmt(field)\n fill = '0' if base else ' '\n templates[name] = '{{{name}:{fill}{chars}{base}}}'.format(**locals())\n return templates", "title": "" }, { "docid": "bc890ba448d3b56dd9b20d70a218990e", "score": "0.48781067", "text": "def reformat(cls, entry):\n pass", "title": "" }, { "docid": "00db0e69a60ddaf9d88ff23f4bab84da", "score": "0.48735768", "text": "def _item_to_string(self, key, value, dataset=None):\n if isinstance(value, float) and 1.0 < value < 100.0:\n value = f\"{value:.{self.precision}f}\"\n elif isinstance(value, float):\n value = f\"{value:.{self.precision}e}\"\n if dataset is not None:\n key = 
f\"{dataset} {key}\"\n return f\"{key}: {value}\"", "title": "" }, { "docid": "f53e3234f705e656da34736ba81c8c76", "score": "0.4863256", "text": "def _data_events(self, object):\n\t\t# Builds an event list that gives for each event:\n\t\t# - Gramps ID\\n\"\n\t\t# - The event name\n\t\t# - The event date\n\t\t# - The event date in ISO format (sortable)\n\t\t# - The event place index (in table 'P'), -1 if none\n\t\t# - The event description\n\t\t# - The event text and notes (including event reference notes)\n\t\t# - A list of the event media index, in the form:\n\t\t# - media index (in table 'M')\n\t\t# - media thumbnail path\n\t\t# - [x1, y1, x2, y2] of the media reference\n\t\t# - notes of the media reference\n\t\t# - list of the media reference source citations index (in table 'C')\\n\"\n\t\t# - A list of the event source citations index (in table 'C')\n\t\tevent_ref_list = object.get_event_ref_list()\n\t\tif not event_ref_list: return(\"\")\n\t\trows = []\n\t\tfor event_ref in event_ref_list:\n\t\t\tif (event_ref.ref not in self.obj_dict[Event]): continue\n\t\t\tevent = self.database.get_event_from_handle(event_ref.ref)\n\t\t\tif (not event): continue\n\t\t\ttrow = \"\\t[\"\n\t\t\tevt_type = str(event.get_type())\n\t\t\tevent_role = event_ref.get_role()\n\t\t\tif (event_role != EventRoleType.PRIMARY and event_role != EventRoleType.FAMILY):\n\t\t\t\tevt_type += \" (%s)\" % event_role\n\t\t\tplace_index = -1\n\t\t\tplace_handle = event.get_place_handle()\n\t\t\tif (place_handle and (place_handle in self.obj_dict[Place])):\n\t\t\t\tplace_index = self.obj_dict[Place][place_handle][OBJDICT_INDEX]\n\t\t\tevt_desc = event.get_description()\n\t\t\ttrow += \"\\\"\" + self.obj_dict[Event][event_ref.ref][OBJDICT_GID] + \"\\\",\"\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_type)) + \"\\\",\"\n\t\t\tevt_date = format_date(event.get_date_object())\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_date)) + \"\\\",\"\n\t\t\tevt_date = format_date(event.get_date_object(), True)\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_date)) + \"\\\",\"\n\t\t\ttrow += str(place_index) + \",\"\n\t\t\tif (evt_desc is None): evt_desc = \"\"\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_desc)) + \"\\\",\"\n\t\t\t# Get event notes\n\t\t\tnotelist = event.get_note_list()\n\t\t\tnotelist.extend(event_ref.get_note_list())\n\t\t\tattrlist = event.get_attribute_list()\n\t\t\tattrlist.extend(event_ref.get_attribute_list())\n\t\t\ttrow += \"\\\"\" + script_escape(self.get_notes_attributes_text(notelist, attrlist)) + \"\\\",\"\n\t\t\t# Get event media\n\t\t\ttrow += self._data_media_reference_index(event)\n\t\t\ttrow += \",\"\n\t\t\t# Get event sources\n\t\t\tcitationlist = event.get_citation_list()\n\t\t\tcitationlist.extend(event_ref.get_citation_list())\n\t\t\tfor attr in attrlist: citationlist.extend(attr.get_citation_list())\n\t\t\ttrow += self._data_source_citation_index_from_list(citationlist)\n\t\t\t#\n\t\t\ttrow += \"]\"\n\t\t\trows.append(trow)\n\t\treturn(\",\\n\".join(rows))", "title": "" }, { "docid": "d69dc7c649f21ca9796bbb3895a95c6a", "score": "0.48620108", "text": "def _format(self, data: _format_input) -> _format_return:\n\n def _format_item(\n cond: Union[Dict, Dict[str, Dict]]) -> Dict[str, Dict]:\n \"\"\"Transform Dict to Dict[str, Dict].\"\"\"\n if isinstance(list(cond.values())[0], dict):\n for value in list(cond.values()):\n for key in list(self._indicators):\n value.setdefault(key, 0.)\n return cond\n else:\n return {str(cond): {}.fromkeys(self._indicators, -1)}\n\n if 
isinstance(data, UserList):\n return [_format_item(i) for i in data.data]\n\n elif isinstance(data, list):\n return [_format_item(i) for i in data]\n\n else:\n return _format_item(data)", "title": "" }, { "docid": "da3d7b674c5da846eb0e6bbbfd94d922", "score": "0.48612654", "text": "def test_entry_formatting(exopy_qtbot):\n e = MonitoredEntry(name='test', formatting='{a} = {b}',\n depend_on=['a', 'b'])\n\n e.update(dict(a=1, b=2, c=3))\n\n def assert_value():\n assert e.value == '1 = 2'\n exopy_qtbot.wait_until(assert_value)", "title": "" }, { "docid": "30aa011d9b968b3084c76b9a466c879b", "score": "0.48607677", "text": "def format_add_outer_func(f, format, entry, channel_index=0):\n old_f = format[entry][channel_index][1]\n format[entry][channel_index] = [format[entry][channel_index][0], lambda x: f(old_f(x))]\n return format", "title": "" }, { "docid": "dda767dadfe1c89013b62ee127e95501", "score": "0.4860735", "text": "def formatter(format_string, kwargs):\n for key, val in kwargs.items():\n key2 = \"{%s}\" % (key)\n if key2 in format_string:\n # explicitly convert val to str\n format_string = format_string.replace(key2, str(val))\n kwargs[key] = \"\"\n return format_string", "title": "" }, { "docid": "d74793330e6c8662bd4b46ace83ea026", "score": "0.48595375", "text": "def _format(self, original, **kwargs):\n\n try:\n return lib.dict_format(original, **kwargs)\n except KeyError as e:\n log.error(\n \"One of the {variables} defined in the application \"\n \"definition wasn't found in this session.\\n\"\n \"The variable was %s \" % e\n )\n log.error(json.dumps(kwargs, indent=4, sort_keys=True))\n\n raise ValueError(\n \"This is typically a bug in the pipeline, \"\n \"ask your developer.\")", "title": "" }, { "docid": "89590351e7a0fcedc6d69eeff86b7e78", "score": "0.4849913", "text": "def _format(self, data):\n return str(int(data))", "title": "" }, { "docid": "e3b875bb311fc946c60c640db91e365d", "score": "0.4845211", "text": "def _wrap_value(func, state_dict):\n\n def new_func(label, *args, **kwargs):\n value = func(label, *args, **kwargs)\n if label not in counts[\"widgets\"]:\n counts[\"widgets\"][label] = {}\n\n # st.date_input and st.time return datetime object, convert to str\n formatted_value = replace_empty(value)\n if (\n isinstance(value, datetime.datetime)\n or isinstance(value, datetime.date)\n or isinstance(value, datetime.time)\n ):\n formatted_value = str(value)\n\n if formatted_value not in counts[\"widgets\"][label]:\n counts[\"widgets\"][label][formatted_value] = 0\n if formatted_value != state_dict.get(label, None):\n counts[\"widgets\"][label][formatted_value] += 1\n state_dict[label] = formatted_value\n return value\n\n return new_func", "title": "" } ]
a49cad5154db3f4173ed1f408c04a4ca
Convert XML-derived dict to tf.Example proto. Notice that this function normalizes the bounding box coordinates provided by the raw data.
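For reference, a minimal sketch of the conversion this query describes, assuming a PASCAL-VOC-style dict layout (data['size'], data['object'], 'bndbox') like the one used in the passages below; the function and helper names here are illustrative and not taken from any passage. Pixel box corners are divided by the image width/height so the stored coordinates land in [0, 1].

import tensorflow as tf

def xml_dict_to_tf_example(data, label_map_dict):
    # Image size from the parsed XML; used to normalize box corners to [0, 1].
    width = int(data['size']['width'])
    height = int(data['size']['height'])
    xmin, ymin, xmax, ymax, labels = [], [], [], [], []
    for obj in data.get('object', []):
        box = obj['bndbox']
        xmin.append(float(box['xmin']) / width)
        ymin.append(float(box['ymin']) / height)
        xmax.append(float(box['xmax']) / width)
        ymax.append(float(box['ymax']) / height)
        labels.append(label_map_dict[obj['name']])
    def _floats(values):
        return tf.train.Feature(float_list=tf.train.FloatList(value=values))
    def _ints(values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
    feature = {
        'image/height': _ints([height]),
        'image/width': _ints([width]),
        'image/object/bbox/xmin': _floats(xmin),
        'image/object/bbox/ymin': _floats(ymin),
        'image/object/bbox/xmax': _floats(xmax),
        'image/object/bbox/ymax': _floats(ymax),
        'image/object/class/label': _ints(labels),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))

Serialized with .SerializeToString(), such a record round-trips through the parse_example_proto-style readers that appear in the negative passages below.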
[ { "docid": "f89a97e7a31b3db1b5ae28192b332a36", "score": "0.0", "text": "def selective_search(data,\n dataset_directory,\n label_map_dict,\n ignore_difficult_instances=False,\n image_subdirectory='JPEGImages'):\n img_path = os.path.join(data['folder'], image_subdirectory, data['filename'])\n full_path = os.path.join(dataset_directory, img_path)\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n\n width = int(data['size']['width'])\n height = int(data['size']['height'])\n\n # Use SelectiveSearch to get the proposals.\n\n file_bytes = np.fromstring(encoded_jpg, dtype=np.uint8)\n\n bgr = cv2.imdecode(file_bytes, cv2.IMREAD_UNCHANGED)\n height, width, _ = bgr.shape\n assert bgr.shape[0] == height and bgr.shape[1] == width\n\n if height / width >= 2.2:\n width = int(height / 2.2)\n bgr = cv2.resize(bgr, (width, height))\n elif width / height >= 2.2:\n height = int(width / 2.2)\n bgr = cv2.resize(bgr, (width, height))\n\n ss.setBaseImage(bgr)\n #ss.switchToSelectiveSearchFast()\n ss.switchToSelectiveSearchQuality()\n rects = ss.process()\n\n rects = np.stack([rect for rect in rects if rect[2] >= 20 and rect[3] >= 20],\n axis=0)\n\n height, width = bgr.shape[0], bgr.shape[1]\n x, y, w, h = [rects[:, i] for i in range(4)]\n proposals = np.stack(\n [y / height, x / width, (y + h) / height, (x + w) / width], axis=-1)\n return proposals", "title": "" } ]
[ { "docid": "5cedbb9d92ce614e221c52c0c553315a", "score": "0.69029593", "text": "def parse_example_proto(example_serialized):\n # Dense features in Example proto.\n feature_map = {\n \"image/encoded\": tf.io.FixedLenFeature([], dtype=tf.string, default_value=\"\"),\n \"image/class/label\": tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n \"image/class/text\": tf.io.FixedLenFeature([], dtype=tf.string, default_value=\"\"),\n }\n sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)\n # Sparse features in Example proto.\n feature_map.update(\n {\n k: sparse_float32\n for k in [\n \"image/object/bbox/xmin\",\n \"image/object/bbox/ymin\",\n \"image/object/bbox/xmax\",\n \"image/object/bbox/ymax\",\n ]\n }\n )\n\n features = tf.io.parse_single_example(serialized=example_serialized, features=feature_map)\n label = tf.cast(features[\"image/class/label\"], dtype=tf.int32)\n\n xmin = tf.expand_dims(features[\"image/object/bbox/xmin\"].values, 0)\n ymin = tf.expand_dims(features[\"image/object/bbox/ymin\"].values, 0)\n xmax = tf.expand_dims(features[\"image/object/bbox/xmax\"].values, 0)\n ymax = tf.expand_dims(features[\"image/object/bbox/ymax\"].values, 0)\n\n # Note that we impose an ordering of (y, x) just to make life difficult.\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(a=bbox, perm=[0, 2, 1])\n\n return features[\"image/encoded\"], label, bbox", "title": "" }, { "docid": "caaa5e4501410a983fd4d58e9fec4540", "score": "0.6847513", "text": "def _parse_train_example_proto(example_serialized):\n # Dense features in Example proto.\n feature_map = {\n 'image/height': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/width': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/colorspace': tf.VarLenFeature(dtype=tf.string),\n 'image/channels': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/class/synset': tf.VarLenFeature(dtype=tf.string),\n 'image/format': tf.VarLenFeature(dtype=tf.string),\n 'image/filename': tf.VarLenFeature(dtype=tf.string),\n 'image/encoded': tf.FixedLenFeature([], dtype=tf.string), \n } \n features = tf.parse_single_example(example_serialized, feature_map)\n label = tf.cast(features['image/class/label'], dtype=tf.int32) -1 \n one_hot_label = tf.one_hot(label, _NUM_CLASSES, 1, 0) #convert it to a one_hot vector \n\n # Directly fixing values of min and max\n xmin = tf.expand_dims([0.0], 0)\n ymin = tf.expand_dims([0.0], 0)\n xmax = tf.expand_dims([1.0], 0)\n ymax = tf.expand_dims([1.0], 0)\n\n # Note that we impose an ordering of (y, x) just to make life difficult.\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(bbox, [0, 2, 1])\n\n return features['image/encoded'], one_hot_label, bbox", "title": "" }, { "docid": "d6efe1b15fae462550273d7c5ebf2452", "score": "0.68353194", "text": "def _parse_example_proto(example_serialized):\n # Dense features in Example proto.\n feature_map = {\n 'image/height': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/width': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/colorspace': tf.VarLenFeature(dtype=tf.string),\n 'image/channels': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/class/synset': 
tf.VarLenFeature(dtype=tf.string),\n 'image/format': tf.VarLenFeature(dtype=tf.string),\n 'image/filename': tf.VarLenFeature(dtype=tf.string),\n 'image/encoded': tf.FixedLenFeature([], dtype=tf.string), \n } \n features = tf.parse_single_example(example_serialized, feature_map)\n label = tf.cast(features['image/class/label'], dtype=tf.int32)\n one_hot_label = tf.one_hot(label, _NUM_CLASSES, 1, 0) #convert it to a one_hot vector \n\n # Directly fixing values of min and max\n xmin = tf.expand_dims([0.0], 0)\n ymin = tf.expand_dims([0.0], 0)\n xmax = tf.expand_dims([1.0], 0)\n ymax = tf.expand_dims([1.0], 0)\n\n # Note that we impose an ordering of (y, x) just to make life difficult.\n bbox = tf.concat([ymin, xmin, ymax, xmax], 0)\n\n # Force the variable number of bounding boxes into the shape\n # [1, num_boxes, coords].\n bbox = tf.expand_dims(bbox, 0)\n bbox = tf.transpose(bbox, [0, 2, 1])\n\n return features['image/encoded'], one_hot_label, bbox", "title": "" }, { "docid": "0a75140d0b00b140c50fe64907c3b955", "score": "0.65387064", "text": "def parse_example_proto(example_serialized):\n # Dense features in Example proto.\n feature_map = {\n 'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n 'image/class/label': tf.io.FixedLenFeature([1], dtype=tf.int64,\n default_value=-1),\n 'image/filename': tf.io.FixedLenFeature([], dtype=tf.string,\n default_value=\"\")\n }\n sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)\n # Sparse features in Example proto.\n feature_map.update(\n {k: sparse_float32 for k in ['image/object/bbox/xmin',\n 'image/object/bbox/ymin',\n 'image/object/bbox/xmax',\n 'image/object/bbox/ymax']})\n\n features = tf.io.parse_single_example(serialized=example_serialized, features=feature_map)\n label = tf.cast(features['image/class/label'], dtype=tf.int32)\n filename = tf.cast(features['image/filename'], dtype=tf.string)\n\n return features['image/encoded'], label, filename", "title": "" }, { "docid": "289db45332c94e04b173f3f898209ea3", "score": "0.648063", "text": "def dict_to_tf_example(data, dataset_directory, label_map_dict, image_subdirectory, annotation_path):\n\txml_tree = et.parse(annotation_path)\n\txml_root = xml_tree.getroot()\n\txml_objects = xml_root.findall(\"./object\")\n\tpolygons = []\n\timage_filename = xml_root.find('filename').text\n \n\tfull_path = os.path.join(image_subdirectory, image_filename)\n\t\n\twith tf.gfile.GFile(full_path, 'rb') as fid:\n\t\tencoded_jpg = fid.read()\n\n\tencoded_jpg_io = io.BytesIO(encoded_jpg)\n\timage = PIL.Image.open(encoded_jpg_io)\n\tif image.format != 'JPEG':\n\t\traise ValueError('Image format not JPEG')\n\t\n\tkey = hashlib.sha256(encoded_jpg).hexdigest()\n\n\twidth, height = image.size\n\n\txmin = []\n\tymin = []\n\txmax = []\n\tymax = []\n\tclasses = []\n\tclasses_text = []\n\ttruncated = []\n\tposes = []\n\tdifficult_obj = []\n\n\tif len(xml_objects) > 0:\n\t\tfor xml_object in xml_objects:\n\t\t\txml_polygon = xml_object.find('polygon')\n\t\t\tpolygon = []\n\t\t\tpoints = xml_polygon.iter('pt')\n\n\t\t\tdifficult = False\t# what exactly does that mean???
it is a variable from the xml!!!\n\t\t\tdifficult_obj.append(int(difficult))\n\n\t\t\tfor point in points:\n\t\t\t\tx = int(point.find('x').text)\n\t\t\t\ty = int(point.find('y').text)\n\n\t\t\t\tpolygon.append((x, y))\n \n\t\t\tmin_x = polygon[0][0]\n\t\t\tmin_y = polygon[0][1]\n\t\t\tmax_x = polygon[0][0]\n\t\t\tmax_y = polygon[0][1]\n \n\t\t\tfor point in polygon:\n\t\t\t\tmin_x = min(min_x, point[0])\n\t\t\t\tmin_y = min(min_y, point[1])\n\t\t\t\tmax_x = max(max_x, point[0])\n\t\t\t\tmax_y = max(max_y, point[1])\n\n\t\t\txmin.append(float(min_x) / width)\n\t\t\tymin.append(float(min_y) / height)\n\t\t\txmax.append(float(max_x) / width)\n\t\t\tymax.append(float(max_y) / height)\n\t\t\tclass_name = xml_object.find('name').text\n\t\t\tclasses_text.append(class_name.encode('utf8'))\n\t\t\tclasses.append(label_map_dict[class_name])\n\t\t\ttruncated.append(int(0))\n\t\t\tposes.append('Unspecified'.encode('utf8'))\n\n\texample = tf.train.Example(features=tf.train.Features(feature={\n\t\t'image/height': dataset_util.int64_feature(height),\n\t\t'image/width': dataset_util.int64_feature(width),\n\t\t'image/filename': dataset_util.bytes_feature(\n\t\t\tdata['filename'].encode('utf8')),\n\t\t'image/source_id': dataset_util.bytes_feature(\n\t\t\tdata['filename'].encode('utf8')),\n\t\t'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n\t\t'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n\t\t'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n\t\t'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n\t\t'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n\t\t'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n\t\t'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n\t\t'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n\t\t'image/object/class/label': dataset_util.int64_list_feature(classes),\n\t\t'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n\t\t'image/object/truncated': dataset_util.int64_list_feature(truncated),\n\t\t'image/object/view': dataset_util.bytes_list_feature(poses),\n\t}))\n\t\n\treturn example", "title": "" }, { "docid": "9145f8a203be1c03083882ca19d0ca90", "score": "0.61002815", "text": "def parse_example_proto(serialized_example: Text) -> Dict[Text, tf.Tensor]:\n features = {}\n for feature_name, feature_type in six.iteritems(\n collect_tensor_data.FEATURE_NAME_TO_TYPE):\n dtype = (\n tf.int64 if feature_type == collect_tensor_data.FeatureType.INT else  # pylint: disable=g-long-ternary\n tf.float32 if feature_type == collect_tensor_data.FeatureType.FLOAT else\n tf.string if feature_type == collect_tensor_data.FeatureType.STRING else\n None)\n assert dtype is not None\n features[feature_name] = tf.io.VarLenFeature(dtype)\n parsed = tf.io.parse_single_example(serialized_example, features)\n for key in parsed:\n parsed[key] = tf.sparse.to_dense(parsed[key])\n return parsed", "title": "" }, { "docid": "3a4a8feeebeb2045c1d62af8310f7444", "score": "0.60377294", "text": "def _convert_to_example(image_data, superpixels, mask_instance, mask_class, shape, class_labels, class_labels_text, instance_labels):\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(shape[0]),\n 'image/width': _int64_feature(shape[1]),\n 'image/channels': _int64_feature(shape[2]),\n #'image/shape': _int64_feature(shape),\n 'image/image_data':_bytes_feature(image_data.tostring()),\n
'image/superpixels':_bytes_feature(superpixels.tostring()),\n 'image/mask_instance':_bytes_feature(mask_instance.tostring()),\n 'image/mask_class':_bytes_feature(mask_class.tostring()),\n #'image/class_labels':_int64_feature(class_labels),\n #'image/instance_labels':_int64_feature(instance_labels)\n }))\n return example", "title": "" }, { "docid": "730ca0e38b40182f3808c09c725c61a8", "score": "0.59938943", "text": "def parse_example_proto_test_predict(example_proto):\n # Parse the input tf.train.Example proto using the dictionary above.\n example = tf.io.parse_single_example(example_proto, feature_descr)\n image = tf.io.parse_tensor(example[\"image\"], out_type=tf.uint8)\n # image = tf.io.decode_jpeg(example[\"image\"])\n image = tf.reshape(image, shape=[28, 28])\n image = tf.cast(image, tf.float32) / 255.\n # TODO - normalize the data here as well (so calc the mean and standard deviation)\n return image, example[\"label\"]", "title": "" }, { "docid": "94d03f6b4137105c083f9a547da4d690", "score": "0.59852034", "text": "def _get_example(self, data_dir: str) -> tf.train.Example:\n label_name_to_id = {name: i for (i, name) in self.label_map.items()}\n annotations_dir = os.path.join(data_dir, 'Annotations')\n images_dir = os.path.join(data_dir, 'images')\n all_annotation_paths = tf.io.gfile.glob(annotations_dir + r'/*.xml')\n\n for ind, ann_file in enumerate(all_annotation_paths):\n data = collections.defaultdict(list)\n tree = ET.parse(ann_file)\n root = tree.getroot()\n img_filename = _xml_get(root, 'filename').text\n img_file = os.path.join(images_dir, img_filename)\n with tf.io.gfile.GFile(img_file, 'rb') as fid:\n encoded_jpg = fid.read()\n image = tf.io.decode_jpeg(encoded_jpg, channels=3)\n height, width, _ = image.shape\n for child in root.iter('object'):\n category_name = _xml_get(child, 'name').text\n category_id = label_name_to_id[category_name]\n bndbox = _xml_get(child, 'bndbox')\n xmin = float(_xml_get(bndbox, 'xmin').text)\n xmax = float(_xml_get(bndbox, 'xmax').text)\n ymin = float(_xml_get(bndbox, 'ymin').text)\n ymax = float(_xml_get(bndbox, 'ymax').text)\n if xmax <= xmin or ymax <= ymin or xmax > width or ymax > height:\n # Skip annotations that have no area or are larger than the image\n continue\n data['xmin'].append(xmin / width)\n data['ymin'].append(ymin / height)\n data['xmax'].append(xmax / width)\n data['ymax'].append(ymax / height)\n data['category_id'].append(category_id)\n if not data['xmin']:\n # Skip examples which have no valid annotations\n continue\n feature_dict = tfrecord_lib.image_info_to_feature_dict(\n height, width, img_filename, ind, encoded_jpg, 'jpg'\n )\n bbox_feature_dict = _bbox_data_to_feature_dict(data)\n feature_dict.update(bbox_feature_dict)\n example = tf.train.Example(\n features=tf.train.Features(feature=feature_dict)\n )\n yield example", "title": "" }, { "docid": "fa3fc1bcb35fcbd7df8d435e05202f4d", "score": "0.59497213", "text": "def parse_example_proto(example_proto):\n # Parse the input tf.train.Example proto using the dictionary above.\n example = tf.io.parse_single_example(example_proto, feature_descr)\n image = tf.io.parse_tensor(example[\"image\"], out_type=tf.uint8)\n # image = tf.io.decode_jpeg(example[\"image\"])\n image = tf.reshape(image, shape=[28, 28])\n return image, example[\"label\"]", "title": "" }, { "docid": "29a9d0a52c71520983d5274f20f01d94", "score": "0.5816439", "text": "def _parse_example(serialized_example):\n\t\tfeature = tf.parse_single_example(serialized_example, \n\t\t\t\tfeatures={'label': 
tf.FixedLenFeature([], tf.float32),\n\t\t\t\t\t\t'image': tf.FixedLenFeature([], tf.string)})\n\t\t# Reinterpret the bytes of a string (from the file) as a vector of numbers.\n\t\timg = tf.decode_raw(feature['image'], tf.uint8)\n\t\t# reshape the image to proper shape\n\t\timg = tf.reshape(img, [28, 28, 1])\n\t\t# cast image data type to tf.float32 and normalize the image\n\t\timg = tf.cast(img, tf.float32) * (1. / 255) - 0.5\n\t\t# return a tuple\n\t\treturn feature['label'], img", "title": "" }, { "docid": "6456a0d6fb42dd3a226b1326624476f7", "score": "0.5808176", "text": "def _parse_example(serialized_example):\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"targets\": tf.VarLenFeature(tf.int64)\n }\n parsed = tf.parse_single_example(serialized_example, data_fields)\n inputs = tf.sparse_tensor_to_dense(parsed[\"inputs\"])\n targets = tf.sparse_tensor_to_dense(parsed[\"targets\"])\n return inputs, targets", "title": "" }, { "docid": "8e8241e6662b7cddae37515d5382a400", "score": "0.5766105", "text": "def example_parser(serialized_example):\r\n features = tf.parse_single_example(\r\n serialized_example,\r\n features={\r\n 'image_raw': tf.FixedLenFeature([], tf.string),\r\n 'label': tf.FixedLenFeature([], tf.int64),\r\n })\r\n image = tf.decode_raw(features['image_raw'], tf.uint8)\r\n image.set_shape([28 * 28])\r\n\r\n # Normalize the values of the image from the range [0, 255] to [-0.5, 0.5]\r\n image = tf.cast(image, tf.float32) / 255 - 0.5\r\n label = tf.cast(features['label'], tf.int32)\r\n return image, tf.one_hot(label, 10)", "title": "" }, { "docid": "a74919fb8c68c5f0821facbc6f310876", "score": "0.57250744", "text": "def parse_tfrecord_example(serialized: bytes) -> typing.Tuple[tf.Tensor, tf.Tensor]:\n features = {\n \"feature/value\": tf.io.FixedLenFeature(shape=(), dtype=tf.string),\n \"label/value\": tf.io.FixedLenFeature(shape=(), dtype=tf.string),\n }\n example = tf.io.parse_single_example(serialized, features)\n x = tf.io.decode_raw(example[\"feature/value\"], feature_dtype)\n y = tf.io.decode_raw(example[\"label/value\"], label_dtype)\n # The shapes are encoded in the TFRecord file, but we cannot use\n # them dynamically (aka reshape according to the shape in this example).\n if feature_shape is not None:\n x = tf.reshape(x, shape=feature_shape)\n if label_shape is not None:\n y = tf.reshape(y, shape=label_shape)\n return x, y", "title": "" }, { "docid": "1af3e4b01d4f30da24458458d5cc41ef", "score": "0.5707062", "text": "def _parse_tf_imp_dict(example_proto):\n image_feature_description = {\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'depth': tf.io.FixedLenFeature([], tf.int64),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'class_label': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'image_raw': tf.io.FixedLenFeature([], tf.string)}\n\n return tf.io.parse_single_example(example_proto, image_feature_description)", "title": "" }, { "docid": "8d3f604dc20a0862101eb377493b56b9", "score": "0.56823385", "text": "def dict_to_tf_example(data,\n dataset_directory,\n label_map_dict,\n ignore_difficult_instances=False,\n image_subdirectory='JPEGImages',\n augment = 0):\n img_path = os.path.join(data['folder'],image_subdirectory,data['filename'])\n full_path = os.path.join(dataset_directory,img_path)\n image = misc.imread(full_path)\n image = misc.imresize(image,[FLAGS.image_size,FLAGS.image_size,3])\n\n width = int(data['size']['width'])\n height = 
int(data['size']['height'])\n \n filename = data['filename'].encode('utf8')\n\n ymin = []\n xmin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n for obj in data['object']:\n difficult = bool(int(obj['difficult']))\n if ignore_difficult_instances and difficult:\n continue\n \n difficult_obj.append(int(difficult))\n \n xmin.append(np.round(float(obj['bndbox']['xmin']) / width * FLAGS.image_size))\n ymin.append(np.round(float(obj['bndbox']['ymin']) / height * FLAGS.image_size))\n xmax.append(np.round(float(obj['bndbox']['xmax']) / width * FLAGS.image_size))\n ymax.append(np.round(float(obj['bndbox']['ymax']) / height * FLAGS.image_size))\n classes_text.append(obj['name'].encode('utf8'))\n classes.append(label_map_dict[obj['name']])\n truncated.append(int(obj['truncated']))\n poses.append(obj['pose'].encode('utf8'))\n \n\n\n #rotate\n if augment == 1:\n \n r = image[:,:,0]\n g = image[:,:,1]\n b = image[:,:,2]\n r = np.transpose(r)\n g = np.transpose(g)\n b = np.transpose(b)\n\n image = np.stack((r,g,b),axis = 2).astype('uint8')\n\n \n image_raw = image.tostring()\n \n xmin = np.array(xmin)\n ymin = np.array(ymin)\n xmax = np.array(xmax)\n ymax = np.array(ymax)\n\n _xmin = ymin\n _ymin = FLAGS.image_size - xmax\n _xmax = ymax\n _ymax = FLAGS.image_size - xmin\n \n return make_tf_example(filename,height,width,image_raw,_xmin,_ymin,_xmax,_ymax,classes_text,classes,difficult_obj,truncated,poses)\n\n #crop\n if augment == 2:\n \n\n ind = random.randint(0,len(xmin)-1)\n xmin = np.array(xmin)\n ymin = np.array(ymin)\n xmax = np.array(xmax)\n ymax = np.array(ymax)\n\n cx = (xmin[ind] + xmax[ind]) / 2.0\n cy = (ymin[ind] + ymax[ind]) / 2.0\n cw = xmax[ind] - xmin[ind] + 1\n ch = ymax[ind] - ymin[ind] + 1\n\n crop_size = np.maximum(cw,ch) + random.randint(0,FLAGS.image_size - np.maximum(cw,ch))\n assert crop_size <= 300,'crop size too large'\n\n cx_min = np.round(cx - crop_size / 2)\n cy_min = np.round(cy - crop_size / 2)\n cx_max = np.round(cx + crop_size / 2)\n cy_max = np.round(cy + crop_size / 2)\n\n if cx_min < 0:\n cx_min = 0\n if cy_min < 0:\n cy_min = 0\n if cx_max > FLAGS.image_size - 1:\n cx_max = FLAGS.image_size - 1\n if cy_max > FLAGS.image_size - 1:\n cy_max = FLAGS.image_size - 1\n\n _h = cy_max - cy_min + 1\n _w = cx_max - cx_min + 1\n \n _xmin = xmin\n _ymin = ymin\n _xmax = xmax\n _ymax = ymax\n \n \n\n keep_inds = []\n for i in range(len(xmin)):\n \n t_xmin = _xmin[i]\n t_ymin = _ymin[i]\n t_xmax = _xmax[i]\n t_ymax = _ymax[i]\n if _xmin[i] < cx_min:\n t_xmin = cx_min\n if _ymin[i] < cy_min:\n t_ymin = cy_min\n if _xmax[i] > cx_max:\n t_xmax = cx_max\n if _ymax[i] > cy_max:\n t_ymax = cy_max\n \n t_w = t_xmax - t_xmin + 1\n t_h = t_ymax - t_ymin + 1\n\n t_a = t_w * t_h\n o_a = (_xmax[i] - _xmin[i] + 1) * (_ymax[i] - _ymin[i] + 1)\n\n if t_w > 0 and t_h > 0 and o_a * 1.0 / t_a < 2:\n\n _xmin[i] = t_xmin\n _xmax[i] = t_xmax\n _ymin[i] = t_ymin\n _ymax[i] = t_ymax\n keep_inds.append(i)\n\n assert len(keep_inds) != 0,'no box in image'\n\n _xmin = np.round((_xmin[keep_inds] - cx_min) / _w * FLAGS.image_size)\n _ymin = np.round((_ymin[keep_inds] - cy_min) / _h * FLAGS.image_size)\n _xmax = np.round((_xmax[keep_inds] - cx_min) / _w * FLAGS.image_size)\n _ymax = np.round((_ymax[keep_inds] - cy_min) / _h * FLAGS.image_size)\n \n _classes = np.array(classes)[keep_inds]\n\n image = image[int(cy_min):int(cy_max),int(cx_min):int(cx_max)]\n image = misc.imresize(image,[FLAGS.image_size,FLAGS.image_size,3])\n \n image_raw = 
image.tostring()\n\n return make_tf_example(filename,height,width,image_raw,_xmin,_ymin,_xmax,_ymax,classes_text,_classes,difficult_obj,truncated,poses)\n\n if augment == 3:\n image = image[:,::-1]\n _xmin = FLAGS.image_size - xmax\n _ymin = ymin\n _xmax = FLAGS.image_size - xmin\n _ymax = ymax\n image_raw = image.tostring()\n return make_tf_example(filename,height,width,image_raw,_xmin,_ymin,_xmax,_ymax,classes_text,classes,difficult_obj,truncated,poses)\n \n if augment == 4:\n channel = random.randint(0,2)\n for c in range(3):\n image[:,:,c] = image[:,:,channel]\n image_raw = image.tostring()\n return make_tf_example(filename,height,width,image_raw,xmin,ymin,xmax,ymax,classes_text,classes,difficult_obj,truncated,poses)\n \n\n \n image_raw = image.tostring()\n return make_tf_example(filename,height,width,image_raw,xmin,ymin,xmax,ymax,classes_text,classes,difficult_obj,truncated,poses)", "title": "" }, { "docid": "282fbc967da667f6df1296c0b8833834", "score": "0.56751895", "text": "def _parse_example(serialized, names, sparse_keys, dense_keys, dense_defaults,\n sparse_types, dense_shapes, name=None):\n result = _op_def_lib.apply_op(\"ParseExample\", serialized=serialized,\n names=names, sparse_keys=sparse_keys,\n dense_keys=dense_keys,\n dense_defaults=dense_defaults,\n sparse_types=sparse_types,\n dense_shapes=dense_shapes, name=name)\n return _ParseExampleOutput._make(result)", "title": "" }, { "docid": "2f1f7a87ca845b10b057f673890bb25a", "score": "0.56728077", "text": "def __parser__(self, example_proto):\r\n # configure feature and label length\r\n # It is crucial that for tf.string, the length is not specified, as the data is stored as a single string!\r\n x_config = tf.FixedLenFeature([], tf.string) \\\r\n if self.x_dtype == tf.string else tf.FixedLenFeature([self.num_features], self.x_dtype)\r\n if self.num_labels == 0:\r\n proto_config = {'x': x_config}\r\n else:\r\n y_config = tf.FixedLenFeature([], tf.string) \\\r\n if self.y_dtype == tf.string else tf.FixedLenFeature([self.num_labels], self.y_dtype)\r\n proto_config = {'x': x_config, 'y': y_config}\r\n\r\n # decode examples\r\n datum = tf.parse_single_example(example_proto, features=proto_config)\r\n if self.x_dtype == tf.string: # if input is string / bytes, decode it to float32\r\n # first decode data to uint8, as data is stored in this way\r\n datum['x'] = tf.decode_raw(datum['x'], tf.uint8)\r\n # then cast data to tf.float32\r\n datum['x'] = tf.cast(datum['x'], tf.float32)\r\n # cannot use string_to_number as there is only one string for a whole sample\r\n # datum['x'] = tf.strings.to_number(datum['x'], tf.float32) # this results in possibly a large number\r\n\r\n # return data\r\n if 'y' in datum:\r\n # y can be present in many ways:\r\n # 1. a single integer, which requires y to be int32 or int64 (e.g, used in tf.gather in cbn)\r\n # 2. num-class bool/integer/float variables. This form is more flexible as it allows multiple classes and\r\n # prior probabilities as targets\r\n # 3. 
float variables in regression problem.\r\n # but...\r\n # y is stored as int (for case 1), string (for other int cases), or float (for float cases)\r\n # in the case of tf.string and tf.int64, convert to to int32\r\n if self.y_dtype == tf.string:\r\n # avoid using string labels like 'cat', 'dog', use integers instead\r\n datum['y'] = tf.decode_raw(datum['y'], tf.uint8)\r\n datum['y'] = tf.cast(datum['y'], tf.int32)\r\n if self.y_dtype == tf.int64:\r\n datum['y'] = tf.cast(datum['y'], tf.int32)\r\n return datum['x'], datum['y']\r\n else:\r\n return datum['x']", "title": "" }, { "docid": "6ac5892572e31b8db56110e7efb43abd", "score": "0.56670314", "text": "def convert_to_example(image_path, boxes):\n with tf.gfile.GFile(image_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not jpeg')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n (width, height) = image.size\n\n class_label = [b['class_label'].encode('utf8') for b in boxes]\n class_index = [b['class_index'] for b in boxes]\n ymin = [b['y_min'] for b in boxes]\n xmin = [b['x_min'] for b in boxes]\n ymax = [b['y_max'] for b in boxes]\n xmax = [b['x_max'] for b in boxes]\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n os.path.basename(image_path).encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n os.path.basename(image_path).encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(class_label),\n 'image/object/class/label': dataset_util.int64_list_feature(class_index),\n }))\n\n return example", "title": "" }, { "docid": "11f169f3e177cbde7c82f6bb501a1504", "score": "0.56046087", "text": "def tiny_imagenet_parse(serialized_example):\n\n # This works for tf_slim model: resnet_50_v2 but NOT for Keras VGG16\n # Dense features in Example proto.\n feature_map = {\n 'height': tf.compat.v1.FixedLenFeature((), tf.int64),\n 'width': tf.compat.v1.FixedLenFeature((), tf.int64),\n 'channel': tf.compat.v1.FixedLenFeature((), tf.int64),\n 'label': tf.compat.v1.FixedLenFeature((), tf.int64),\n 'image_raw': tf.compat.v1.FixedLenFeature((), tf.string),\n 'location_raw': tf.compat.v1.FixedLenFeature((), tf.string)}\n\n features = tf.compat.v1.parse_single_example(serialized_example, feature_map)\n\n image_raw = tf.compat.v1.decode_raw(features[\"image_raw\"], tf.uint8)\n image = tf.reshape(image_raw, [64, 64, 3])\n\n return image", "title": "" }, { "docid": "fd489236afd8813c98ff9958f78db2b9", "score": "0.5597152", "text": "def create_example(data_dict):\n data_dict = {k: v for k, v in data_dict.iteritems() if v is not None}\n return tf.train.Example(\n # Example contains a Features proto object\n features=tf.train.Features(\n # Features has a map of string to Feature proto objects\n feature=data_dict\n )\n )", "title": "" }, { "docid": 
"ecf9e6e8683c1d849a2e8161e59733db", "score": "0.5584214", "text": "def _parse_function(example_proto):\n features = {\n \"image_raw\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n \"label\": tf.FixedLenFeature((), tf.int64),\n }\n parsed_features = tf.parse_single_example(example_proto, features)\n output_features = {\n \"image\": tf.reshape(\n tf.decode_raw(parsed_features[\"image_raw\"], tf.float32),\n [28, 28],\n ),\n }\n labels = tf.cast(parsed_features[\"label\"], tf.int32)\n # Returns a tuple (features, labels)\n return output_features, labels", "title": "" }, { "docid": "7f5b36874e45dd2d01cd87c32bfffc85", "score": "0.55545396", "text": "def serialize_example(*args):\n # Create a dictionary mapping the feature name to the tf.train.Example-compatible\n # data type.\n feature = {}\n for i, val in enumerate(args):\n if val.dtype in [tf.int32, tf.int64]:\n casted_val = _int64_feature(val)\n elif val.dtype in [tf.float16, tf.float32, tf.float64]:\n casted_val = _float_feature(val)\n else:\n casted_val = _bytes_feature(val)\n \n key = feature_name[i]\n feature[key] = casted_val\n \n # Create a Features message using tf.train.Example\n example_proto = tf.train.Example(\n features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()", "title": "" }, { "docid": "f486bc455218242b8931448146290be9", "score": "0.5492443", "text": "def parser(serialized_example):\n\n final_image = None\n final_label = None\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image/encoded\": tf.FixedLenFeature([], tf.string),\n \"image/class/label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.image.decode_jpeg(features[\"image/encoded\"], channels=3)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = tf.image.resize_images(\n image,\n size=[224, 224])\n final_label = tf.cast(features[\"image/class/label\"], tf.int32)\n\n\n final_image = (tf.cast(image, tf.float32) * (1. 
/ 255)) - 0.5\n\n #image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n #final_image = image_preprocessing_fn(\n # image=image,\n # output_height=224,\n # output_width=224,\n # is_training=True)\n return final_image, tf.one_hot(final_label, FLAGS.num_classes)", "title": "" }, { "docid": "57245832a31bc6299c50601856b415ec", "score": "0.5466201", "text": "def parser(self, serialized_example):\n if self.test_small_sample:\n image = serialized_example\n label = tf.constant(0, tf.int32)\n else:\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'raw_image':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'height':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'width':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n self.saliency_method:\n tf.VarLenFeature(tf.float32),\n 'label':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'prediction_class':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1)\n })\n image = tf.image.decode_image(features['raw_image'], 3)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n saliency_heatmap = tf.expand_dims(features[self.saliency_method].values,\n 0)\n saliency_heatmap = tf.reshape(saliency_heatmap, IMAGE_DIMS)\n\n if self.transformation in ['modified_image', 'random_baseline']:\n # we apply test_time pre-processing to the raw image before modifying\n # according to the estimator ranking.\n image_preprocess = preprocess_image(\n image, image_size=IMAGE_DIMS[0], is_training=False)\n\n if self.transformation == 'modified_image':\n tf.logging.info('Computing feature importance estimate now...')\n image = compute_feature_ranking(\n input_image=image_preprocess,\n saliency_map=saliency_heatmap,\n threshold=self.threshold,\n global_mean=self.global_mean,\n rescale_heatmap=True,\n keep_information=self.keep_information,\n use_squared_value=self.use_squared_value)\n\n if self.transformation == 'random_baseline':\n tf.logging.info('generating a random baseline')\n image = random_ranking(\n input_image=image_preprocess,\n global_mean=self.global_mean,\n threshold=self.threshold,\n keep_information=self.keep_information)\n\n if self.mode == 'train':\n is_training = True\n else:\n is_training = False\n\n if self.transformation in ['random_baseline', 'modified_image']:\n tf.logging.info('starting pre-processing for training/eval')\n image = preprocess_image(\n image, image_size=IMAGE_DIMS[0], is_training=is_training)\n\n if self.transformation == 'raw_image':\n tf.logging.info('starting pre-processing for training/eval')\n image = preprocess_image(\n image, image_size=IMAGE_DIMS[0], is_training=is_training)\n\n label = tf.cast(tf.reshape(features['label'], shape=[]), dtype=tf.int32)\n\n return image, label", "title": "" }, { "docid": "ad511b4c50e8bd712c9e4060c7a254b5", "score": "0.54530054", "text": "def parser(self, example):\n parsed_example = tf.io.parse_single_example(\n example, {\"data\": tf.io.FixedLenFeature([], tf.string)})\n return tf.reshape(\n tf.io.decode_raw(parsed_example[\"data\"], tf.float32),\n self.input_shape)", "title": "" }, { "docid": "2c31a5ea235ed75d40346c9dc13394f2", "score": "0.54363275", "text": "def dict_to_example(dictionary):\n features = {}\n for k, v in six.iteritems(dictionary):\n features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))\n return tf.train.Example(features=tf.train.Features(feature=features))", "title": "" }, { "docid": "1bc21b82569ab3fa253e4a65cab60325", "score": "0.54182523", "text": "def 
_parse_function(proto):\n keys_to_features = {'train/image': tf.FixedLenFeature([], tf.string),\n 'train/label': tf.FixedLenFeature([], tf.int64)}\n\n parsed_features = tf.parse_single_example(proto, keys_to_features)\n parsed_features['train/image'] = tf.decode_raw(parsed_features['train/image'], tf.float32)\n\n return parsed_features['train/image'], parsed_features[\"train/label\"]", "title": "" }, { "docid": "2ddfa990be8391d676a66fbf6c74aa7e", "score": "0.5415006", "text": "def read_from_example(serialized_ex):\n coder = ImageCoder()\n example = tf.train.Example()\n example.ParseFromString(serialized_ex)\n features = example.features.feature\n\n # Load features from example.\n N = features['meta/N'].int64_list.value[0]\n im_datas = features['image/encoded'].bytes_list.value\n centers = features['image/centers'].int64_list.value\n xys = features['image/xys'].float_list.value\n face_pts = features['image/face_pts'].float_list.value\n toe_pts = features['image/toe_pts'].float_list.value\n vis = features['image/visibilities'].int64_list.value\n scales = np.array(features['image/scale_factors'].float_list.value)\n gt3ds = features['mosh/gt3ds'].float_list.value\n poses = features['mosh/poses'].float_list.value\n shape = features['mosh/shape'].float_list.value\n time_pts = features['meta/time_pts'].int64_list.value\n start_pts = np.array(features['image/crop_pts'].int64_list.value)\n im_shapes = features['image/heightwidths'].int64_list.value\n im_paths = features['image/filenames'].bytes_list.value\n\n # Process and reshape features.\n images = [coder.decode_jpeg(im_data) for im_data in im_datas]\n centers = np.array(centers).reshape((N, 2))\n gt3ds = np.array(gt3ds).reshape((N, -1, 3))\n gt3ds = gt3ds[:, :14] # Don't want toes_pts or face_pts\n xys = np.array(xys).reshape((N, 2, 14))\n vis = np.array(vis, dtype=np.float).reshape((N, 1, 14))\n face_pts = np.array(face_pts).reshape((N, 3, 5))\n toe_pts = np.array(toe_pts).reshape((N, 3, 6))\n kps = np.dstack((\n np.hstack((xys, vis)),\n face_pts,\n toe_pts,\n ))\n kps = np.transpose(kps, axes=[0, 2, 1])\n poses = np.array(poses).reshape((N, 24, 3))\n shape = np.array(shape)\n start_pts = np.array(start_pts).reshape((N, 2))\n im_shapes = np.array(im_shapes).reshape((N, 2))\n\n return {\n 'N': N,\n 'centers': centers,\n 'kps': kps,\n 'gt3ds': gt3ds,\n 'images': images,\n 'im_shapes': im_shapes,\n 'im_paths': im_paths,\n 'poses': poses,\n 'scales': scales,\n 'shape': shape,\n 'start_pts': start_pts,\n 'time_pts': time_pts,\n }", "title": "" }, { "docid": "156ca2f7da01dd300e97a57889999b43", "score": "0.5363978", "text": "def decode(self, serialized_example):\n sparse = self.raw_features(serialized_example)\n features = {}\n for k, v in sparse.items():\n is_sparse = isinstance(self.specs.get(k, None), tf.io.VarLenFeature)\n features[k] = tf.sparse.to_dense(v) if is_sparse else v\n\n result = {}\n for k, v in features.items():\n if v.dtype == tf.string and v.shape.rank > 0 and v.shape[0] == 1:\n parsed = v[0]\n else:\n parsed = v\n parsed = parsed.numpy() if self._to_numpy else parsed\n parsed = parsed.decode() if isinstance(parsed, bytes) else parsed\n # Enforces the final shapes if possible.\n shape = self._shapes.get(k, None)\n parsed = tf.ensure_shape(parsed, shape) if shape is not None else parsed\n result[k] = parsed\n return result", "title": "" }, { "docid": "c3b3bf239334f9198d6489f649296cde", "score": "0.53601253", "text": "def parse(image, transcript, label):\n\t\toutput = {\n\t\t\t'image' : tf.train.Feature(bytes_list = 
tf.train.BytesList(value = [image])),\n\t\t\t'transcripts' : tf.train.Feature(bytes_list = tf.train.BytesList(value = [transcript])),\n\t\t\t'label' : tf.train.Feature(bytes_list = tf.train.BytesList(value = [label]))}\n\t\t\n\t\treturn tf.train.Example(features = tf.train.Features(feature = output)).SerializeToString()", "title": "" }, { "docid": "b351c50f2e30b5a236aa79e307bb121f", "score": "0.5356347", "text": "def _parse_example(\n self, example: tf.Tensor\n ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:\n # do parsing on the cpu\n with tf.device(\"/cpu:0\"):\n # define input shapes\n # TODO: update this for your data set\n features = {\n \"image\": tf.FixedLenFeature(shape=[28, 28, 1], dtype=tf.float32),\n \"label\": tf.FixedLenFeature(shape=[1], dtype=tf.int64),\n }\n example = tf.parse_single_example(example, features=features)\n\n # only augment training data\n if self.mode == \"train\":\n input_data = self._augment(example[\"image\"])\n else:\n input_data = example[\"image\"]\n\n return {\"input\": input_data}, example[\"label\"]", "title": "" }, { "docid": "6a5d00bfab624a90140562766bfd209c", "score": "0.5338674", "text": "def _decode_record(record, name_to_features, schema_tensors):\n\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n # Here we need to insert schema's entity embedding to each example.\n\n # Shapes for reference: (all have type tf.float32)\n # \"cat_slot_emb\": [max_num_cat_slot, hidden_dim]\n # \"cat_slot_value_emb\": [max_num_cat_slot, max_num_value, hidden_dim]\n # \"noncat_slot_emb\": [max_num_noncat_slot, hidden_dim]\n # \"req_slot_emb\": [max_num_total_slot, hidden_dim]\n # \"intent_emb\": [max_num_intent, hidden_dim]\n\n service_id = example[\"service_id\"]\n for key, value in schema_tensors.items():\n example[key] = value[service_id]\n return example", "title": "" }, { "docid": "e5639b0140e8ef5316ea1edfd58a73d3", "score": "0.53354055", "text": "def imagenet_parse(serialized_example):\n dim = 224\n\n features = tf.compat.v1.parse_single_example(serialized_example,\n features={\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/encoded': tf.FixedLenFeature([], tf.string)})\n image_data = features['image/encoded']\n\n # Decode the jpeg\n with tf.name_scope('prep_image', [image_data], None):\n # decode and reshape to default 224x224\n # pylint: disable=no-member\n image = tf.image.decode_jpeg(image_data, channels=3)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = tf.image.resize_images(image, [dim, dim])\n\n return image", "title": "" }, { "docid": "ded3eb3fc2325d5a0ff4d07fabd2b37d", "score": "0.5334299", "text": "def decode_example(protos, params):\n dtype_map = {\n \"str\": tf.string,\n \"int\": tf.int64,\n \"float\": tf.float32\n }\n\n # Create feature schema map for protos.\n tf_example_features = {\n feat[\"name\"]: (\n tf.io.FixedLenFeature(\n shape=feat[\"shape\"], dtype=dtype_map[feat[\"dtype\"]]\n )\n if feat[\"type\"] == \"FixedLen\"\n else tf.io.FixedLenSequenceFeature(\n shape=feat[\"shape\"], dtype=dtype_map[feat[\"dtype\"]]\n )\n )\n for feat in params[\"tf_record_example_schema\"]\n }\n\n # Parse features from tf.Example.\n parsed_features = tf.io.parse_single_example(\n serialized=protos, features=tf_example_features\n )\n\n # Convert from a scalar string 
tensor (whose single string has\n # length height * width * depth) to a uint8 tensor with shape\n # [height * width * depth].\n if params[\"image_encoding\"] == \"raw\":\n image = tf.io.decode_raw(\n input_bytes=parsed_features[params[\"image_feature_name\"]],\n out_type=tf.uint8\n )\n elif params[\"image_encoding\"] == \"png\":\n image = tf.io.decode_png(\n contents=parsed_features[params[\"image_feature_name\"]],\n channels=params[\"image_depth\"]\n )\n elif params[\"image_encoding\"] == \"jpeg\":\n image = tf.io.decode_jpeg(\n contents=parsed_features[params[\"image_feature_name\"]],\n channels=params[\"image_depth\"]\n )\n\n # Reshape flattened image back into normal dimensions.\n image = tf.reshape(\n tensor=image,\n shape=[\n params[\"image_height\"],\n params[\"image_width\"],\n params[\"image_depth\"]\n ]\n )\n\n return image", "title": "" }, { "docid": "1cd551e3f34b8c7eda233098d14120cc", "score": "0.533174", "text": "def _decode_raw_protobuf_string(self, protobuf_string):\n keys_to_features = _get_keys_to_features()\n tensor_dict = tf.parse_single_example(protobuf_string, keys_to_features)\n return {'images': tf.image.decode_jpeg(tensor_dict['image'], channels=3),\n 'labels': tf.image.decode_png(tensor_dict['label'], channels=1)}", "title": "" }, { "docid": "7a5f7f0f0557e28e7eb26a4f93308a56", "score": "0.53313845", "text": "def create_tf_example(data_dict,\n label_map\n ):\n encoded_jpg = img.resize_jpeg((data_dict['images'][0]['Path']), 1000)\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n width = int(width)\n height = int(height)\n\n filename = data_dict['images'][0]['Path'].encode('utf-8')\n image_format = b'jpg'\n xmins, xmaxs, ymins, ymaxs = [], [], [], []\n classes_text, classes = [], []\n\n for bb_record in data_dict['images'][0]['observations']:\n xmins.append(float(bb_record['bb_xmin']))\n xmaxs.append(float(bb_record['bb_xmax']))\n ymins.append(float(bb_record['bb_ymin']))\n ymaxs.append(float(bb_record['bb_ymax']))\n classes_text.append(bb_record['bb_primary_label'].encode('utf8'))\n classes.append(label_map[bb_record['bb_primary_label']])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(filename),\n 'image/source_id': dataset_util.bytes_feature(filename),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n return tf_example", "title": "" }, { "docid": "2bd6d341d2bb99c196ee6c1c7ba72ab5", "score": "0.5331023", "text": "def RowToExample(self, instance: Dict[str, Any]) -> tf.train.Example:\n return utils.row_to_example(self._type_map, instance)", "title": "" }, { "docid": "08b8f8c86916df0559cc83bc207af123", "score": "0.5307692", "text": "def parse_record(serialized_example):\n features = tf.io.parse_single_example(\n serialized_example,\n features={\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 
'image_id': tf.io.FixedLenFeature([], tf.int64),\n 'landmarks': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'image_jpeg': tf.io.FixedLenFeature([], tf.string),\n 'format': tf.io.FixedLenFeature([], tf.string)\n })\n height = tf.cast(features['height'], tf.int32)\n width = tf.cast(features['width'], tf.int32)\n image_id = tf.cast(features['image_id'], tf.int32)\n image_name = tf.cast(features['image_name'], tf.string)\n landmarks = tf.cast(features['landmarks'], tf.string)\n image = tf.cast(tf.image.decode_jpeg(features['image_jpeg'], channels=3), tf.uint8)\n image_shape = tf.stack([height, width, 3])\n image = tf.reshape(image, image_shape)\n image_info = {'image_name': image_name, 'image_id': image_id, 'landmarks': landmarks}\n return [image, image_info]", "title": "" }, { "docid": "c36f94c6039d2c926f4dc2e1c8bfcb59", "score": "0.5302261", "text": "def _get_example(self, data_dir: str) -> tf.train.Example:\n data_dir = os.path.abspath(data_dir)\n # Process labels.json file\n label_file = os.path.join(data_dir, 'labels.json')\n with open(label_file, 'r') as f:\n data = json.load(f)\n\n # Load all Annotations\n img_to_annotations = collections.defaultdict(list)\n for annotation in data['annotations']:\n image_id = annotation['image_id']\n img_to_annotations[image_id].append(annotation)\n\n # For each Image:\n for image in data['images']:\n img_id = image['id']\n file_name = image['file_name']\n full_path = os.path.join(data_dir, 'images', file_name)\n with tf.io.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n image = tf.io.decode_jpeg(encoded_jpg, channels=3)\n height, width, _ = image.shape\n feature_dict = tfrecord_lib.image_info_to_feature_dict(\n height, width, file_name, img_id, encoded_jpg, 'jpg'\n )\n data, _ = _coco_annotations_to_lists(\n img_to_annotations[img_id], height, width\n )\n if not data['xmin']:\n # Skip examples which have no annotations\n continue\n bbox_feature_dict = _bbox_data_to_feature_dict(data)\n feature_dict.update(bbox_feature_dict)\n example = tf.train.Example(\n features=tf.train.Features(feature=feature_dict)\n )\n yield example", "title": "" }, { "docid": "f7ec32e054d5b70582f9fef0928f3260", "score": "0.5269521", "text": "def decode_example(self, tfexample_data):\n value, shape = self._get_value_and_shape(tfexample_data)\n if self._encoded_to_bytes:\n if self._encoding == Encoding.ZLIB:\n value = tf.io.decode_compressed(value, compression_type='ZLIB')\n value = tf.io.decode_raw(value, self.tf_dtype)\n value = tf.reshape(value, shape)\n\n return value", "title": "" }, { "docid": "0ffdfa8f030a0da6f2fbf42b85d3af14", "score": "0.5243228", "text": "def eval_single_example(model: Model, serialized_example: Text) -> Result:\n example_dict = parse_example_proto(serialized_example)\n for key in example_dict:\n example_dict[key] = tf.expand_dims(example_dict[key], axis=0)\n example = Example(**example_dict)\n return predict(model, example)", "title": "" }, { "docid": "e54db40b8f25af1ec9c4bccf0f85a04f", "score": "0.5240795", "text": "def serialize_example(image, label):\n # Create a dictionary mapping the feature name to the tf.Example-compatible\n # data type.\n feature = {\n 'image': tf.train.Feature(float_list=tf.train.FloatList(value=image)),\n 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),\n }\n\n # Create a Features message using tf.train.Example.\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return 
example_proto.SerializeToString()", "title": "" }, { "docid": "2abce4f90f2a92d3bbff1c0864b90066", "score": "0.5236489", "text": "def _parse_function(example_proto):\n dics = {\n 'image': tf.FixedLenFeature([], dtype=tf.string),\n 'image_shape': tf.FixedLenFeature(shape=(3, ), dtype=tf.int64),\n 'label': tf.FixedLenFeature([], dtype=tf.int64),\n }\n parsed_example = tf.parse_single_example(example_proto, features=dics)\n\n image = tf.reshape(tf.decode_raw(\n parsed_example['image'], tf.uint8), parsed_example['image_shape'])\n label = parsed_example['label']\n\n image = tf.cast(image, tf.float32)\n label = tf.cast(label, tf.float32)\n\n return image, label", "title": "" }, { "docid": "ca5180a33ac31aac95a4dc95a23aa47f", "score": "0.52364326", "text": "def parse_fn(drawit_proto):\n num_classes = 345\n\n features = {\"doodle\": tf.FixedLenFeature((28 * 28), dtype=tf.int64),\n \"class_index\": tf.FixedLenFeature((), tf.int64, default_value=0)}\n\n parsed_features = tf.parse_single_example(drawit_proto, features)\n\n labels = parsed_features[\"class_index\"]\n labels = tf.one_hot(labels, num_classes)\n\n features = parsed_features['doodle']\n\n features = tf.reshape(features, [28, 28, 1])\n features = tf.cast(features, tf.float32)\n\n features = (features / 127.5) - 1\n\n return features, labels", "title": "" }, { "docid": "b861c5a80ed06c8147ca761632654069", "score": "0.52164644", "text": "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n input_ids = tf.sparse.to_dense(example[\"input_ids\"])\n input_ids = tf.reshape(input_ids, shape=[-1, input_dim])\n\n input_dicts = tf.sparse.to_dense(example[\"input_dicts\"])\n input_dicts = tf.reshape(input_dicts, shape=[-1, dict_dim])\n if augmenter is None or not is_training:\n example[\"input_ids\"], example[\"input_dicts\"] = input_ids, input_dicts\n else:\n example[\"input_ids\"], example[\"input_dicts\"] = augmenter.augment(input_ids, input_dicts)\n example[\"label_ids\"] = tf.sparse.to_dense(example[\"label_ids\"])\n example[\"label_ids\"] = tf.reshape(example[\"label_ids\"], shape=[-1])\n example[\"seq_length\"] = example[\"seq_length\"]\n\n return example", "title": "" }, { "docid": "7b0baaa27b4b7ce8b94e6dfbea78429b", "score": "0.5201054", "text": "def single_example_parser(serialized_example):\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n features = tf.io.parse_single_example(\n serialized_example,\n features={\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n })\n image = tf.io.decode_raw(features['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(features['label'], tf.int32)\n \n # Resize the image to add four extra pixels on each side.\n image = tf.image.resize_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)\n # Randomly crop a [HEIGHT, WIDTH] section of the image.\n image = tf.image.random_crop(image, [HEIGHT, WIDTH, DEPTH])\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n \n label = tf.one_hot(label, NUM_CLASSES)\n return image, label", "title": "" }, { "docid": "1b19784a7017839b868fbce520b33084", "score": "0.51893854", "text": "def _serialize_example(x, y, x_dtype=np.uint8, 
y_dtype=np.uint8) -> bytes:\n\n def _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n value = (\n value.numpy()\n ) # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n def _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n x = np.asanyarray(x).astype(x_dtype)\n y = np.asanyarray(y).astype(y_dtype)\n\n # This is a dictionary used to construct a protobuf message.\n tf_feature = {\n \"feature/value\": _bytes_feature(x.tobytes()),\n \"feature/dtype\": _bytes_feature(x.dtype.name.encode()),\n \"feature/ndim\": _int64_feature(x.ndim),\n }\n # Add shape info. This part is dynamic because the array could have any\n # number of dimensions.\n tf_feature.update(\n {f\"feature/shape/{i}\": _int64_feature(s) for i, s in enumerate(x.shape)}\n )\n\n # Update with information about labels. We add label information after all\n # feature information has been added so that feature information all\n # stays together, and all label information stays together.\n # Otherwise, feature and label info would be interleaved.\n tf_feature.update(\n {\n \"label/value\": _bytes_feature(y.tobytes()),\n \"label/dtype\": _bytes_feature(y.dtype.name.encode()),\n \"label/ndim\": _int64_feature(y.ndim),\n }\n )\n tf_feature.update(\n {f\"label/shape/{i}\": _int64_feature(s) for i, s in enumerate(y.shape)}\n )\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=tf_feature))\n\n return example_proto.SerializeToString()", "title": "" }, { "docid": "57a80f969a8f32dfe5dbfe831cd9f71d", "score": "0.51782936", "text": "def _decode_record(record, name_to_columns):\n example = tf.io.parse_example(serialized=record, features=name_to_columns)\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n return example", "title": "" }, { "docid": "c747e27e966cc69d407775951a7c2edd", "score": "0.5174566", "text": "def _example_parser(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:\n per_example_step_seed = tf.random.experimental.stateless_fold_in(\n self._seed, example[self._enumerate_id_key])\n if self._preprocessing_type == 'inception':\n # `inception_preprocessing.preprocess_image` returns images in [-1, 1].\n image = inception_preprocessing.preprocess_image(\n example['image'],\n height=self._image_size,\n width=self._image_size,\n seed=per_example_step_seed,\n is_training=self._is_training)\n # Rescale to [0, 1].\n image = (image + 1.0) / 2.0\n elif self._preprocessing_type == 'resnet':\n # `resnet_preprocessing.preprocess_image` returns images in [0, 1].\n image = resnet_preprocessing.preprocess_image(\n image_bytes=example['image'],\n is_training=self._is_training,\n use_bfloat16=self._use_bfloat16,\n image_size=self._image_size,\n seed=per_example_step_seed,\n resize_method=self._resnet_preprocessing_resize_method)\n else:\n raise ValueError(\n 'Invalid preprocessing type, must be one of \"inception\" or '\n '\"resnet\", received {}.'.format(self._preprocessing_type))\n\n if self._normalize_input:\n image = (tf.cast(image, tf.float32) - IMAGENET_MEAN) / IMAGENET_STDDEV\n if self._use_bfloat16:\n image = tf.cast(image, tf.bfloat16)\n\n # Note that labels are 
always float32, even when images are bfloat16.\n if self._one_hot:\n label = tf.one_hot(example['label'], 1000, dtype=tf.float32)\n else:\n label = tf.cast(example['label'], tf.float32)\n parsed_example = {\n 'features': image,\n 'labels': label,\n }\n if self._include_file_name and 'file_name' in example:\n parsed_example['file_name'] = example['file_name']\n return parsed_example", "title": "" }, { "docid": "05d7abbbe47aa135c13c44ad4d1e9387", "score": "0.516807", "text": "def convert_example(ex_index, example, label_list, max_seq_length,\n sub_tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n input_ids = sub_tokenizer.encode(example.text)\n\n # Zero-pad up to the sequence length.\n if len(input_ids) > max_seq_length:\n input_ids = input_ids[0:max_seq_length]\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n logging.info(\"*** Example ***\")\n logging.info(\"rid: %s\", example.eid)\n logging.info(\"tokens: %s\", sub_tokenizer.decode(input_ids))\n logging.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\n logging.info(\"label: %s (id = %d)\", example.label, label_id)\n\n feature = InputFeatures(\n input_ids=input_ids,\n label_id=label_id)\n return feature", "title": "" }, { "docid": "b1e6d72f15e10f765634a11a6081411e", "score": "0.51467884", "text": "def create_tf_example(image_path,image_name,height,width,sign_type_num,x_min,y_min,x_max,y_max):\n \n # TODO(user): Populate the following variables from your example.\n\n filename = image_name # Filename of the image. Empty if image is not from file\n encoded_image_data =image_data = tf.gfile.FastGFile(image_path, 'rb').read()\n image_format = b'jpeg' # b'jpeg' or b'png'\n\n xmins = [x_min/ORIGINAL_WIDTH] # List of normalized left x coordinates in bounding box (1 per box)\n xmaxs = [x_max/ORIGINAL_WIDTH]# List of normalized right x coordinates in bounding box\n # (1 per box)\n ymins = [y_min/ORIGINAL_HEIGHT] # List of normalized top y coordinates in bounding box (1 per box)\n ymaxs = [y_max/ORIGINAL_HEIGHT] # List of normalized bottom y coordinates in bounding box\n # (1 per box)\n classes = [sign_type_num] # List of integer class id of bounding box (1 per box)\n classes_text =list(mydict.keys())[list(mydict.values()).index(sign_type_num)]\n print(classes_text)# List of string class name of bounding box (1 per box)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(filename.encode()),\n 'image/source_id': dataset_util.bytes_feature(filename.encode()),\n 'image/encoded': dataset_util.bytes_feature(encoded_image_data),\n 'image/format': dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature([classes_text.encode()]),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n return tf_example", "title": "" }, { "docid": "babb8d82436c03b9f7c877b87c40c94e", "score": "0.5144022", "text": "def raw_features(self, serialized_example):\n ctx_specs = {}\n seq_specs = 
{}\n for k, v in self._specs.items():\n target_specs = seq_specs if k in self.sequence_keys else ctx_specs\n v = tf.io.VarLenFeature(v) if isinstance(v, tf.dtypes.DType) else v\n target_specs[k] = v\n context, sparse = tf.io.parse_single_sequence_example(\n serialized_example, ctx_specs, seq_specs)\n\n sparse.update(context)\n return sparse", "title": "" }, { "docid": "dbf592ead267e597c89d4ae46894e7ce", "score": "0.5137344", "text": "def _from_tensor(self, bboxes, sample=None):\n # for now, we can only unpack torchvision-format bbox dictionary lists (everything else will throw)\n assert isinstance(bboxes, list), \"input should be list since we do batch predictions\"\n if all([isinstance(d, dict) and len(d) == 3 and\n all([k in [\"boxes\", \"labels\", \"scores\"] for k in d]) for d in bboxes]):\n outputs = []\n for batch_idx, d in enumerate(bboxes):\n boxes = d[\"boxes\"].detach().cpu()\n labels = d[\"labels\"].detach().cpu()\n scores = d[\"scores\"].detach().cpu()\n assert boxes.shape[0] == labels.shape[0] and boxes.shape[0] == scores.shape[0], \"mismatched tensor dims\"\n curr_output = []\n for box_idx, box in enumerate(boxes):\n if sample is not None and self.task.gt_key in sample and sample[self.task.gt_key][batch_idx]:\n gt_box = sample[self.task.gt_key][batch_idx][0] # use first gt box to get image-level props\n out = thelper.data.BoundingBox(labels[box_idx].item(), box, confidence=scores[box_idx].item(),\n image_id=gt_box.image_id, task=self.task)\n\n elif sample is not None and \"idx\" in sample:\n out = thelper.data.BoundingBox(labels[box_idx].item(), box, confidence=scores[box_idx].item(),\n image_id=sample[\"idx\"][batch_idx], task=self.task)\n else:\n out = thelper.data.BoundingBox(labels[box_idx].item(), box, confidence=scores[box_idx].item(),\n task=self.task)\n curr_output.append(out)\n outputs.append(curr_output)\n return outputs\n raise AssertionError(\"unrecognized packed bboxes vector format\")", "title": "" }, { "docid": "b82d3570e0327d811a62607ce3fbcb51", "score": "0.51355875", "text": "def _parse_tfexample_fn(example_proto, mode):\n feature_to_type = {\n \"ink\": tf.VarLenFeature(dtype=tf.float32),\n \"shape\": tf.FixedLenFeature([2], dtype=tf.int64)\n }\n if mode != tf.estimator.ModeKeys.PREDICT:\n # The labels won't be available at inference time, so don't add them\n # to the list of feature_columns to be read.\n feature_to_type[\"class_index\"] = tf.FixedLenFeature([1], dtype=tf.int64)\n\n parsed_features = tf.parse_single_example(example_proto, feature_to_type)\n labels = None\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = parsed_features[\"class_index\"]\n parsed_features[\"ink\"] = tf.sparse_tensor_to_dense(parsed_features[\"ink\"])\n return parsed_features, labels", "title": "" }, { "docid": "1a7d9b9b4cb6d60648534e444821a4ad", "score": "0.5129206", "text": "def record_parser(value, preprocessor=None, max_classes=-1):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=-1),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=-1),\n 'image/channels':\n tf.FixedLenFeature((), tf.int64, default_value=-1),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'image/class/text':\n tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n 'image/class/synset':\n tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n 
'image/object/number':\n tf.FixedLenFeature([], tf.int64, default_value=0),\n 'image/object/bbox/xmin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/label':\n tf.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.parse_single_example(value, keys_to_features)\n\n image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n height, width = parsed['image/height'], parsed['image/width']\n if preprocessor is not None:\n image = preprocessor(image)\n\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]),\n dtype=tf.int32)\n text = parsed['image/class/text']\n synset = parsed['image/class/synset']\n\n # Load the bbox data\n num_bboxes = tf.cast(parsed['image/object/number'], tf.int32)\n xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0)\n ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0)\n xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0)\n ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0)\n bbox_coords = tf.concat(axis=0, values=[xmin, ymin, xmax, ymax])\n bbox_coords = tf.transpose(bbox_coords, [1, 0])\n\n bbox_labels = tf.sparse_tensor_to_dense(parsed['image/object/bbox/label'])\n\n return (image, height, width, label, text,\n synset, num_bboxes, bbox_coords, bbox_labels)", "title": "" }, { "docid": "4defc685c20f653cde472d18d6d04feb", "score": "0.51290375", "text": "def encode_example(example_dict: Mapping[str, Any]) -> Mapping[str, Any]:\n result_dict = dict()\n for k, v in example_dict.items():\n if isinstance(v, tf.Tensor):\n v = v.numpy()\n if isinstance(v, dict):\n for ki, vi in encode_example(v).items():\n result_dict[f\"{k}/{ki}\"] = vi\n elif isinstance(v, (np.ndarray, jnp.ndarray)):\n if v.dtype == np.uint8:\n # We encode images to png\n if v.ndim == 4:\n # Since encode_png accepts only a single image for a batch of images\n # we just stack them over their first axis.\n v = v.reshape((-1,) + v.shape[-2:])\n image_string = tf.image.encode_png(v).numpy()\n result_dict[k] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[image_string]))\n elif v.dtype == np.int32:\n # int32 are promoted to int64\n value = v.reshape([-1]).astype(np.int64)\n result_dict[k] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=value))\n else:\n # Since tf.Records do not support reading float64, here for any values\n # we interpret them as int64 and store them in this format, in order\n # when reading to be able to recover the float64 values.\n value = v.reshape([-1]).view(np.int64)\n result_dict[k] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=value))\n else:\n raise NotImplementedError(f\"Currently the only supported feature types \"\n f\"are tf.Tensor, np.ndarray and jnp.ndarray. 
\"\n f\"Encountered value of type {type(v)}.\")\n return result_dict", "title": "" }, { "docid": "cdf8013c14ca90e11353a112e87ddd36", "score": "0.5120887", "text": "def _decode_record(record, name_to_features):\r\n example = tf.parse_single_example(record, name_to_features)\r\n\r\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\r\n # So cast all int64 to int32.\r\n for name in list(example.keys()):\r\n t = example[name]\r\n if t.dtype == tf.int64:\r\n t = tf.to_int32(t)\r\n example[name] = t\r\n\r\n return example", "title": "" }, { "docid": "6e577a4bb265b2aa5546ac90f3bfd9be", "score": "0.511739", "text": "def _decode_record(record,\n name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n \n # tf.Example only supports tf.int64, but the TPU only supports tf.int32. So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n \n return example", "title": "" }, { "docid": "ea43a1c054f1686b60dc110d04e3d38c", "score": "0.511166", "text": "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "title": "" }, { "docid": "ea43a1c054f1686b60dc110d04e3d38c", "score": "0.511166", "text": "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "title": "" }, { "docid": "ea43a1c054f1686b60dc110d04e3d38c", "score": "0.511166", "text": "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "title": "" }, { "docid": "ea43a1c054f1686b60dc110d04e3d38c", "score": "0.511166", "text": "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "title": "" }, { "docid": "fa73212bb8fff27972fd1fc4b922ffcc", "score": "0.50928694", "text": "def _decode_record(record, name_to_features=name_to_features):\n example = tf.io.parse_single_example(serialized=record, features=name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if name != 'unique_id': #t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example", "title": "" }, { "docid": "72c679419e9bbf3133dca46e04d8f93f", "score": "0.5089781", "text": "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # 
tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example", "title": "" },
{ "docid": "2d9d8bdd5d8415f955443f10336b441e", "score": "0.50896466", "text": "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "title": "" },
{ "docid": "a9c2196a1616027ee5e3cabff5467305", "score": "0.50870585", "text": "def _convert_to_example(file_path, image_buffer):\n file_name = file_path.split('/')[-2]\n #print(file_name)\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'label': _bytes_feature(tf.compat.as_bytes(file_name)),\n 'data': _bytes_feature(image_buffer)\n }))\n return example", "title": "" },
{ "docid": "b0cea538dbfff600e1308115688d8704", "score": "0.50778425", "text": "def xml_as_tensor(xml_path, dst_img_size, name_converter, classes):\n tree = ET.parse(xml_path)\n size = tree.find('size')\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n if height == 0 or width == 0:\n raise Exception\n\n h_ratio = dst_img_size / height\n w_ratio = dst_img_size / width\n\n label = np.zeros(shape=[dst_img_size, dst_img_size, len(classes)], dtype=np.float32)\n objs = tree.findall('object')\n\n for obj in objs:\n bbox = obj.find('bndbox')\n xmin = int(float(bbox.find('xmin').text) * w_ratio)\n xmax = int(float(bbox.find('xmax').text) * w_ratio)\n ymin = int(float(bbox.find('ymin').text) * h_ratio)\n ymax = int(float(bbox.find('ymax').text) * h_ratio)\n class_index = classes.index(name_converter[obj.find('name').text.lower().strip()])\n label[ymin: ymax, xmin: xmax, class_index] = 1\n\n return label", "title": "" },
{ "docid": "22bb002a0671b8070cbe5cb7f7222699", "score": "0.50776637", "text": "def _decode_record(record, name_to_features):\n example = tf.io.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example", "title": "" },
{ "docid": "7a57d2e3ffb3909b693eb3f0b1e4137c", "score": "0.5075211", "text": "def create_tf_example(filename, label0, labels, signature_vs_others):\n image_format = b'jpg'\n\n with open(filename, 'rb') as image:\n f = image.read()\n encoded_image_data = bytes(f)\n\n width, height = label0['asset']['size']['width'], label0['asset']['size']['height']\n regions = label0['regions']\n\n xmins = []\n xmaxs = []\n ymins = []\n ymaxs = []\n classes_text = []\n classes = []\n\n for bbox in regions:\n if bbox['tags'][0] not in labels:\n continue\n\n # checking
if bbox coordinates are correct:\n\n w, h = bbox['boundingBox']['width'], bbox['boundingBox']['height']\n assert (bbox['boundingBox']['left'] == bbox['points'][0]['x'])\n assert (bbox['boundingBox']['top'] == bbox['points'][0]['y'])\n assert (bbox['boundingBox']['left'] + w - bbox['points'][2]['x'] <= 0.0001)\n assert (bbox['boundingBox']['top'] + h - bbox['points'][2]['y'] <= 0.0001)\n\n xmins.append(bbox['points'][0]['x'] / width)\n xmaxs.append(bbox['points'][2]['x'] / width)\n ymins.append(bbox['points'][0]['y'] / height)\n ymaxs.append(bbox['points'][2]['y'] / height)\n\n if signature_vs_others:\n if bbox['tags'][0] == 'signature':\n classes_text.append('signature'.encode('utf-8'))\n classes.append(1)\n else:\n classes_text.append('others'.encode('utf-8'))\n classes.append(2)\n else:\n if bbox['tags'][0] in labels:\n idx = labels.index(bbox['tags'][0]) + 1\n classes_text.append(bbox['tags'][0].encode('utf-8'))\n classes.append(idx)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(filename),\n 'image/source_id': dataset_util.bytes_feature(filename),\n 'image/encoded': dataset_util.bytes_feature(encoded_image_data),\n 'image/format': dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n\n return tf_example", "title": "" }, { "docid": "f767bb31b97b3e05e6caa15daba24367", "score": "0.5075196", "text": "def dict_to_tf_example(filename,\n mask_path,\n label_map_dict,\n img_path):\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n width = np.asarray(image).shape[1]\n height = np.asarray(image).shape[0]\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n with tf.gfile.GFile(mask_path, 'rb') as fid:\n encoded_mask_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_mask_png)\n mask = PIL.Image.open(encoded_png_io)\n mask_np = np.asarray(mask.convert('L'))\n if mask.format != 'PNG':\n raise ValueError('Mask format not PNG')\n\n xmins = []\n ymins = []\n xmaxs = []\n ymaxs = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n masks = []\n\n cv2.imshow(\"origin\", mask_np)\n cv2.imwrite('origin.png', mask_np)\n\n for k in list(mask_pixel.keys()):\n class_name = k\n\n pixel_vals = mask_pixel[class_name]\n\n for pixel_val in pixel_vals: \n print('for pixel val#:', pixel_val) \n \n mask_copy = mask_np.copy()\n mask_copy[mask_np == pixel_val] = 255\n ret,thresh = cv2.threshold(mask_copy, 254, 255, cv2.THRESH_BINARY)\n (_, conts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n index = 0\n if conts != None:\n for c in conts:\n #rect = cv2.boundingRect(c)\n x, y, w, h = cv2.boundingRect(c)\n xmin = float(x)\n xmax = float(x+w)\n ymin = float(y)\n ymax = float(y+h)\n xmins.append(xmin / width)\n ymins.append(ymin / height)\n xmaxs.append(xmax / width)\n ymaxs.append(ymax 
/ height)\n print(filename, 'bounding box for', class_name, xmin, xmax, ymin, ymax)\n\n classes_text.append(class_name.encode('utf8'))\n classes.append(label_map_dict[class_name])\n\n mask_np_black = mask_np*0\n cv2.drawContours(mask_np_black, [c], -1, (255,255,255), cv2.FILLED)\n\n mask_remapped = (mask_np_black == 255).astype(np.uint8)\n masks.append(mask_remapped)\n\n feature_dict = {\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n filename.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n filename.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }\n\n encoded_mask_png_list = []\n for mask in masks:\n img = PIL.Image.fromarray(mask)\n output = io.BytesIO()\n img.save(output, format='PNG')\n encoded_mask_png_list.append(output.getvalue())\n feature_dict['image/object/mask'] = (dataset_util.bytes_list_feature(encoded_mask_png_list))\n\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n return example", "title": "" }, { "docid": "587a5a1a63a0bd8366e9a800f7fa3293", "score": "0.5069893", "text": "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example", "title": "" }, { "docid": "7cd8aaec4d376c29df0efb471341034b", "score": "0.50648224", "text": "def read_and_convert(self):\n if self._example_pointer == self._num_examples:\n return None\n image = self._images[self._example_pointer].tostring()\n label = int(self._labels[self._example_pointer])\n self._example_pointer += 1\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image': ExampleReader._bytes_feature(image),\n 'label': ExampleReader._int64_feature(label)\n }))\n return example", "title": "" }, { "docid": "f37ecc2a81002a3c8b75045ae7bbd434", "score": "0.50617594", "text": "def _serialize_feat(self, index, distortion, distorted_image, reference_image, dmos, dmos_std):\n example_proto = tf.train.Example(features=tf.train.Features(feature={\n \"index\": _int64_feature(int(index)),\n \"distortion\": _bytes_feature(bytes(distortion, 'utf-8')),\n \"distorted_image\": _bytes_feature(distorted_image),\n \"reference_image\": _bytes_feature(reference_image),\n \"dmos\": _float_feature(float(dmos)),\n \"dmos_std\": _float_feature(float(dmos_std))\n }))\n return example_proto.SerializeToString()", "title": "" }, { "docid": "a6bfc8e5da7b9c93347317d4a4a32ec1", "score": "0.50461507", "text": 
"def serve_tf_examples_fn(serialized_tf_examples):\n reshaped_examples = tf.reshape(serialized_tf_examples, [-1, 1])\n transformed_features = model.tft_layer({_FEATURE_KEY: reshaped_examples})\n\n outputs = model(transformed_features)\n return {'outputs': outputs}", "title": "" }, { "docid": "4921d6ef1a1c4a400e896e180639269d", "score": "0.5040913", "text": "def _bbox_data_to_feature_dict(data):\n bbox_feature_dict = {\n 'image/object/bbox/xmin': tfrecord_lib.convert_to_feature(data['xmin']),\n 'image/object/bbox/xmax': tfrecord_lib.convert_to_feature(data['xmax']),\n 'image/object/bbox/ymin': tfrecord_lib.convert_to_feature(data['ymin']),\n 'image/object/bbox/ymax': tfrecord_lib.convert_to_feature(data['ymax']),\n 'image/object/class/label': tfrecord_lib.convert_to_feature(\n data['category_id']\n ),\n }\n return bbox_feature_dict", "title": "" }, { "docid": "b36568b7512c85cade134a3b23f777e6", "score": "0.5039481", "text": "def as_tf_example(example):\n return tf.train.Example(features=tf.train.Features(feature={\n 'sequence': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[example['sequence']])),\n 'mutation_sequence': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[example['mutation_sequence']])),\n 'partition': tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[example['partition']])),\n 'is_viable': tf.train.Feature(\n int64_list=tf.train.Int64List(value=[int(example['is_viable'])])),\n 'num_mutations': tf.train.Feature(\n int64_list=tf.train.Int64List(value=[example['num_mutations']])),\n 'viral_selection': tf.train.Feature(\n float_list=tf.train.FloatList(value=[example['viral_selection']])),\n }))", "title": "" }, { "docid": "69deae93818b97d18fc55a569f026d08", "score": "0.50203973", "text": "def preprocess_data(sample):\r\n image = sample[\"image\"]\r\n bbox = swap_xy(sample[\"objects\"][\"bbox\"])\r\n class_id = tf.cast(sample[\"objects\"][\"label\"], dtype=tf.int32)\r\n\r\n image, bbox = random_flip_horizontal(image, bbox)\r\n image, image_shape, _ = resize_and_pad_image(image)\r\n\r\n bbox = tf.stack(\r\n [\r\n bbox[:, 0] * image_shape[1],\r\n bbox[:, 1] * image_shape[0],\r\n bbox[:, 2] * image_shape[1],\r\n bbox[:, 3] * image_shape[0],\r\n ],\r\n axis=-1,\r\n )\r\n bbox = convert_to_xywh(bbox)\r\n return image, bbox, class_id", "title": "" }, { "docid": "d384a83b6f67ff40c41916c8c4006047", "score": "0.50134623", "text": "def parse_example_spec(self):\n return {\n self.key:\n parsing_ops.FixedLenFeature(self.shape, self.dtype,\n self.default_value)\n }", "title": "" }, { "docid": "21f6cae6c99c3f078a7f0ecfe1f9bb68", "score": "0.5012778", "text": "def dict_to_tf_example(image_path, data, label_map_dict):\n with tf.gfile.GFile(image_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n if image.format != 'JPEG' and image.format != 'PNG':\n raise ValueError('Image format not JPEG or PNG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n width, height = image.size\n #if width != 1600 and height != 1200:\n # print(width, height)\n image_format = os.path.splitext(image_path)[1]\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n difficult = []\n for vehicle in data['det_results']:\n anno = vehicle\n x_min = max(anno['x_min'], 0)\n y_min = max(anno['y_min'], 0)\n x_max = anno['x_max']\n y_max = anno['y_max']\n xmin.append(float(x_min) / width)\n ymin.append(float(y_min) / height)\n xmax.append(float(x_max) / width)\n ymax.append(float(y_max) / height)\n 
vehicle_category = vehicle['class_id']\n #print(vehicle_category)\n category_width = x_max - x_min\n vehicle_category = min(vehicle_category, 1)\n classes.append(vehicle_category + 1)\n if vehicle_category == 0:\n classes_text.append(bytes('head', encoding='utf-8'))\n else:\n classes_text.append(bytes('rear', encoding='utf-8'))\n if 'NotUse' in vehicle['types'] or category_width < 240:\n difficult.append(int(True))\n else:\n difficult.append(int(False))\n global pics, gts, simple, hard\n pics += 1\n gts += len(data['det_results'])\n simple += difficult.count(False)\n hard += difficult.count(True)\n #height = 240\n #width = 320\n boxes = np.stack([xmin, ymin, xmax, ymax], axis=-1)\n difficult = np.asarray(difficult, dtype=np.int32)\n classes = np.asarray(classes, dtype=np.int32)\n #target_size = [height, width]\n #image = image.resize((width, height), Image.ANTIALIAS)\n #image, boxes = transform_img_and_boxes(image, boxes, target_size)\n xmin = list(boxes[:, 0])\n ymin = list(boxes[:, 1])\n xmax = list(boxes[:, 2])\n ymax = list(boxes[:, 3])\n #image = image.resize((width, height), Image.ANTIALIAS)\n temp_io = io.BytesIO()\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(image_path, encoding='utf-8')])),\n 'boxes': tf.train.Feature(bytes_list=tf.train.BytesList(value=[boxes.tostring()])),\n 'is_crowd': tf.train.Feature(bytes_list=tf.train.BytesList(value=[difficult.tostring()])),\n 'class' : tf.train.Feature(bytes_list=tf.train.BytesList(value=[classes.tostring()])) \n }))\n return example", "title": "" }, { "docid": "71584a4aa3b7b412b4676a25c5e0ae80", "score": "0.5006463", "text": "def convert_to(data, num_examples, filename,\n features = {\n 'image': {'in_width': 512, 'width': 512},\n 'label': {'in_width': 512, 'width': 512}\n }):\n\n s_rows = features['image'].in_width\n t_rows = features['label'].in_width\n\n print('Writing', filename)\n writer = tf.python_io.TFRecordWriter(filename)\n\n\n search_raw = np.asarray(image*255, dtype=np.bool_).tostring()\n temp_raw = np.asarray(label*255, dtype=np.bool_).tostring()\n\n ex = tf.train.Example(features=tf.train.Features(feature={\n 'image': _bytes_feature(search_raw),\n 'label': _bytes_feature(temp_raw),}))\n\n writer.write(ex.SerializeToString())\n\n writer.close()", "title": "" }, { "docid": "5c825d3d39696e9fb46cad57b40d82c2", "score": "0.5006309", "text": "def serialize_example(feature0, feature1, feature2, feature3):\n # Create a dictionary mapping the feature name to the tf.Example-compatible\n # data type.\n feature = {\n 'feature0': _int64_feature(feature0),\n 'feature1': _int64_feature(feature1),\n 'feature2': _bytes_feature(feature2),\n 'feature3': _float_feature(feature3),\n }\n\n # Create a Features message using tf.train.Example.\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()", "title": "" }, { "docid": "fefdb93a86415a85f4c98de90840e758", "score": "0.49977002", "text": "def _preprocess_structured_data(\n features: Features, label: int) -> testbed_base.Data:\n # features can be a dict of numeric features or a single numeric feature\n if isinstance(features, Dict):\n features = tf.concat(\n [tf.cast(tf.expand_dims(x, -1), tf.float64) for x in features.values()],\n axis=0)\n else:\n features = tf.cast(features, tf.float64)\n\n features = _standardize_data(features)\n chex.assert_shape(features, (features.shape[0],))\n chex.assert_shape(label, ())\n label = 
tf.expand_dims(label, -1)\n\n return testbed_base.Data(x=features, y=label)", "title": "" }, { "docid": "5e1f0b4fbaa17558772e7997421900d0", "score": "0.49938083", "text": "def parse_sequence_example(self,serialized, image_feature, \n caption_feature, mask_feature,cls_lbl_feature):\n context, sequence = tf.parse_single_sequence_example(\n serialized,\n context_features={\n image_feature: tf.FixedLenFeature([], dtype=tf.string)\n },\n sequence_features={\n caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n mask_feature: tf.FixedLenSequenceFeature([],dtype=tf.float32),\n cls_lbl_feature:tf.FixedLenSequenceFeature([], dtype=tf.int64)\n })\n\n encoded_image = context[image_feature]\n caption = sequence[caption_feature]\n mask = sequence[mask_feature]\n cls_lbl = sequence[cls_lbl_feature]\n return encoded_image, caption, mask,cls_lbl", "title": "" }, { "docid": "aad3f2bdd749699c481a4479c4a21fcd", "score": "0.4992735", "text": "def preprocess_data(sample):\r\n image = sample[\"image\"]\r\n bbox = ioc.swap_xy(sample[\"objects\"][\"bbox\"])\r\n class_id = tf.cast(sample[\"objects\"][\"label\"], dtype=tf.int32)\r\n\r\n image, bbox = random_flip_horizontal(image, bbox)\r\n image, image_shape, _ = ioc.resize_and_pad_image(image)\r\n\r\n bbox = tf.stack(\r\n [\r\n bbox[:, 0] * image_shape[1],\r\n bbox[:, 1] * image_shape[0],\r\n bbox[:, 2] * image_shape[1],\r\n bbox[:, 3] * image_shape[0],\r\n ],\r\n axis=-1,\r\n )\r\n bbox = ioc.convert_to_xywh(bbox)\r\n return image, bbox, class_id", "title": "" }, { "docid": "590b4a389563a0d79632f82fd8a133d5", "score": "0.49884203", "text": "def _convert_to_example(file_path, image_buffer):\r\n file_name = file_path.split('/')[-1]\r\n\r\n example = tf.train.Example(features=tf.train.Features(feature={\r\n 'image/file_name': _bytes_feature(tf.compat.as_bytes(os.path.basename(file_name))),\r\n 'image/encoded_image': _bytes_feature((image_buffer))\r\n }))\r\n return example", "title": "" }, { "docid": "b1dc7f729c6b01e3c3d1a47d7d79e77f", "score": "0.49774525", "text": "def _serialize_feat(self, index, distortion, distorted_image, reference_image, dmos,\n dmos_realigned, dmos_realigned_std):\n example_proto = tf.train.Example(features=tf.train.Features(feature={\n \"index\": _int64_feature(int(index)),\n \"distortion\": _bytes_feature(bytes(distortion, 'utf-8')),\n \"distorted_image\": _bytes_feature(distorted_image),\n \"reference_image\": _bytes_feature(reference_image),\n \"dmos\": _float_feature(float(dmos)),\n \"dmos_realigned\": _float_feature(float(dmos_realigned)),\n \"dmos_realigned_std\": _float_feature(float(dmos_realigned_std))\n }))\n return example_proto.SerializeToString()", "title": "" }, { "docid": "a99a02d6e035f3a64f78b72170a0a6eb", "score": "0.4963192", "text": "def _convert_tf_record_schema_json_to_dict(tf_record_json_schema):\n example = {}\n for key, value in tf_record_json_schema.items():\n if value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE] == constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED and \\\n value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE] == constants.FEATURE_STORE.TF_RECORD_INT_TYPE:\n example[str(key)] = tf.FixedLenFeature([], tf.int64)\n if value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE] == constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED and \\\n value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE] == constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE:\n example[str(key)] = tf.FixedLenFeature([], tf.float32)\n if value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE] == 
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED and \\\n value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE] == constants.FEATURE_STORE.TF_RECORD_STRING_TYPE:\n example[str(key)] = tf.FixedLenFeature([], tf.float32)\n if value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE] == constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_VAR and \\\n value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE] == constants.FEATURE_STORE.TF_RECORD_INT_TYPE:\n example[str(key)] = tf.VarLenFeature(tf.int64)\n if value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE] == constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_VAR and \\\n value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE] == constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE:\n example[str(key)] = tf.VarLenFeature(tf.float32)\n if value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE] == constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_VAR and \\\n value[\n constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE] == constants.FEATURE_STORE.TF_RECORD_STRING_TYPE:\n example[str(key)] = tf.VarLenFeature(tf.string)\n return example", "title": "" }, { "docid": "9fb13049778281131a9b9c9f1bed5e5a", "score": "0.49611977", "text": "def _serialize_feat(self, index, image, mos, score_dist, challenge, tags, *args, **kwargs):\n example_proto = tf.train.Example(features=tf.train.Features(feature={\n \"index\": _int64_feature(index),\n \"image\": _bytes_feature(image),\n \"mos\": _float_feature(mos),\n \"score_dist\": _bytes_feature(score_dist),\n \"tags\": _bytes_feature(tags),\n \"challenge\": _bytes_feature(challenge),\n }))\n return example_proto.SerializeToString()", "title": "" }, { "docid": "e18473c8eeee95a8f8f5ad332a0d5caf", "score": "0.49608102", "text": "def _parse_fn(example: tf.Tensor):\n label_defaults = [[0.0]]\n dense_defaults = [\n [0.0] for _ in range(self._num_dense_features)\n ]\n num_sparse_features = len(self._vocab_sizes)\n categorical_defaults = [\n [0] for _ in range(num_sparse_features)\n ]\n record_defaults = label_defaults + dense_defaults + categorical_defaults\n fields = tf.io.decode_csv(\n example, record_defaults, field_delim='\\t', na_value='-1')\n\n num_labels = 1\n label = tf.reshape(fields[0], [batch_size, 1])\n\n features = {}\n num_dense = len(dense_defaults)\n\n dense_features = []\n offset = num_labels\n for idx in range(num_dense):\n dense_features.append(fields[idx + offset])\n features['dense_features'] = tf.stack(dense_features, axis=1)\n\n offset += num_dense\n features['sparse_features'] = {}\n\n for idx in range(num_sparse_features):\n features['sparse_features'][str(idx)] = fields[idx + offset]\n\n return features, label", "title": "" }, { "docid": "eb8efa9e2e9d7492df33a84a5b031ce4", "score": "0.49575472", "text": "def _serialize_feat(self, distorted_image, reference_image, mos):\n example_proto = tf.train.Example(features=tf.train.Features(\n feature={\n \"distorted_image\": _bytes_feature(distorted_image),\n \"reference_image\": _bytes_feature(reference_image),\n \"mos\": _float_feature(float(mos))\n }))\n return example_proto.SerializeToString()", "title": "" }, { "docid": "d2348644401417bb9f9f678becadd416", "score": "0.4944183", "text": "def parse(image, transcript):\n\t\toutput = {\n\t\t\t'image' : tf.train.Feature(bytes_list = tf.train.BytesList(value = [image])),\n\t\t\t'transcripts' : tf.train.Feature(bytes_list = tf.train.BytesList(value = [transcript]))\n\t\t}\n\t\treturn tf.train.Example(features = tf.train.Features(feature = output)).SerializeToString()", "title": "" }, { "docid": 
"25bc563d9fc42a4a0fd938f281b0da05", "score": "0.49422956", "text": "def parse_tfrecords_function(example_proto):\n features = {\n \"ids\": tf.VarLenFeature(tf.int64),\n \"values\": tf.VarLenFeature(tf.float32),\n \"label\": tf.FixedLenFeature([], tf.int64, default_value=0)\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n labels = parsed_features[\"label\"]\n ids = parsed_features[\"ids\"]\n values = parsed_features[\"values\"]\n\n return labels, ids, values", "title": "" }, { "docid": "5fe030eb75b0150b48b7c29300062777", "score": "0.49400043", "text": "def create_tf_example(packed_sequence):\n features = collections.OrderedDict()\n features[\"packed_input_ids\"] = create_int_feature(packed_sequence[0])\n features[\"packed_input_mask\"] = create_int_feature(packed_sequence[1])\n features[\"packed_segment_ids\"] = create_int_feature(packed_sequence[2])\n features[\"packed_position_ids\"] = create_int_feature(packed_sequence[3])\n features[\"packed_masked_lm_positions\"] = create_int_feature(packed_sequence[4])\n features[\"packed_masked_lm_ids\"] = create_int_feature(packed_sequence[5])\n features[\"packed_masked_lm_mask\"] = create_float_feature(packed_sequence[6])\n features[\"packed_next_sentence_labels\"] = create_int_feature(packed_sequence[7])\n features[\"packed_next_sentence_mask\"] = create_float_feature(packed_sequence[8])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n return tf_example.SerializeToString()", "title": "" }, { "docid": "18e699c770e4986fabcb5a63465800b5", "score": "0.49393255", "text": "def transform_features_fn(serialized_tf_example):\n raw_feature_spec = tf_transform_output.raw_feature_spec()\n raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)\n transformed_features = model.tft_layer(raw_features)\n logging.info('eval_transformed_features = %s', transformed_features)\n return transformed_features", "title": "" }, { "docid": "10ad93698f49f19870564f67084b07f8", "score": "0.49278077", "text": "def parse_tfrecords_function(example_proto):\n\n if FLAGS.label_type == \"int\":\n features = {\n \"ids\": tf.VarLenFeature(tf.int64),\n \"values\": tf.VarLenFeature(tf.float32),\n \"label\": tf.FixedLenFeature([], tf.int64, default_value=0)\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n labels = parsed_features[\"label\"]\n ids = parsed_features[\"ids\"]\n values = parsed_features[\"values\"]\n\n elif FLAGS.label_type == \"float\":\n features = {\n \"ids\": tf.VarLenFeature(tf.int64),\n \"values\": tf.VarLenFeature(tf.float32),\n \"label\": tf.FixedLenFeature([], tf.float32, default_value=0)\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n labels = tf.cast(parsed_features[\"label\"], tf.int32)\n ids = parsed_features[\"ids\"]\n values = parsed_features[\"values\"]\n\n return labels, ids, values", "title": "" }, { "docid": "8ea18fb5b4a90a8d41b2764d5b08c30d", "score": "0.49234477", "text": "def derivative_parser(self, example, n_params=None):\n parsed_example = tf.io.parse_single_example(\n example, {\"data\": tf.io.FixedLenFeature([], tf.string)})\n return tf.reshape(\n tf.io.decode_raw(parsed_example[\"data\"], tf.float32),\n self.input_shape + (n_params,))", "title": "" }, { "docid": "ed6d6b7d36092adca1fa6835e89bdfaa", "score": "0.49213117", "text": "def parse_sequence_example(serialized):\n context, sequence = tf.parse_single_sequence_example(\n serialized,\n context_features={\n 'image/encoded': tf.FixedLenFeature([], 
dtype=tf.string),\n 'image/width': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/height': tf.FixedLenFeature([], dtype=tf.int64),\n 'image/filename': tf.FixedLenFeature([], dtype=tf.string),\n },\n sequence_features={\n 'image/anchors/anchors': tf.FixedLenSequenceFeature([4], dtype=tf.float32),\n 'image/anchors/classes': tf.FixedLenSequenceFeature([1], dtype=tf.int64),\n 'image/coords/ids': tf.FixedLenSequenceFeature([1], dtype=tf.int64),\n 'image/coords/coords': tf.FixedLenSequenceFeature([2], dtype=tf.float32),\n 'image/sides/side_classes': tf.FixedLenSequenceFeature([1], dtype=tf.int64),\n 'image/sides/ids': tf.FixedLenSequenceFeature([1], dtype=tf.int64),\n 'image/sides/offsets': tf.FixedLenSequenceFeature([1], dtype=tf.float32),\n })\n\n image = context['image/encoded']\n img_file = context['image/filename']\n\n targets = dict()\n targets['anchors'] = sequence['image/anchors/anchors']\n targets['classes'] = sequence['image/anchors/classes']\n targets['coord_ids'] = sequence['image/coords/ids']\n targets['coords'] = sequence['image/coords/coords']\n targets['side_classes'] = sequence['image/sides/side_classes']\n targets['side_ids'] = sequence['image/sides/ids']\n targets['offsets'] = sequence['image/sides/offsets']\n\n return image, targets, img_file", "title": "" }, { "docid": "0979f57f29702fb3a2cf9879d60f6982", "score": "0.4912613", "text": "def decode(self, serialized_example, items=None):\n context, feature_list = tf.parse_single_sequence_example(\n serialized_example, self._keys_to_context_features,\n self._keys_to_sequence_features)\n # Reshape non-sparse elements just once:\n for k in self._keys_to_context_features:\n v = self._keys_to_context_features[k]\n if isinstance(v, tf.FixedLenFeature):\n context[k] = tf.reshape(context[k], v.shape)\n if not items:\n items = self._items_to_handlers.keys()\n outputs = []\n for item in items:\n handler = self._items_to_handlers[item]\n keys_to_tensors = {\n key: context[key] if key in context else feature_list[key]\n for key in handler.keys\n }\n outputs.append(handler.tensors_to_item(keys_to_tensors))\n return outputs", "title": "" }, { "docid": "93466f57932c826193987e0172e323fa", "score": "0.4909176", "text": "def process_example(self, example):\n (label, data) = example\n return (map(float, label.split(' ')), map(float, data.split(' ')))", "title": "" } ]
e034371cf2f71c0b059b61452b9388f5
stops the running process
[ { "docid": "d7d36dd564e8ddaceec19438abbc6509", "score": "0.73107016", "text": "def stop(self):\n self.network.deregister(self.pid)\n self.reader_task.cancel()\n self.writer_task.cancel()\n self.subproc.terminate()", "title": "" } ]
[ { "docid": "f8d24493529d9cb44476dfeb0b9f4e86", "score": "0.8454015", "text": "def stop(self):\n # Terminate the process\n self.proc.terminate() # sends a SIGTERM", "title": "" }, { "docid": "e97a3af72b6deb2246430e5a2e9ea23f", "score": "0.8393155", "text": "def stop_process(self):\r\n PROCESSES.current.close()", "title": "" }, { "docid": "487cdb49bbdbb6725ed2e76c81faadb9", "score": "0.7961588", "text": "def stop(self):\n pid = self.pid.get()\n if pid != None:\n subprocess.call(['kill', str(pid)])", "title": "" }, { "docid": "aa57c3d30eb4cdf319aa92f742230a08", "score": "0.7940781", "text": "def stop():\n os.kill(os.getppid(), signal.SIGKILL)", "title": "" }, { "docid": "5af2c9155430aa71b49792b96a254615", "score": "0.78892803", "text": "def stop(self):\r\n if self.pid:\r\n if os.name == 'nt':\r\n # Untested\r\n res = call([\"taskkill\", \"/f\", \"/pid\", str(self.pid)],\r\n stdin=PIPE, stdout=PIPE, stderr=PIPE)\r\n else:\r\n os.kill(self.pid, signal.SIGTERM)\r\n try:\r\n os.waitpid(self.pid, 0)\r\n except OSError, e:\r\n if e.errno != errno.ESRCH:\r\n raise", "title": "" }, { "docid": "41d2becdffb0921c14eecf0c6d8860ea", "score": "0.7881741", "text": "def stop():\n\n sys.exit()", "title": "" }, { "docid": "3da8487549a36fc3d14d2d67945360a8", "score": "0.78599954", "text": "def _stop_process(self):\n try:\n if self._ds9_process:\n # none means not yet terminated\n if self._ds9_process.poll() is None:\n self.set(\"exit\")\n if self._ds9_process in self._process_list:\n self._process_list.remove(self._ds9_process)\n\n except XpaException as e:\n print(\"XPA Exception: {0}\".format(e))", "title": "" }, { "docid": "b7d70d9d233baf516d45e2bfc48ba77b", "score": "0.7836727", "text": "def stop(self):\n with self._closed_semaphore:\n if self._closed:\n return\n self._closed = True\n\n if self._process:\n self._process.terminate()\n self._process.wait() # Wait for termination\n self._current_file.unlink()", "title": "" }, { "docid": "0b785bc521e5b012171487218910c6cf", "score": "0.7829543", "text": "def stop(self) -> None:\n self.proc.send_signal(signal.SIGTERM)\n self.proc.wait()", "title": "" }, { "docid": "cc885cfc6b2ce292ed38f0238d828da3", "score": "0.77835435", "text": "def stop(self):\n self.p.terminate()\n import os\n os.system(\"kill -9 {}\".format(str(self.p.pid)))\n return None", "title": "" }, { "docid": "ac6626a13982ba80797662bff3fcad23", "score": "0.77564305", "text": "def Stop(self):\n if not self.process:\n return\n self.stop_event.set()\n self.wait_event.wait()", "title": "" }, { "docid": "4361643d59e051196e3c7a83794199e3", "score": "0.7739023", "text": "def stop(self):\n self._started = False\n self._monitor.stop()\n if self._current:\n self.__end_process()", "title": "" }, { "docid": "5f55e3ba3151573c3e5acfb85fab690e", "score": "0.77014124", "text": "def _stop_running_process(cls):\n while cls._process_list:\n process = cls._process_list.pop()\n if process.poll() is None:\n process.terminate()", "title": "" }, { "docid": "16a1f40ea8f438637d1e2a9301c957c4", "score": "0.7692393", "text": "def stop(process):\n\t\n\tsubprocess.call(\"taskkill /f /t /im %s.exe\" % process.replace(\".exe\", \"\"))", "title": "" }, { "docid": "81ee52dad7ddabe59f920e96cd9a833b", "score": "0.7684933", "text": "def stop():", "title": "" }, { "docid": "62442fb8e6c5ca23ac463b7f59d369cb", "score": "0.768293", "text": "def stop_webots(self):\n self.process.terminate()", "title": "" }, { "docid": "c1d9337c61b75410f55c394a7d968923", "score": "0.7662175", "text": "def stop(self):\n\t\tself.running = False", "title": "" 
}, { "docid": "778ee2910777dbfa1833215b99315f94", "score": "0.7634107", "text": "def stop(self):\n if self.__running:\n self.__running = False\n if self.__proc and self.__proc.poll( ) == None:\n #print \"%s => proc %s is being stopped\" % (strftime(\"%y-%m-%d %H:%M:%S\", localtime()), self.tag)\n try:\n self.__proc.terminate()\n sleep(0.5)\n if self.__proc.poll( ) is None:\n print \"%s => proc %s is being killed\" % (strftime(\"%y-%m-%d %H:%M:%S\", localtime()), self.tag)\n self.__proc.kill()\n if self.__watchdog.poll( ) is None:\n self.__watchdog.kill()\n except OSError:\n pass", "title": "" }, { "docid": "00d2b09d44ced35cde72bfd000117648", "score": "0.76153314", "text": "def stop(self):\n\n self.running_flag = False", "title": "" }, { "docid": "4e8697d2b40551fdfd3e1446ae47e6e3", "score": "0.7598245", "text": "def stop(self):\n if not (self.proc is None):\n self.proc.kill()\n self.stop_event.set()\n self.proc = None", "title": "" }, { "docid": "d3306c4f53c68d1cef39e3228ebd8b14", "score": "0.7570652", "text": "def stop(self):\n\t\tsys.exit(1)", "title": "" }, { "docid": "321d7ec6d8898b35b8a035b6e63af985", "score": "0.7544312", "text": "def stop(self):\n self.running = False\n \n if self.active_process.poll() is None:\n self.active_process.terminate()\n self.active_process.wait()\n # It should be ok, but give the system a moment to free the port\n time.sleep(STOP_DELAY)", "title": "" }, { "docid": "9e3aae3c791cd42167c42ec4b4326c7b", "score": "0.75346094", "text": "def stop(self):\n self._is_running = False", "title": "" }, { "docid": "bce0648854dc8b1a5c2a512d080aa952", "score": "0.75270325", "text": "def stop(self):\n self._registry_proc.terminate()\n self._registry_proc.wait()", "title": "" }, { "docid": "09f47643f2d6d1fb56897cdce820611d", "score": "0.7521144", "text": "def stop(self):\n self.__running = False", "title": "" }, { "docid": "bfbc435f6a155cdb990ff1590fcbd9e4", "score": "0.751974", "text": "def stop(self):\n self._run_flag = False\n self.wait()", "title": "" }, { "docid": "bfbc435f6a155cdb990ff1590fcbd9e4", "score": "0.751974", "text": "def stop(self):\n self._run_flag = False\n self.wait()", "title": "" }, { "docid": "bfbc435f6a155cdb990ff1590fcbd9e4", "score": "0.751974", "text": "def stop(self):\n self._run_flag = False\n self.wait()", "title": "" }, { "docid": "cdb44d57de6903f5c206db16dc97e20e", "score": "0.7497897", "text": "def stop(self):\n self.running = False\n self.join(2)", "title": "" }, { "docid": "66f44af591e57a781aef375c6a15a729", "score": "0.7482049", "text": "def stop(self):\n self._running = False", "title": "" }, { "docid": "66f44af591e57a781aef375c6a15a729", "score": "0.7482049", "text": "def stop(self):\n self._running = False", "title": "" }, { "docid": "be2e42fa574a48ac4610720b442fa98b", "score": "0.7477113", "text": "def stop(self, name=\"geodns_main\"):\n pm = j.builders.system.processmanager.get()\n pm.stop(name)", "title": "" }, { "docid": "0b508507e2f79fd241724001e2ef13ab", "score": "0.7471089", "text": "def stop(self):\n self.running = False", "title": "" }, { "docid": "0b508507e2f79fd241724001e2ef13ab", "score": "0.7471089", "text": "def stop(self):\n self.running = False", "title": "" }, { "docid": "67c0ee2ae22b719ffe252425270539bd", "score": "0.74667054", "text": "def stop(self):\n self.killed = True", "title": "" }, { "docid": "bae1fc84951047be0539c94cc128405f", "score": "0.7435726", "text": "def stop_program(self):\n self.stop = True", "title": "" }, { "docid": "7398c9b51ceb83a59afaf8c2b197233f", "score": "0.73912823", "text": "async def 
_stop(self):\n cancel_all()\n self.specific.process_control(None, \"close\", close=True)\n self.close()", "title": "" }, { "docid": "8e2d4521fb1b02fbb739a8b907b5479d", "score": "0.7360154", "text": "def stop(self) -> None:\n ...", "title": "" }, { "docid": "ace0c9e85b620d03bbe156d66048048b", "score": "0.73519045", "text": "def stop(self) -> None:", "title": "" }, { "docid": "ace0c9e85b620d03bbe156d66048048b", "score": "0.73519045", "text": "def stop(self) -> None:", "title": "" }, { "docid": "ad5e832f86c495e3a5b77b5906d5ccbe", "score": "0.7336983", "text": "def stop(self):\n self.quit()", "title": "" }, { "docid": "13e42bf7b827d15899a871879764c339", "score": "0.73333836", "text": "def stop(self):\n if self.processes == []:\n return\n self.shutdown.set()\n for ps in self.processes:\n ps.join()\n self.processes = []\n self.shutdown.clear()", "title": "" }, { "docid": "bdd912781b2d44c4fc3857f9f11e648c", "score": "0.7329233", "text": "def stop(self):\n return self._cmd('stop')", "title": "" }, { "docid": "c2e1f7e7ddf95d8d73048142bb77c5eb", "score": "0.73056966", "text": "def stop(self, signum=None, frame=None):\n\n db.disconnect()\n try:\n self.process.terminate()\n except AttributeError:\n pass\n sys.exit(0)", "title": "" }, { "docid": "8be7c588796f43fcbf773ac7e80d5b83", "score": "0.7302298", "text": "def stop(self):\n self._core.stop()", "title": "" }, { "docid": "7adc92ec75e3948cc6d8ed67cf2df247", "score": "0.72951865", "text": "def kill(self):\n self.raiseAWarning(\"Terminating \"+self.__process.pid+' '+self.command)\n self.__process.terminate()", "title": "" }, { "docid": "9bd93cb957304f6f8708664b7ad355bb", "score": "0.7289984", "text": "def stop(self):\n self.keep_running = False", "title": "" }, { "docid": "ce81c0eae7535537fe16355f312346fd", "score": "0.7287878", "text": "def stop(self):\n if args.quiet==False:\n print \"shutting down...\"\n for t in self.threads:\n t.stop()\n self.active = False", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.7278004", "text": "def stop(self):\n pass", "title": "" }, { "docid": "08229083d825754036e55c7f94d97031", "score": "0.7265368", "text": "def stop(self):\n self._send_cmd('STOP')", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.72629225", "text": "def stop(self):", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.72629225", "text": "def stop(self):", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.72629225", "text": "def stop(self):", "title": "" }, { "docid": "5bba69ad9e66a3bd9d0874879371a218", "score": "0.725663", 
"text": "def stop(self) -> None:\n pid = self.pid_file.load()\n os.kill(pid, signal.SIGTERM)", "title": "" }, { "docid": "6aa2ea60656b5df818d671ae31db7891", "score": "0.72525346", "text": "def stop(self):\n self.script_handler.stop_current_script()\n self.matrix_serial.stop()\n if not self.disable_discovery:\n self.discovery_server.stop()\n self.zeroconf_discovery_server.stop_advertising()\n self.server.stop()\n if self.gui is not None:\n self.gui.destroy()\n self.gui = None\n self.logger.info(\"shut down\")\n self.join()\n sys.exit(0)", "title": "" }, { "docid": "7b5b147619a43b2fa8102a06fa734849", "score": "0.7248743", "text": "def stop(self):\n kill_from_pid_file(self.pid_file() + '-waiting', signal.SIGHUP)\n kill_from_pid_file(self.pid_file(), signal.SIGWINCH)", "title": "" }, { "docid": "1084de9f4cd286a577bfb2a21f205253", "score": "0.7234568", "text": "def stop(self):\n self._logger.info(\"Terminating VPP...\")\n self.kill()\n self._logger.info(\"VPP...Terminated.\")\n dpdk.cleanup()", "title": "" }, { "docid": "dde150b861e94704f98add5fac034f82", "score": "0.7233431", "text": "def close(self):\n self.proc.terminate()", "title": "" }, { "docid": "21258823f4ba35456b07ae0c57375984", "score": "0.7217262", "text": "def DoProcessExit(self, code=0):\n self.Stop()", "title": "" }, { "docid": "ba5f082819052247f54cbc72aec0046f", "score": "0.719608", "text": "def stop(self) -> None:\n\t\tif self.launched:\n\t\t\tself.launched = False\n\t\t\tself.USBcomm.close()", "title": "" }, { "docid": "5376a8dc68ff6f1165baea25fc8e43c2", "score": "0.7184622", "text": "def stop(self):\n se_info(\"{0} deactivated\".format(self))\n self.sync_gen_seeds() # syncs the seeds from /tmp/.../queuefolder to .../muse-djpeg-sync/qsym_instance_conc_000xxx\n for pid, task in self.jobs.iteritems():\n if task['processed']:\n continue\n task['processed'] = True\n se_info(\"Terminting qsym instance: {0} {1} real pid:{2}\".format(pid, task['instance'], task['real_pid']))\n utils.terminate_proc_tree(task['real_pid']) # Maybe needs docker implementation?\n self.jobs[pid]['processed'] = True", "title": "" }, { "docid": "542000d1c0e707c290b6dc208e8136d5", "score": "0.7178662", "text": "def terminate(self) -> None:\n self.process.terminate()", "title": "" }, { "docid": "be71478cf87f2121f41f6c690cfe3fa0", "score": "0.7168121", "text": "def stop(self):\n self.msg.put('STOP')", "title": "" }, { "docid": "d0a4e3fd92cb355a5879edff53748779", "score": "0.7164675", "text": "def stop(*args):\n log.error('Caught exit signal - aborting')\n while len(processes) > 0:\n for p in processes:\n os.kill(p.pid, signal.SIGKILL)\n if not p.is_alive():\n processes.remove(p)\n sys.exit(1) # safe to do exit() here because we are a worker", "title": "" }, { "docid": "a49b4995ac5abb165b6c7e7c3e567266", "score": "0.71594685", "text": "def stop(self):\n self._send_command('Stop')", "title": "" }, { "docid": "bf2d64134e1ceae72d430b94ba3c82dd", "score": "0.71551234", "text": "def shutdown(self):\n if self.__running:\n for p in self.__procs:\n self.__procs[p].stop( )\n self.__running = False", "title": "" }, { "docid": "46266448bc0b20ea682264a51afbdc3a", "score": "0.71507365", "text": "def stop(self):\n self._can_run = False\n self.join()", "title": "" }, { "docid": "a676fd3be6a6c37c815f88bdb59344a7", "score": "0.71496934", "text": "def stop(self):\n return self.__exit__()", "title": "" }, { "docid": "a676fd3be6a6c37c815f88bdb59344a7", "score": "0.71496934", "text": "def stop(self):\n return self.__exit__()", "title": "" }, { "docid": 
"491ba764dfcba2161a0ea3f0da22f099", "score": "0.7145619", "text": "def stop(self) -> None:\n logger.debug(\"stop\")\n self._run = False", "title": "" }, { "docid": "8d748bb5a1737b81f7358d72f379a977", "score": "0.7142146", "text": "def stop(self):\n self.sm.stop()", "title": "" }, { "docid": "30a4403f6a943b77676e099336e046a3", "score": "0.7138432", "text": "def stop(self):\n self.stop_flag = True", "title": "" }, { "docid": "30a4403f6a943b77676e099336e046a3", "score": "0.7138432", "text": "def stop(self):\n self.stop_flag = True", "title": "" }, { "docid": "7b70b5fd78923849e575c0bc303f4310", "score": "0.7126957", "text": "def shutdowm(self):\n self.pipeline.stop()", "title": "" }, { "docid": "9cd211b6e78916402a7aca8a23e99b25", "score": "0.712608", "text": "def stopRunning(self):\r\n self.running = False", "title": "" }, { "docid": "098358d9eeabafa33ff73a774588ac5e", "score": "0.71162915", "text": "def shutdown(self):\n if self.proc and self.proc.pid:\n kill_process_nicely(self.proc.pid)", "title": "" }, { "docid": "f273ce04ad9e144d83a872040a2cb0f2", "score": "0.7115421", "text": "def kill_proc(self):\n self.proc.terminate()\n try:\n self.proc.wait(5)\n except subprocess.TimeoutExpired:\n self.proc.kill()\n self.proc = None", "title": "" }, { "docid": "8090a284b1b795418a7a981dc874203a", "score": "0.71110773", "text": "def stop(self):\n if self.pidfile and os.path.exists(self.pidfile):\n pid = int(open(self.pidfile).read())\n try:\n os.kill(pid, signal.SIGTERM)\n # wait for a moment to see if the process dies\n for n in xrange(10):\n time.sleep(0.25)\n os.kill(pid, 0)\n except OSError as err:\n pass\n else:\n sys.exit(u\"pid %d did not die\" % pid)\n else:\n sys.exit(u\"not running\")", "title": "" }, { "docid": "fbc533e72911bca14d2c0e1c8d6966b4", "score": "0.71102214", "text": "def terminate(self):\n self._running = False", "title": "" }, { "docid": "9fc39e4a2ec7217a9c1f32257d04243d", "score": "0.7105538", "text": "def stop():\n pass # TODO", "title": "" }, { "docid": "8de2baa8b6c6fb5d684c288a5fed8728", "score": "0.70996535", "text": "def stop(self):\n cmds.commandPort(name=self.cmdport_name,\n sourceType='python', close=True)\n self.status = cmds.commandPort(self.cmdport_name,\n query=True)\n print 'closing %s' % self.cmdport_name", "title": "" }, { "docid": "4a93e57982fd012adbdf96db17d623bf", "score": "0.7084651", "text": "def do_stop(self, arg):\n ActorSystem().shutdown()\n if self.system:\n print ('Erasing previous ActorSystem')\n del self.system\n self.system = None", "title": "" }, { "docid": "a9213ee99f40bba57f2d590e7c1ac1f7", "score": "0.7081572", "text": "async def stop(self, exit_code=0):\n # don't process scheduler anymore\n self.scheduler.stop()\n\n # process stop tasks\n self.websession.close()\n await self.api.stop()\n\n self.exit_code = exit_code\n self.loop.stop()", "title": "" }, { "docid": "0a7104cc55adfc2e9a471f7528d3f4e9", "score": "0.7078864", "text": "def stop(self):\n self.stopped = True", "title": "" }, { "docid": "f02043a353439c7203812b68b2529f8f", "score": "0.7078776", "text": "def stop(self):\n self._thread.stop()", "title": "" }, { "docid": "0408101d0b16cec40c815f1ca43677a9", "score": "0.70715845", "text": "def __del__(self):\n if self.is_running:\n self.stop()", "title": "" }, { "docid": "a39109363ce9342b858403fe450e27b6", "score": "0.7066023", "text": "def stop(self, stop_mode=None):", "title": "" }, { "docid": "abb5e30d1e04c20bfa113daddc8a1ff8", "score": "0.7063825", "text": "def close(self):\n if self.__process is not None:\n self.__process.kill()", "title": 
"" }, { "docid": "c1b4293d0f5babcd6309640435a17b79", "score": "0.7055387", "text": "def stop_program(self):\n self.command('stop_application', 0, 0, 0)", "title": "" }, { "docid": "be82464a7fb7bf416636141f5a582af0", "score": "0.7048565", "text": "def stop(self):\n if not self.running:\n return\n CHK(nidaq.DAQmxStopTask(self.taskHandle))\n self.ser.write('AS0\\r')#Tell the detector to stop taking data\n sleep(0.1)\n response = self.ser.read(80)\n self.running=False", "title": "" }, { "docid": "3c5df92663329211d047aec3637877bf", "score": "0.7046112", "text": "def stop(self):\n self.__worker.join()\n self.node_request_to_stop()\n self.terminate_flag.set()", "title": "" }, { "docid": "96feca483715db3e8e4d1a3adc52a958", "score": "0.70460355", "text": "def stopProcessMonitor(self, monitor):\n\t\tif monitor.running: monitor.stop()", "title": "" } ]
c075a0b9bee4f53b1a1973941890ba65
Since gateways don't always start at the same time, some seconds don't have a corresponding RSSI and hence have NaN instead. Currently, machine learning algorithms don't know how to handle NaN. As a temporary fix, we're removing all records that have NaN in them.
[ { "docid": "f517c54d00537dd43ad203abd3dec444", "score": "0.71056527", "text": "def remove_nan(self):\n\t\tdense_data = copy.deepcopy(self.data_frame)\n\t\ttimestamps_to_remove = []\n\t\tfor index, row in self.data_frame.iterrows():\n\t\t\tfor gateway in self.gateway_list:\n\n\t\t\t\tif np.isnan(row[gateway]):\n\t\t\t\t\t# print 'removing row for ', index\n\t\t\t\t\ttimestamps_to_remove.append(index)\n\n\t\tdense_data = self.data_frame.drop(timestamps_to_remove)\n\t\tprint \"removed {} rows containing NaN\".format(len(timestamps_to_remove))\n\t\treturn MatchedTimestamps(data_frame=dense_data, gateway_list=self.gateway_list)", "title": "" } ]
[ { "docid": "8361c7d6bb8408ccdc93d98e06906cb5", "score": "0.63010675", "text": "def _removerawnans(self):\n for band in self.data[self.root]:\n for i, f in enumerate(self.data[self.root][band]['frequency']):\n if math.isnan(f):\n for param in self.data[self.root][band]:\n if len(self.data[self.root][band][param]) == \\\n len(self.data[self.root][band]['frequency']):\n np.delete(self.data[self.root][band][param],i)", "title": "" }, { "docid": "96cebc2aed0cbec825c4d54bfa1ccdd9", "score": "0.62637925", "text": "def cleanNaN (raw_df):\n #first step:cleaning the NaN values when the whole row or whole columns is empty:\n raw_df.dropna(axis=1, how='all' )\n raw_df.dropna(axis=0, how='all')\n \n\n while True:\n\n #removing indicators (columns)\n NaN_per_col = raw_df.apply(numMissing, axis=0).to_frame().T #axis=0 defines that function is to be applied on each column\n max_NaN_per_col = np.amax(NaN_per_col.iloc[0,:].values)\n min_NaN_per_col=np.amin(NaN_per_col.iloc[0,:].values)\n if max_NaN_per_col == min_NaN_per_col+2 or max_NaN_per_col == min_NaN_per_col+1 : \n threshold1=len(raw_df.index)-max_NaN_per_col+1\n else:\n threshold1=len(raw_df.index)-max_NaN_per_col+3 #if rate of 2 here, check the condition at +1\n raw_df=raw_df.dropna(axis=1, thresh=threshold1)\n \n \n # check conditions to stop the while loop\n if raw_df.isnull().sum().sum() == 0:\n break\n \n \n #removing countries (rows)\n NaN_per_row = raw_df.apply(numMissing, axis=1).to_frame().T #axis=1 defines that function is to be applied on each row\n max_NaN_per_row = np.amax(NaN_per_row.iloc[0,:].values)\n threshold0=len(raw_df.columns)-max_NaN_per_row +1\n raw_df=raw_df.dropna(axis=0, thresh=threshold0)\n \n\n # check conditions to stop the while loop\n if raw_df.isnull().sum().sum() == 0:\n break\n \n return raw_df", "title": "" }, { "docid": "c7414a0fef1fec27eac411ce030507b3", "score": "0.6119339", "text": "def _remove_nan_inf(self):\n # infinite and nan check\n self.TB10A[np.isfinite(self.TB10A) is False] = self.bad_data\n self.TB19A[np.isfinite(self.TB19A) is False] = self.bad_data\n self.TB37A[np.isfinite(self.TB37A) is False] = self.bad_data\n self.TB85A[np.isfinite(self.TB85A) is False] = self.bad_data\n if hasattr(self, 'TB10B'): # Assume if one, all are there\n self.TB10B[np.isfinite(self.TB10B) is False] = self.bad_data\n self.TB19B[np.isfinite(self.TB19B) is False] = self.bad_data\n self.TB37B[np.isfinite(self.TB37B) is False] = self.bad_data\n self.TB85B[np.isfinite(self.TB85B) is False] = self.bad_data\n else: # Set all the B channels to bad, these are single-channel data\n for freq in FREQS:\n setattr(self, 'TB' + freq + 'B',\n self.bad_data * np.ones((self.nscans, self.ncross),\n dtype='float'))\n # Address geolocations if available\n if hasattr(self, 'Latitude'):\n self.Latitude[np.isfinite(self.Latitude) is False] = self.bad_data\n self.Longitude[np.isfinite(self.Longitude) is False] = \\\n self.bad_data", "title": "" }, { "docid": "756ddf8bd32157a47110f845a19de702", "score": "0.6115779", "text": "def removeNans(data: Dict):\n # find nan in the dataset and removes the values\n for chan in data:\n data[chan] = removeNansSingle(data[chan])\n return data", "title": "" }, { "docid": "77edef415c0e04d3ef1580f96c9c6df4", "score": "0.6110685", "text": "def handle_missing_values(df):\n print(\"just drop all the samples with missing values, consider a better approach\")\n newdf= df.dropna()\n return newdf", "title": "" }, { "docid": "c3266ab1d07f9f838365bd8fbde7a1f7", "score": "0.604752", "text": "def remove_nans(self):\n s = 
\"::: removing NaNs from %s :::\" % self.name\n print_text(s, self.color)\n\n self.x = self.x[self.good_x]\n self.y = self.y[self.good_y]\n self.x_min = self.x.min()\n self.x_max = self.x.max()\n self.y_min = self.y.min()\n self.y_max = self.y.max()\n self.nx = len(self.x)\n self.ny = len(self.y)\n\n for i in self.data.keys():\n self.data[i] = self.data[i][self.good_y, : ]\n self.data[i] = self.data[i][:, self.good_x]", "title": "" }, { "docid": "c8e963843fe13ec0998f958dfce64dfc", "score": "0.5992643", "text": "def dropna(df):\n df = df[df < math.exp(709)] # big number\n df = df[df != 0.0]\n df = df.dropna()\n return df", "title": "" }, { "docid": "deb517438e75d14790c556d6c26530e4", "score": "0.5991817", "text": "def dropna(self):\n self.transformed_data = list(\n filter(lambda line_obj: line_obj.has_null is False, self.orig_data))\n return self", "title": "" }, { "docid": "52259136ad5716d8e4e3adceb77bace3", "score": "0.5938397", "text": "def remove_nans(sig):\n\n sig_nans = np.isnan(sig)\n sig_removed = sig[np.where(~np.isnan(sig))]\n\n return sig_removed, sig_nans", "title": "" }, { "docid": "59b786a4ada6804cec21d78e75eb0698", "score": "0.59266084", "text": "def dropna(df):\n df = df[df < math.exp(709)] # big number\n df = df[df != 0.0]\n df = df.dropna()\n return df", "title": "" }, { "docid": "b5c0c67abd61e6060f97e87d779570eb", "score": "0.5856776", "text": "def clean_nans(record):\n floats = [field for field in record if isinstance(record[field], float)]\n for field in floats:\n if np.isnan(record[field]):\n del record[field]", "title": "" }, { "docid": "82fb941e9652d6c2263194543d36ee88", "score": "0.57872057", "text": "def prune_records(self,rec):\n return {k: v for k, v in json.loads(rec).items() if not (v is None or (isinstance(v, list) and sum(pd.isnull(v) > 0)) or\n (isinstance(v, float) and math.isnan(v))\n or isinstance(v, pd.tslib.NaTType))}", "title": "" }, { "docid": "9ccc1c304ac258099fe94a5f4329c384", "score": "0.5760188", "text": "def _strip(session) -> tuple:\n eeg, *rest = session\n ind = -next(i for i, value in enumerate(eeg[0, ::-1]) if not np.isnan(value))\n if ind == 0:\n ind = None\n return tuple((eeg[:, :ind], *rest))", "title": "" }, { "docid": "cc1c465dc06a8c0c07afedbcc6d05af9", "score": "0.57554376", "text": "def remove_null_values(dataframe):\r\n dataframe.dropna()", "title": "" }, { "docid": "968d84511f33f982e53d2dced989779c", "score": "0.57538193", "text": "def filter_values_1(self):\n inf = float(\"inf\")\n last_mea = list(self.scan_values[-1])\n for i in range(len(last_mea)):\n if last_mea[i] == inf:\n for k in reversed(xrange(MEASUREMENTS-1)):\n if self.scan_values[k][i] != inf:\n last_mea[i] = self.scan_values[k][i]\n break\n scan = LaserScan()\n scan.ranges = last_mea\n self.laser_filtered_pub.publish(scan)\n self.counter = 0\n self.scan_values = []", "title": "" }, { "docid": "d3d79578ca01616242fae7179a5eef34", "score": "0.5741", "text": "def prune (self):\r\n #Remove excess zero data points characteristic of a multi-event spectrum acquisition structure that occur inside peaks.\r\n #Thermo data sometimes does this depending on acquisition parameters.\r\n if self.msevents > 1:\r\n zstart = None\r\n zend = None\r\n for x,intensity in enumerate(self.i):\r\n if intensity == 0:\r\n if not zstart:\r\n zstart = x\r\n zend = x\r\n else:\r\n if zend and zstart:\r\n length = zend-zstart+1\r\n if length == self.msevents-1:\r\n self.i[zstart:zend+1] = list(itertools.repeat(-1,length))\r\n zstart = None\r\n zend = None\r\n while True:\r\n try:\r\n where = 
self.i.index(-1)\r\n except:\r\n break\r\n self.i.pop(where)\r\n self.t.pop(where)\r\n self.time.pop(where)", "title": "" }, { "docid": "413b28d3b07ead5cb86d8f3729fcefd2", "score": "0.57369703", "text": "def nan_filter(self):\r\n \r\n # filter out nan values in numerical attributes\r\n for att in self.catalog_atts:\r\n if getattr(self, att).shape[0] == 0:\r\n pass\r\n elif (type(getattr(self, att)[0]) == str) or (type(getattr(self, att)[0]) == bytes):\r\n # FIXME: intent here unclear: \r\n # note float('nan') is an IEEE NaN, getattr(.) is a str, and != on NaNs is special\r\n i = np.where(getattr(self, att) != float('nan'))[0]\r\n self.revise_lists(i)\r\n # exclude non-numerical types\r\n elif type(getattr(self, att)[0]) not in (np.unicode_, np.string_, np.bool_, bytes):\r\n if att == 'coords':\r\n i1 = np.where(~np.isnan(self.coords.ra.to('deg').value))[0]\r\n i2 = np.where(~np.isnan(self.coords.dec.to('deg').value))[0]\r\n i = np.intersect1d(i1,i2)\r\n else:\r\n i = np.where(~np.isnan(getattr(self, att)))[0]\r\n self.revise_lists(i)", "title": "" }, { "docid": "1763b3f28d3ff59fd197556ba56b060a", "score": "0.5727697", "text": "def remove_data_for_training_from_dataframe(data_df,Mask_NA,missing_fraction=0.3):\n\n def convert_series_to_diag_df(Series):\n return pd.DataFrame(columns=Series.index, index=Series.index, data=np.diag(Series.values))\n \n #removed_vals = (Mask_NA == 1).any(axis=0).astype(int)\n NAs = (Mask_NA == 1).any(axis=1).astype(int)\n NA_diag = convert_series_to_diag_df(NAs)\n\n NA_map = PandasFunctions.CompressDataFrame(NA_diag,[1])\n Val_map = PandasFunctions.CompressDataFrame(Mask_NA,[1])\n #############################################################\n np.random.seed(0)\n remover = np.random.rand(data_df.shape[0], NA_map.shape[0])\n remover = np.where( missing_fraction>remover , 1, 0)\n ############################################################# \n NA_1s = np.matmul(remover, NA_map.values)\n Values_0s = np.matmul(remover, Val_map.values)\n ############################################################# \n data_missing_df = data_df.copy()\n matrix = data_missing_df.values\n matrix[NA_1s ==1] = 1\n matrix[Values_0s==1] = 0\n data_missing_df[:] = matrix\n #############################################################\n return data_missing_df", "title": "" }, { "docid": "ed86b9e8b99ac7b505b79ccb5753c9a8", "score": "0.57137537", "text": "def remove_nan_rows(micro_df: pd.DataFrame):\n data_cols = micro_df.columns.tolist()[1:]\n micro_df.dropna(how = 'all', subset = data_cols, inplace=True)\n micro_df.reset_index(inplace=True, drop=True)", "title": "" }, { "docid": "13cf93e8603b92c72a804674e53a47cc", "score": "0.5667109", "text": "def filter_nan(s,o):\n '''\n data = np.array([s.flatten(),o.flatten()])\n data = np.transpose(data)\n #data = data[~np.isnan(data).any(1)]\n data = data[~pd.isnull(data)]\n #data = data[~np.isnan(data)]\n return data[:,0],data[:,1]\n #return data\n '''\n s = s.flatten()\n o = o.flatten()\n s = np.transpose(s)\n o = np.transpose(o)\n s[np.invert(~np.isnan(o))] = np.nan\n o[np.invert(~np.isnan(s))] = np.nan\n s = s[~pd.isnull(s)]\n o = o[~pd.isnull(o)]\n return s,o", "title": "" }, { "docid": "7bbdbe56e3df32572a4fa12f73449eb4", "score": "0.56610817", "text": "def wash_out_all_nan(data):\n # wash out the data with nan\n return data[~np.isnan(np.sum(data, 1))]", "title": "" }, { "docid": "689966188b7db502832026a5b59f8021", "score": "0.5639008", "text": "def clean_data(df):\n df = df[~df.location.isnull()]\n return df", "title": "" }, { "docid": 
"88cb616fb2e42ac45b5a8d4bd9062a4f", "score": "0.56378675", "text": "def process_na_data_in_df(df: 'DataFrame') -> 'DataFrame':\n\n # Remove Weekends: Sat & Sun\n df = df[(df.index.dayofweek != 5)&(df.index.dayofweek != 6)]\n\n # Removing all columns with NA values > 10% entries\n df = df.loc[:, df.isna().sum()/df.shape[0] <= 0.1]\n\n # Dropping rows with all NA values\n df = df.dropna(axis=0,how='all')\n\n return df", "title": "" }, { "docid": "208cc3adf0e73a81cd66b4d8257283ab", "score": "0.56377816", "text": "def clean_dataset(df):\n assert isinstance(df, pd.DataFrame), \"df needs to be a pd.DataFrame\"\n df.dropna(inplace=True)\n indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(1)\n return df[indices_to_keep].astype(np.float64)", "title": "" }, { "docid": "b2d825919fec8aa76095fd7978826676", "score": "0.5636558", "text": "def _filter_na_for_fit(self, X: pd.DataFrame, y):\n # let's also treat infinite values as NA\n # scikit-learn's check_estimator might throw those at us\n with pd.option_context(\"mode.use_inf_as_na\", True):\n flt = pd.isna(X).values\n X_out = X[~flt]\n if y is not None and len(y) > 0:\n y_out = y[~flt]\n else:\n y_out = y\n return X_out, y_out", "title": "" }, { "docid": "d8d89a4a2ee9109d29df82591e9344b0", "score": "0.5592618", "text": "def clean_null_values(data_frame):\n half_count = len(data_frame) / 2\n # axis = 1 indicates remove of cols, and axis = 0 indicates remove of rows\n data_frame = data_frame.dropna(thresh=half_count, axis=1) # Drop any column with more than 50% missing values\n # drop any row with null values\n data_frame = data_frame.dropna(axis=0, how='any')\n return data_frame", "title": "" }, { "docid": "f2cf87fea3e49c01afa9f06ca623a419", "score": "0.5588972", "text": "def remove_nan_entries(df):\n\tdf_initsize = len(df)\n\tprint('Initial dataset size: ', df.shape)\n\tdisplay(HTML(df.tail().to_html()))\n\tprint('\\nRemoving the NaN values...')\n\t\n\tfor col in df.columns:\n\t col_type = df[col].dtype\n\t #print('col infos: {} dtype={}'.format(col, col_type))\n\t if(col_type != np.dtype('int64')):\tdf.drop(df[~df[col].notna()].index, inplace=True)\n\n\tdisplay(HTML(df.tail().to_html()))\n\tprint('\\nDataset size after NaN removing: ', df.shape)\n\tred_per = 100*(df_initsize - len(df)) / df_initsize\n\tprint(\"=> %.0f%% reduction of data.\" % red_per)\n\n\treturn df", "title": "" }, { "docid": "90230baf2be6f9623288c70f2a631e38", "score": "0.5581642", "text": "def _getnonconnectedsensors(self):\n currentcams=[]\n currenttemp=[]\n currentni=[]\n overcams=[]\n overtemp=[]\n overni=[]\n for par in self._paramlist:\n if par[\"type\"]==\"camera\":\n currentcams.append((par[\"vendor\"],par[\"camid\"]))\n if par[\"type\"]==\"temperature\":\n currenttemp.append(par[\"handle\"])\n if par[\"type\"]==\"nianalog\":\n currentni.append(par[\"devstr\"])\n \n currentcamsset=set(currentcams)\n currenttempset=set(currenttemp)\n currentniset=set(currentni)\n \n if not self._connectedcams is None:\n camintersection=currentcamsset.intersection(self._connectedcams)\n for cam in list(camintersection):\n currentcamsset.remove(cam)\n\n if not self._connectedtemp is None:\n tempintersection=currenttempset.intersection(self._connectedtemp)\n for temp in list(tempintersection):\n currenttempset.remove(temp)\n \n if not self._connectedni is None:\n niintersection=currentniset.intersection(self._connectedni)\n for ni in list(niintersection):\n currentniset.remove(ni)\n \n 
missing={\"camera\":list(currentcamsset),\"temperature\":list(currenttempset),\"nianalog\":list(currentniset)}\n \n return missing", "title": "" }, { "docid": "ff1dbb732594bd0adf24b12f262abf13", "score": "0.55662936", "text": "def test_filter_missing_default(self, meas):\n print(meas.data.columns)\n meas.set_regression_cols(\n power='meter_power',\n poa='met1_poa_refcell',\n t_amb='met2_amb_temp',\n w_vel='met1_windspeed',\n )\n assert all(meas.rview('all', filtered_data=True).isna().sum() == 0)\n assert meas.data_filtered.shape[0] == 1440\n meas.data_filtered.loc['10/9/90 12:00', 'meter_power'] = np.NaN\n meas.data_filtered.loc['10/9/90 12:30', 'met1_poa_refcell'] = np.NaN\n meas.data_filtered.loc['10/10/90 12:35', 'met2_amb_temp'] = np.NaN\n meas.data_filtered.loc['10/10/90 12:50', 'met1_windspeed'] = np.NaN\n meas.filter_missing()\n assert meas.data_filtered.shape[0] == 1436", "title": "" }, { "docid": "50765df3afe4062e887fd495f7d20620", "score": "0.5561974", "text": "def eliminate_singles(files):\n # allocate memory for the output\n filtered_traces = files.copy()\n # for all the columns\n for col in np.arange(files.shape[1]):\n # get the target trace\n original_trace = files.iloc[:, col]\n # find the derivative of the nan trace\n nan_positions = np.diff(np.isnan(original_trace).astype(np.int32), n=2)\n # find the coordinates of the singles\n single_positions = np.argwhere(nan_positions == 2) + 1\n # single_positions = np.argwhere((nan_positions[:-1] == 1) & (nan_positions[1:] == -1))\n # nan the singles\n filtered_traces.iloc[single_positions, col] = np.nan\n\n return filtered_traces", "title": "" }, { "docid": "b7d28291e59ab3392cb75585f375154b", "score": "0.5544502", "text": "def clean_df(self, df):\n df = df.dropna(axis='columns', how='all')\n df = df.dropna(axis='rows', how='any')\n df = df.astype('float64')\n df.index = df.index.astype('int64')\n return df", "title": "" }, { "docid": "7beeecf20699561c694c5c237529de88", "score": "0.5533206", "text": "def filter_nan(s,o):\n data = np.array([s.flatten(),o.flatten()])\n data = np.transpose(data)\n data = data[~np.isnan(data).any(1)]\n #data = data[~np.isnan(data)]\n return data[:,0],data[:,1]", "title": "" }, { "docid": "6deb83b721aaaea9e4f2f2aac5032b3f", "score": "0.5526761", "text": "def drop_na(self):\n self.df = self.df.dropna(axis=0, how='any')", "title": "" }, { "docid": "535115677c38ae302efaebcf05b89078", "score": "0.55242294", "text": "def clean_non_targets(self, df):\n # drop contaminants\n df = df[df[\"Potential contaminant\"] != \"+\"]\n # drop reverse\n df = df[df[\"Reverse\"] != \"+\"]\n return df", "title": "" }, { "docid": "302ac812d6a4d6da03d1689fbe1ac5e8", "score": "0.55231535", "text": "def clean_nan_inf(ts):\n ts = to_np_array(ts)\n search = (np.isinf(ts) | np.isnan(ts))\n ts[search] = 0\n\n return ts", "title": "" }, { "docid": "d584df708be962da48a56ddd6b8d43d2", "score": "0.5511816", "text": "def remove_nulls(self):\n \n print('Removing Null Data...')\n \n for column in ['Open', 'Close']:\n for index in self.df[column].index[self.df[column].apply(np.isnan)].tolist():\n self.df.drop(labels=index,axis=0,inplace=True)\n print('NULL ELEMENT REMOVED!')\n\n print('All Null Data Removed Successfully!')", "title": "" }, { "docid": "a224e4e86808e5779ba93a7e2c895b22", "score": "0.5505433", "text": "def remove_rows_with_wrong_values(self):\n print(\" removing rows with wrong values...\")\n self.df.dropna(inplace=True)", "title": "" }, { "docid": "0945fb57da5b13eac3a5116d028644a8", "score": "0.54922837", "text": "def 
drop_row_with_NA(self):\n\n # self.main_df = self.main_df[self.main_df[columns] != 0]\n self.main_df = self.main_df.dropna()", "title": "" }, { "docid": "310836c18424bdaa13136de14364ea51", "score": "0.54829514", "text": "def no_modify(data):\n # wash out the data with nan\n return data", "title": "" }, { "docid": "e827c22aa3c1ef2d5d904975bd256876", "score": "0.5474302", "text": "def remove_empty_features(self, tx):\n tx = np.where(tx == -999, np.NaN, tx)\n tx = tx[:, ~np.all(np.isnan(tx), axis=0)]\n return tx", "title": "" }, { "docid": "da6d97796efe7051da7f69e82026797c", "score": "0.54705954", "text": "def drop_na_reactions_or_single_row(df):\n new_df = df.dropna(subset=['reaction'])\n new_df = new_df[new_df.reaction_count_equiv > 0]\n if len(new_df) == 0:\n new_df = df.head(1)\n new_df['reaction'] = np.nan\n new_df['label'] = new_df['gene']\n return new_df", "title": "" }, { "docid": "8f15ae29fe533b404dc846a9c9957e64", "score": "0.5465945", "text": "def drop_nan(self):\n self.df = self.df.dropna(axis=0, how='any')\n return self.df", "title": "" }, { "docid": "c2d52c882873b9b394ba08aa17a39614", "score": "0.54497117", "text": "def remove_rows_with_missing_values(self):\n clean_data = []\n for row in self.data:\n hasMissing = False\n for col in row:\n if col == \"NA\" or col == \"\":\n hasMissing = True\n if not hasMissing:\n clean_data.append(row)\n self.data = clean_data", "title": "" }, { "docid": "ae8ead52a7b3c87ce6d0aa2874c477a8", "score": "0.54421103", "text": "def make_log_no_trips(self):\n self.df['log_trips'] = self.df[(self.df['trips_in_first_30_days'] != 0)].trips_in_first_30_days.apply(np.log)\n self.df['log_trips'] = self.df['log_trips'].apply(lambda x: 0 if np.isnan(x) else x)", "title": "" }, { "docid": "6a2a3655355cff1d9e36930017d6a14d", "score": "0.5433729", "text": "def remove_empty_values_marks(self, data):\n\n data_new = np.ndarray(shape=data.shape, dtype=data.dtype)\n\n __ACCURACY = 0.00001\n\n sample_number = 0\n for features in data:\n feature_index = 0\n for value in features:\n if abs(self.__EMPTY_VALUE - value) > __ACCURACY:\n data_new[sample_number][feature_index] = value\n else:\n data_new[sample_number][feature_index] = None\n feature_index += 1\n sample_number += 1\n print(\"Remove marks about empty values has done!\")\n return data_new", "title": "" }, { "docid": "2ddec8050279af60f2721fd30cf43fe3", "score": "0.54108644", "text": "def _remove_nan(values: TypeValList) -> TypeValList:\n values = np.asarray(values, dtype=float)\n return values[~np.isnan(values)]", "title": "" }, { "docid": "85ffd26d43c6c9bd7e9ff00c77464868", "score": "0.541016", "text": "def check_for_nan(df):\n return df.dropna()", "title": "" }, { "docid": "f070ce783a1e1cc0ab8923b612b26e5e", "score": "0.54039186", "text": "def filter_all(data):\n #filter individual blocks of continuous data\n datablocks = [list(v) for k,v in groupby(data,np.isfinite) if k]\n filtereddatablocks = [filterfunction(v) for v in datablocks]\n if np.any(np.isnan(data)): #if there are gaps\n #get information about gaps\n gap_indeces = np.where(np.isnan(data))[0]\n gap_locs = []\n gap_lengths = []\n for k, g in groupby(enumerate(gap_indeces), lambda ix:ix[1]-ix[0]):\n temprun = list(map(itemgetter(1), g))\n gap_lengths.append(np.size(temprun))\n gap_locs.append(temprun[0])\n #build filtered data with gaps\n filteredrun = np.array([]);\n if gap_locs[0]==0: #if the first space is a gap, append and remove\n filteredrun = np.append(filteredrun, np.full(gap_lengths[0],np.NaN) )\n gap_locs = np.delete(gap_locs,0)\n gap_lengths 
= np.delete(gap_lengths,0)\n for i in range(np.size(filtereddatablocks)):\n filteredrun = np.append(filteredrun,filtereddatablocks[i])\n if i<np.size(gap_locs):\n filteredrun = np.append(filteredrun,np.full(gap_lengths[i],np.NaN))\n else: #if no gaps\n filteredrun = filterfunction(data)\n return filteredrun", "title": "" }, { "docid": "8ff79d1cf1232b2cd2bee9677a15f802", "score": "0.5403428", "text": "def drop_missing_data(df):\n\n return df.dropna()", "title": "" }, { "docid": "58ac718730e57ad72fbd52481e985402", "score": "0.5393061", "text": "def drop_irrelevant(self):\r\n self.df = self.df.drop(['id2', 'state', 'year', 'asthma_rate'], axis=1)", "title": "" }, { "docid": "0a9a5ca95990bcb1ddfe7419ab9b2409", "score": "0.5385711", "text": "def drop_lowz(df):\n df = df.dropna(subset=['FIELD'])\n return df", "title": "" }, { "docid": "bfb57879edba1a2b3b4ef96649e53326", "score": "0.53795296", "text": "def clean_data(df):\n # removes first row time = 0\n df.dropna(how='any', inplace=True)\n df.drop_duplicates(inplace=True)\n\n # remove points where speed is greater than > 100 mph\n df = df[df['speed'] < 100]\n\n # replace unknowns with mean of column\n df = df.replace(-99999, np.nan)\n df = df.fillna(df.mean())\n return df", "title": "" }, { "docid": "0430e6d2ce3529d2c69f6a112c139784", "score": "0.5377258", "text": "def _clean_missing(data, params):\n attributes = params['attributes']\n cleaning_mode = params['cleaning_mode']\n frag = params['id_frag']\n\n if cleaning_mode == \"REMOVE_COLUMN\":\n\n thresh = params['thresh']\n if thresh:\n subset = []\n cols = params['columns_drop']\n for c in cols.index:\n if cols.loc[c] > thresh:\n subset.append(c)\n else:\n subset = params['columns_drop']\n data = data.drop(subset, axis=1)\n\n elif cleaning_mode == \"MEAN\":\n\n sizes, sums = params['values']\n values = np.divide(sums, sizes)\n\n for v, a in zip(values, attributes):\n data[a] = data[a].fillna(value=v)\n\n elif cleaning_mode == \"MODE\":\n dict_mode = params['dict_mode']\n for att in dict_mode:\n mode = dict_mode[att].idxmax()\n data[att] = data[att].fillna(value=mode)\n\n elif cleaning_mode == 'MEDIAN':\n medians = params['values']\n for att in medians:\n data[att] = data[att].fillna(value=medians[att])\n\n data.reset_index(drop=True, inplace=True)\n info = generate_info(data, frag)\n return data, info", "title": "" }, { "docid": "bc3286b60c67caabbf08b38675cf0de7", "score": "0.53675956", "text": "def drop_missing(X):\n nonmissing=X[0].copy()\n nonmissing['Nonmissing']=True\n nonmissing=nonmissing['Nonmissing']\n for x in X:\n nonmissing.where(pd.notnull(x).all(axis=1),False,inplace=True)\n\n for i in range(len(X)):\n X[i] = X[i].loc[nonmissing,:]\n\n return tuple(X)", "title": "" }, { "docid": "ed07a105be7b32edd490f39e6279fde8", "score": "0.53658754", "text": "def flag_na(df,remove_rows=False):\n df = df.copy()\n n_bad = sum(np.ravel(df.isnull().values))\n print('{} bad values in the dataset.')\n if remove_rows == True:\n df.dropna()\n return(df)", "title": "" }, { "docid": "d3ccf0fca21cb7d020e01721e6d73f4f", "score": "0.5357542", "text": "def _getmissingsensors(self):\n currentcams=[]\n currenttemp=[]\n currentni=[]\n missingcams=[]\n missingtemp=[]\n missingni=[]\n for par in self._paramlist:\n if par[\"type\"]==\"camera\":\n currentcams.append((par[\"vendor\"],par[\"camid\"]))\n if par[\"type\"]==\"temperature\":\n currenttemp.append(par[\"handle\"])\n if par[\"type\"]==\"nianalog\":\n currentni.append(par[\"devstr\"])\n \n for cam in self._connectedcams:\n if not cam in currentcams:\n 
missingcams.append(cam)\n if not self._connectedtemp is None:\n for temp in self._connectedtemp:\n if not temp in currenttemp:\n missingtemp.append(temp)\n if not self._connectedni is None:\n for ni in self._connectedni:\n if not ni in currentni:\n missingni.append(ni)\n missing={\"camera\":missingcams, \"temperature\":missingtemp,\"nianalog\":missingni}\n return missing", "title": "" }, { "docid": "1c24a661c654de184a93b6d3fcc684cf", "score": "0.53526807", "text": "def nan_trim(data):\r\n data_diff = np.diff(data)\r\n\r\n for i, diff in reversed(list(enumerate(data_diff))):\r\n if diff == 1:\r\n data.pop(i)\r\n\r\n return data", "title": "" }, { "docid": "d773f973c4e24a2274159b9e08fef1b8", "score": "0.53520674", "text": "def nans_remove(self):\n msk = np.isfinite(self.y)\n return self.__class__(self.x[msk], self.y[msk])", "title": "" }, { "docid": "11f98e0c9343893178195cbd3f034352", "score": "0.5342439", "text": "def remove_na_lines(self, max_na_proportion:float=None):\n\n if max_na_proportion:\n drop_lines = list(self.df.loc[self.df.isna().sum(axis=1)>len(self.df.columns)*max_na_proportion].index) \n else:\n drop_lines = self.na_lines\n #print(f'Lines removed:{drop_lines[0:4]}...')\n self.df = self.df.drop(drop_lines)\n return self.df", "title": "" }, { "docid": "075d66d00dfa6a68a225f3dacc5ab7fe", "score": "0.5338691", "text": "def cleanData(dataToClean):\n for i in range(len(dataToClean)):\n for j in range(len(dataToClean[0])):\n if np.isnan(dataToClean[i][j]):\n dataToClean[i][j]=0", "title": "" }, { "docid": "a12091d6b20c49b7725690d923460001", "score": "0.5331909", "text": "def clean_df(df):\n df = df.where(pd.notnull(df), None)\n df = df.fillna(value=0)\n return df", "title": "" }, { "docid": "3d6bb36d5febd036ea6d7c4c4267a6a5", "score": "0.533119", "text": "def _drop_obs(df):\n drop_subj = []\n for sid, row in df.iterrows():\n try:\n if sum(row.dropna()) == 0:\n drop_subj.append(sid)\n elif row.count() == 0:\n drop_subj.append(sid)\n except TypeError: # compatible to ados\n if sum(row.astype(float).dropna()) == 0:\n drop_subj.append(sid)\n elif row.astype(float).count() == 0:\n drop_subj.append(sid)\n return drop_subj", "title": "" }, { "docid": "260be91c0b8510b71ae7a1d740960b03", "score": "0.53306866", "text": "def NaN_cleaning(df):\n df = df.replace(np.nan, 'unknown')\n return df.reset_index(drop=True)", "title": "" }, { "docid": "5be06db39233a32605860ac8b3689a5c", "score": "0.53283554", "text": "def _remove_missing_data(\n data_values: NDArray,\n geometry_array: geopandas.GeoSeries,\n) -> tuple[NDArray, geopandas.GeoSeries]:\n not_missing_data = ~pandas.isnull(data_values)\n geometry_array = geometry_array[not_missing_data]\n data_values = data_values[not_missing_data]\n return data_values, geometry_array", "title": "" }, { "docid": "aeb9b42a6eaa6253ee3b8d523139e299", "score": "0.5324406", "text": "def quick_clean(data):\n rows_missing_data = (data == 0).sum(1)\n return data[rows_missing_data == 0, :]", "title": "" }, { "docid": "82d9e2a4681978a2e091275a72005b3c", "score": "0.5316683", "text": "def clean_nan(data, data_column):\r\n data.dropna(subset=[data_column, \\\r\n 'Quality'], inplace=True)\r\n return data", "title": "" }, { "docid": "a7f38a2777c41e92eecb61343ef10b22", "score": "0.53023773", "text": "def filter_zeroes(self):\n self.U[self.U == 0] = np.nan\n self.V[self.V == 0] = np.nan\n self.W[self.W == 0] = np.nan", "title": "" }, { "docid": "985aa5c0aa5f067935e58f73b2100dbc", "score": "0.5297176", "text": "def remove_nans(array_like):\n data = array_like.copy()\n \n # 
Astropy table object\n if type(data) is Table:\n data = data.filled(NAN_VALUE)\n # Pandas DataFrame object\n elif type(data) is pd.DataFrame:\n data = data.fillna(NAN_VALUE)\n else:\n print(\"Unrecognized data format:\", type(data))\n return None\n \n # Return final data structure\n return data", "title": "" }, { "docid": "914a197db781cb12a5fe19ab43161b5d", "score": "0.5292508", "text": "def test_DataCleaner_big_nan_handler_warning(self):\n dc = DataCleaner(max_na_frac=0.01)\n df = pd.DataFrame(\n np.random.randint(0, 100, size=(100, 4)), columns=list(\"ABCD\")\n )\n dc.fit(df, \"D\")\n self.assertEqual(len(dc.warnings), 0)\n\n df[\"A\"].iloc[10:20] = np.nan\n df[\"B\"].iloc[:99] = np.nan\n dc.fit(df, \"D\")\n self.assertEqual(len(dc.warnings), 1)", "title": "" }, { "docid": "dc851de57a11012e4155217bb31adc96", "score": "0.52870226", "text": "def delete_rows(df):\r\n\r\n df = df.drop(df[df.img.isna()].index)\r\n df = df.drop(df[df.cost.isna()].index)\r\n df = df.drop(df[df.rarity.isna()].index)\r\n return df", "title": "" }, { "docid": "7fbcab9e0a3ed7e49af6f0c03573e528", "score": "0.5284864", "text": "def test_nans_replaced_none():\n\n #generate test data\n dataframe = pandas.DataFrame(numpy.random.randint(-100, 100, size=(100, 6)), columns=list('ABCDEF'))\n dataframe['A'][5] = numpy.nan\n\n #run function on dataframe\n result_dataframe = cleaners.handle_nans(dataframe, data_replacement=\"none\", removal_time_frame=\"day\",\n fault_placement=\"calendar\")\n\n assert dataframe.size == result_dataframe.size, \"nan has not been dropped\"\n assert result_dataframe['A'][5] != numpy.nan, \"nan was removed but shouldn't have\"", "title": "" }, { "docid": "08006fa93818a2f08ef6539d0e78d6b8", "score": "0.5269665", "text": "def remove_duplicates_and_full_nulls(data):\n data = data.copy()\n data = data.drop_duplicates()\n data = data.dropna(how ='all')\n return data", "title": "" }, { "docid": "d9cd6b277461a674b517515b1a4294eb", "score": "0.52642363", "text": "def remove_null(dataframe):\n\n dataframe = dataframe.dropna()\n return dataframe", "title": "" }, { "docid": "52409db8b7b10f8ebaa0732ca7d73c16", "score": "0.52546704", "text": "def _rmDataWithoutBrightStar(self, neighborStarMap, starMap,\n wavefrontSensors):\n\n # Collect the sensor list without the bright star\n noStarSensorList = []\n for detector, stars in neighborStarMap.items():\n if (len(stars.getId()) == 0):\n noStarSensorList.append(detector)\n\n # Remove the data in map\n for detector in noStarSensorList:\n neighborStarMap.pop(detector)\n starMap.pop(detector)\n wavefrontSensors.pop(detector)", "title": "" }, { "docid": "3392f154927a0b3ea4e7a68b784f6179", "score": "0.5254399", "text": "def filter_nan(s,o):\n data = np.array([s.flatten(),o.flatten()])\n data = np.transpose(data)\n data = data[~np.isnan(data).any(1)]\n\n return data[:,0],data[:,1]", "title": "" }, { "docid": "bbe7cf61ecf67b86675bc79f599d13c2", "score": "0.5251705", "text": "def test_filter_by_NaN(self):\n test_scope = scope.Scope(-4.9, -4.70, 48, 49, 0, 1500000000)\n test_preprocessing = preprocessing.Preprocessing(test_scope, self.test_ships)\n\n self.assertEqual(len(self.test_ships), 15)\n test_preprocessing.filter_by_NaN()\n self.assertEqual(len(self.test_ships), 7)\n\n number_states = {'245257000': 29, '228037600': 259, '304091000': 1,\n '227705102': 249, '227443000': 92, '228931000': 21,\n '228131600': 350, '228064900': 4, '37100300': 7}\n\n for ship in self.test_ships:\n self.assertEqual(len(ship.ais_states), number_states[ship.sourcemmsi],\n msg=\"sourcemmsi is \" 
+ ship.sourcemmsi)", "title": "" }, { "docid": "9ee4fe5bb4730d0e6d8c78730fa48d55", "score": "0.52495563", "text": "def remove_rows_with_missing_values(self):\n row_length = len(self.column_names)\n table_copy = []\n \n for i in range(len(self.data)):\n has_NA = False\n for j in range(row_length):\n if self.data[i][j] == \"NA\" or self.data[i][j] == \"\":\n has_NA = True\n if has_NA == False: \n table_copy.append(self.data[i])\n \n self.data.clear()\n self.data = copy.deepcopy(table_copy) \n pass # TODO: fix this", "title": "" }, { "docid": "16ead744219067e1a2c3a93e51f6bfe2", "score": "0.5245453", "text": "def _clean_useless_rows(self):\r\n self.temp_stock_data = pandas.DataFrame()\r\n dates = []\r\n for i in xrange(0, self.stock_data[self.company_list.Companies.irow(0)].count()):\r\n #print i\r\n row = self.stock_data.irow(i)\r\n if len(row[numpy.isnan(row)]) == len(row):\r\n dates.append(self.date_list[i])\r\n \r\n for single_date in dates:\r\n self.stock_data = self.stock_data.drop(pandas.Timestamp(single_date))\r\n print \"Data cleaned from useless rows.\"", "title": "" }, { "docid": "6fc475ee4cc92954c4f70406962b6150", "score": "0.52325326", "text": "def data_cleaning(df: DataFrame) -> DataFrame:\n print(\"Cleaning the data to remove problems\\n \")\n\n nulls = df.isnull().sum()\n nulls = nulls[nulls != 0]\n\n if not nulls.empty:\n print(f\"The columns below some 'NaN' values:\\n{nulls}\")\n\n # for index_with_nan in enumerate(nulls.keys()[:]):\n # print(f\"Column: {index_with_nan} \\n \")\n\n if 'Birth Year' in df.columns.values.tolist():\n df = dob_trim(df)\n\n time.sleep(2)\n\n no_user_type: float = format(df['User Type'].isna().sum()/df.shape[0]*100, '.2g')\n print(f\"A few rows don't have any information for 'User Type' but it is only {no_user_type}% so we'll just drop them\")\n df = df.dropna(axis = 0, subset=['User Type'])\n\n if 'Gender' in df.columns.values.tolist():\n gender_neutral: float = format(df['Gender'].isna().sum()/df.shape[0]*100, '.2g')\n print(f\"{gender_neutral}% of trips don't have any information for 'Gender' so we'll forward-fill.\")\n df.fillna(method=\"ffill\")\n # gender_neutral: float = format(nulls['Gender']/df.shape[0]*100, '.2g')\n # print(f\"A few rows don't have any information for 'Gender' but it is only {gender_neutral}% so we'll just drop them\")\n # df = df.dropna(axis = 0, subset=['Gender'], inplace=True)\n \n return df", "title": "" }, { "docid": "658b7ab760716acff016b703ca146fca", "score": "0.5225665", "text": "def removeNansSingle(data):\n # set an x array\n x = np.arange(data.size)\n # find locations of nans - this is a bool array with True in locations with nan values\n nanLocs = np.isnan(data)\n # if no nans, do nothing\n if not np.any(nanLocs):\n return data # no nans to remove\n # create mask\n mask = np.ones(data.size, np.bool)\n mask[nanLocs] = 0 # using numpy indexing with bool arrays\n # no need to group, want to remove every nan\n data[nanLocs] = np.interp(x[nanLocs], x[mask], data[mask])\n return data", "title": "" }, { "docid": "5cb6066ce4a1f9dc1a890971b21dec1f", "score": "0.5221285", "text": "def remove_notes(data):\n return data[pd.to_numeric(data.iloc[:, 0], errors='coerce').notnull()]", "title": "" }, { "docid": "6a937fac4a9481e0cde52d043b27035f", "score": "0.52128845", "text": "def filter_spikes(df:pd.DataFrame):\n \"\"\"first return argument is non-spike signals, the second one for the spikes\"\"\"\n new_df=pd.DataFrame()\n spike_cluster=pd.DataFrame()\n for i in range(len(df.columns)):\n tmp=df.iloc[:, i] \n 
uniques=tmp.unique()\n uniques = [x for x in uniques if str(x) != 'nan']\n uniques = [x for x in uniques if x not in [0.0,1.0]]\n if(len(uniques)<=1): #spike like\n spike_cluster.insert(len(spike_cluster.columns),tmp.name,tmp)\n else:\n new_df.insert(len(new_df.columns),tmp.name,tmp)\n return new_df,spike_cluster", "title": "" }, { "docid": "4a372da4cfe63a9a92a7f200bb86f20a", "score": "0.5209513", "text": "def nan_clean(array):\n for i in range(len(array)):\n if np.isnan(array[i]) == True:\n array[i] = 0.0\n return array", "title": "" }, { "docid": "6a41fc2d468a3aa9657429af2f10331b", "score": "0.52041674", "text": "def remove_nan_observations(x, y, z):\n x_ = x[~np.isnan(z)]\n y_ = y[~np.isnan(z)]\n z_ = z[~np.isnan(z)]\n return x_, y_, z_", "title": "" }, { "docid": "7b824e07c74f481cca378320af481262", "score": "0.5198521", "text": "def filterNaN(instr, datacol, outfile, logfile, verbose):\n try:\n nanclean = instr[1].header['NANCLEAN']\n except:\n naxis2 = 0\n for i in range(len(instr[1].columns.names)):\n if 'time' in instr[1].columns.names[i].lower():\n timecol = instr[1].columns.names[i]\n try:\n instr[1].data.field(datacol)\n except:\n msg = (\"ERROR -- KEPIO.FILTERNAN: cannot find column {}\"\n \"in the infile\".format(datacol))\n kepmsg.err(logfile, msg, verbose)\n try:\n for i in range(len(instr[1].data.field(0))):\n if (str(instr[1].data.field(timecol)[i]) != '-inf' and\n str(instr[1].data.field(datacol)[i]) != '-inf'):\n instr[1].data[naxis2] = instr[1].data[i]\n naxis2 += 1\n instr[1].data = instr[1].data[:naxis2]\n comment = 'NaN cadences removed from data'\n kepkey.new('NANCLEAN', True, comment, instr[1], outfile, logfile,\n verbose)\n except:\n errmsg = ('ERROR -- KEPIO.FILTERNAN: Failed to filter NaNs from '\n + outfile)\n kepmsg.err(logfile, errmsg, verbose)\n return instr", "title": "" }, { "docid": "e14120618e0caa9c1a1adf8bf742894f", "score": "0.51975524", "text": "def fix_na_values(df):\n ignored = [\"SUBJECT_ID\", \"HADM_ID\", \"ADMITTIME\", \"DISCHTIME\"]\n df_core = df.drop(ignored, axis=1)\n\n while df_core.isna().sum().sum():\n # get column with least amount of missing values\n cols_with_na = df_core.isna().sum()\n col = cols_with_na[cols_with_na > 0].idxmin()\n # impute that column\n df_core.loc[df_core[col].isna(), col] = impute_column(df_core, col)\n\n return pd.concat([df_core, df[ignored]], axis=1)", "title": "" }, { "docid": "13995298c15ddffef72188fbf4ae9e9e", "score": "0.51944923", "text": "def test_nans_dropped():\n\n #generate test data\n dataframe = pandas.DataFrame(numpy.random.randint(-100, 100, size=(100, 6)), columns=list('ABCDEF'))\n dataframe['A'][5] = numpy.nan\n\n #run function on dataframe\n result_dataframe = cleaners.handle_nans(dataframe, data_replacement=\"drop\", removal_time_frame=\"day\", fault_placement=\"calendar\")\n assert dataframe.size != result_dataframe.size, \"nan have not been dropped\"", "title": "" }, { "docid": "9105baa116f0859c24acfe0688351a8b", "score": "0.51941323", "text": "def drop_missing(self):\n df1 = self.df\n df2 = df1.dropna(axis=0)\n\n self.df = df2", "title": "" }, { "docid": "fe78430afcdbf63973f5fb1eef658c15", "score": "0.5192824", "text": "def remove_missing_values(x):\n x_clean = x.copy()\n x_clean = x_clean[:, [np.count_nonzero(x.T[i] == -999) == 0 for i in range (x.shape[1])]]\n return x_clean", "title": "" }, { "docid": "26e1f1380c66022fe6c1a3408b34b606", "score": "0.51923764", "text": "def remove_rcna(df):\n df.dropna(how = 'all', axis = 'columns', inplace = True)\n df.dropna(how = 'all', axis = 'rows', 
inplace = True)\n return df", "title": "" }, { "docid": "97e3227a5225488f9568a8c415801ea9", "score": "0.51922977", "text": "def generate_query_ratesnull(self):\n pass", "title": "" }, { "docid": "934058862a1706b0deffe110b8780021", "score": "0.518677", "text": "def _series_cleaner(self, obs_data: Union[dict, list]) -> pd.DataFrame:\n series = pd.json_normalize(obs_data[\"observations\"])\n series.drop(columns=[\"realtime_start\", \"realtime_end\"], inplace=True)\n series[\"value\"] = series[\"value\"].replace(self.nan_char, np.nan)\n return series", "title": "" }, { "docid": "eabc39526af7b6e52689bfe97d78ffba", "score": "0.5184404", "text": "def drop_missing(self):\n self.miss = np.rint(len(self.data.columns)*self.miss)\n self.data = self.data[self.data.isnull().sum(axis=1)<=self.miss]", "title": "" }, { "docid": "b6e550833aaa3e203af1bd5115ecd7f9", "score": "0.5179664", "text": "def clean_data(df):\n data_types = get_missing_columns()\n \n for idx in data_types.index:\n column = idx\n if column in df.columns:\n df[column] = df[column].replace(data_types.loc[column]['nan_vals'], np.nan)\n\n \n # remove selected columns\n df.drop(['AGER_TYP', 'ALTER_HH', 'ALTER_KIND1', 'ALTER_KIND2', 'ALTER_KIND3', 'ALTER_KIND4', 'ALTERSKATEGORIE_FEIN',\\\n 'D19_BANKEN_ONLINE_QUOTE_12', 'D19_GESAMT_ONLINE_QUOTE_12', 'D19_KONSUMTYP', 'D19_LETZTER_KAUF_BRANCHE',\\\n 'D19_LOTTO', 'D19_SOZIALES', 'D19_TELKO_ONLINE_QUOTE_12', 'D19_VERSAND_ONLINE_QUOTE_12',\\\n 'D19_VERSI_ONLINE_QUOTE_12', 'EXTSEL992','GEBURTSJAHR', 'KBA05_BAUMAX', 'KK_KUNDENTYP', 'TITEL_KZ', 'LNR'],\n axis=1, inplace=True)\n \n print(\"Removal of Columns with over 20% null values complete...\")\n \n # remove selected rows\n df = df[df.isnull().sum(axis=1) < 9].reset_index(drop=True)\n \n # remove selected columns again\n features_not_in_feat_info_42 = ['AKT_DAT_KL', 'ANZ_KINDER', 'ANZ_STATISTISCHE_HAUSHALTE', 'CJT_KATALOGNUTZER','CJT_TYP_1',\\\n 'CJT_TYP_2', 'CJT_TYP_3', 'CJT_TYP_4', 'CJT_TYP_5', 'CJT_TYP_6', 'DSL_FLAG',\\\n 'EINGEZOGENAM_HH_JAHR','FIRMENDICHTE', 'GEMEINDETYP', 'HH_DELTA_FLAG', 'KBA13_ANTG1',\\\n 'KBA13_ANTG2', 'KBA13_ANTG3','KBA13_ANTG4', 'KBA13_BAUMAX', 'KBA13_CCM_1401_2500',\\\n 'KBA13_GBZ', 'KBA13_HHZ', 'KBA13_KMH_210','KONSUMZELLE', 'MOBI_RASTER', 'RT_KEIN_ANREIZ',\\\n 'RT_SCHNAEPPCHEN', 'RT_UEBERGROESSE', 'STRUKTURTYP','UMFELD_ALT', 'UMFELD_JUNG',\\\n 'UNGLEICHENN_FLAG', 'VERDICHTUNGSRAUM', 'VHA', 'VHN', 'VK_DHT4A','VK_DISTANZ',\\\n 'VK_ZG11','D19_KONSUMTYP_MAX', 'KOMBIALTER','EINGEFUEGT_AM']\n \n df.drop(features_not_in_feat_info_42,axis=1, inplace=True)\n \n print(\"Removal of undocumented columns complete...\")\n \n \n # select, re-encode, and engineer column values.\n # # feature engineering of categorical features\n\n df['OST_WEST_KZ'].replace(['W','O'], [1, 0], inplace=True)\n \n multi = ['CAMEO_DEU_2015', 'CAMEO_DEUG_2015', 'CJT_GESAMTTYP', 'D19_BANKEN_ANZ_12', 'D19_BANKEN_ANZ_24',\\\n 'D19_BANKEN_DATUM','D19_BANKEN_OFFLINE_DATUM', 'D19_BANKEN_ONLINE_DATUM', 'D19_GESAMT_ANZ_12',\\\n 'D19_GESAMT_ANZ_24', 'D19_GESAMT_DATUM','D19_GESAMT_OFFLINE_DATUM', 'D19_GESAMT_ONLINE_DATUM',\\\n 'D19_TELKO_DATUM', 'D19_TELKO_OFFLINE_DATUM', 'D19_TELKO_ONLINE_DATUM','D19_VERSAND_DATUM',\\\n 'D19_VERSAND_OFFLINE_DATUM', 'D19_VERSAND_ONLINE_DATUM', 'D19_VERSI_DATUM', 'D19_VERSI_OFFLINE_DATUM',\\\n 'D19_VERSI_ONLINE_DATUM', 'FINANZTYP', 'GEBAEUDETYP', 'GFK_URLAUBERTYP','LP_FAMILIE_FEIN','LP_FAMILIE_GROB',\\\n 'LP_STATUS_FEIN', 'LP_STATUS_GROB', 'NATIONALITAET_KZ', 'SHOPPER_TYP', 'ZABEOTYP']\n \n df = pd.get_dummies(df, columns=multi, 
prefix=multi)\n \n \n # feature engineering of mixed features\n df['PRAEGENDE_JUGENDJAHRE_decade'] = df['PRAEGENDE_JUGENDJAHRE'].apply(make_decade)\n df['PRAEGENDE_JUGENDJAHRE_movement'] = df['PRAEGENDE_JUGENDJAHRE'].apply(make_movement)\n df.drop('PRAEGENDE_JUGENDJAHRE', axis=1, inplace=True)\n \n print(\"Feature Engineering PRAEGENDE_JUGENDJAHRE complete...\")\n \n \n df['CAMEO_INTL_2015_wealth'] = df['CAMEO_INTL_2015'].apply(make_wealth) \n df['CAMEO_INTL_2015_life_stage'] = df['CAMEO_INTL_2015'].apply(make_life_stage)\n df.drop('CAMEO_INTL_2015', axis=1, inplace=True)\n \n print(\"Feature Engineering CAMEO_INTL_2015 complete...\")\n \n df['WOHNLAGE_rural'] = df['WOHNLAGE'].map({0:0,1:0,2:0,3:0,4:0,5:0,7:1,8:1})\n df['WOHNLAGE_rating_class'] = df['WOHNLAGE'].map({0:0,1:1,2:2,3:3,4:4,5:5,7:0,8:0})\n \n print(\"Feature Engineering WOHNLAGE complete...\")\n \n df = df.drop(['LP_LEBENSPHASE_FEIN','LP_LEBENSPHASE_GROB','WOHNLAGE','PLZ8_BAUMAX'], axis=1)\n \n print(\"Finished!\")\n \n # Return the cleaned dataframe.\n return df", "title": "" }, { "docid": "50b15f9a5bc9f58c3f1fda20bd23a98b", "score": "0.5174193", "text": "def clean_data(self):\n # Mask files with lat = 0 or lon = 0\n mask = np.where(np.logical_or(self.data['VEH_LAT']==0, \n self.data['VEH_LONG']==0))\n\n for k in self.data.keys():\n if isinstance(self.data[k][0], float):\n self.data[k][mask] = np.nan", "title": "" }, { "docid": "0fb2a849eb9288c2238968e9235b9752", "score": "0.51717937", "text": "def drop_null_rows(self, x: bool):\n if x:\n total_null_rows = self.df[self.target].isin(\n [' ', 'NULL', np.nan]).sum()\n if total_null_rows > 0:\n print(\"Dropping rows having [' ', 'NULL', numpy.nan] values\")\n self.df.dropna(inplace=True)\n self.df.reset_index(drop=True, inplace=True)\n self.null_values_present = False\n print_in_red(f\"Total Null rows dropped: {total_null_rows}\\n\")\n self._start_analysis()\n else:\n print(colored(\"There is no null rows present.\\n\", \"green\"))", "title": "" }, { "docid": "52449af6e301d2d873cf47553c2b38b8", "score": "0.5167622", "text": "def filter_nan(s, o):\n data = np.array([s.flatten(), o.flatten()])\n data = np.transpose(data)\n data = data[~np.isnan(data).any(1)]\n return data[:, 0], data[:, 1]", "title": "" }, { "docid": "6512e72d0658c2f1c0378e699cc56170", "score": "0.51613533", "text": "def clean_timeseries(self):\n if 'tb_incidence' in self.timeseries_log.keys():\n self.timeseries_log['tb_incidence'][0] = 0.", "title": "" }, { "docid": "248a5d7d28357321ce39d2c77fe712a7", "score": "0.5160788", "text": "def filter_values_2(self, counter):\n inf = float(\"inf\")\n last_mea = list(self.scan_values_2[counter])\n for i in range(len(last_mea)):\n if last_mea[i] == inf:\n for k in range(1, MEASUREMENTS):\n if self.scan_values_2[counter-k] != inf:\n last_mea[i] = self.scan_values_2[k][i]\n break\n scan = LaserScan()\n scan.ranges = last_mea\n self.laser_filtered_pub.publish(scan)", "title": "" }, { "docid": "16cc667bcf39fc41ca499a7580cf48d0", "score": "0.51605296", "text": "def _get_instr_after_remove(self, remove_size_null, remove_adj_cap_nan):\n if any([remove_size_null, remove_adj_cap_nan]): # will update instr_df\n instr_df = self.concat_instr_df.copy()\n else: # nothing will be changed in self.concat_instr_df so no need to copy\n instr_df = self.concat_instr_df\n if instr_df.empty:\n raise Exception('concat_instr_df is empty. 
Cannot display metrics')\n        if remove_size_null:\n            no_instrs = len(instr_df)\n            idx_size_null = instr_df['stake'] == 0\n            if sum(idx_size_null) > 0:\n                self.logger.warning('Dropping {}/{} instructions with stake 0'.format(\n                    sum(idx_size_null), no_instrs))\n                instr_df = instr_df.loc[~idx_size_null]\n        if remove_adj_cap_nan:\n            if 'adj_cap' not in instr_df.columns:\n                raise Exception('concat_instr_df does not have column '\n                                'adj_cap and so cannot remove_adj_cap_nan')\n            no_instrs = len(instr_df)\n            idx_nan = instr_df['adj_cap'].isnull()\n            if sum(idx_nan) > 0:\n                self.logger.warning('Keeping {}/{} instructions with known capital adjustment'\n                                    .format(no_instrs - sum(idx_nan), no_instrs))\n                # remove NaN in adjusted capital\n                instr_df = instr_df.loc[~idx_nan]\n        return instr_df", "title": "" } ]
014a5481438d0e9b71966a48d4d8bbf6
Generate an NL or LP file from Pyomo, and then do subsequent conversions.
[ { "docid": "b50fba180d6eee76d9713189ba7dcb27", "score": "0.56642395", "text": "def apply(self, *args, **kwds):\n\n import pyomo.scripting.convert\n\n capabilities = kwds.pop(\"capabilities\", None)\n\n # all non-consumed keywords are assumed to be options\n # that should be passed to the writer.\n io_options = {}\n for kwd, value in iteritems(kwds):\n io_options[kwd] = value\n kwds.clear()\n\n # basestring is gone in Python 3.x, merged with str.\n if PY3:\n compare_type = str\n else:\n compare_type = basestring\n\n if isinstance(args[2], compare_type):\n instance = None\n else:\n instance = args[2]\n\n if args[1] == ProblemFormat.cpxlp:\n problem_filename = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix = '.pyomo.lp')\n if instance is not None:\n if isinstance(instance, IBlock):\n symbol_map_id = instance.write(\n problem_filename,\n format=ProblemFormat.cpxlp,\n _solver_capability=capabilities,\n _called_by_solver=True,\n **io_options)\n else:\n (problem_filename, symbol_map_id) = \\\n instance.write(\n filename=problem_filename,\n format=ProblemFormat.cpxlp,\n solver_capability=capabilities,\n io_options=io_options)\n return (problem_filename,), symbol_map_id\n else:\n\n #\n # I'm simply exposing a fatal issue with\n # this code path. How would we convert the\n # collected keywords into command-line\n # arguments that can be sent to the writer?\n #\n if len(io_options):\n raise ValueError(\n \"The following io_options will be ignored \"\n \"(please create a bug report):\\n\\t\" +\n \"\\n\\t\".join(\"%s = %s\" % (k,v)\n for k,v in iteritems(io_options)))\n\n ans = pyomo.scripting.convert.\\\n pyomo2lp(['--output',problem_filename,args[2]])\n if ans.errorcode:\n raise RuntimeError(\"pyomo2lp conversion \"\n \"returned nonzero error code \"\n \"(%s)\" % ans.errorcode)\n\n model = ans.retval\n problem_filename = model.filename\n symbol_map = model.symbol_map\n return (problem_filename,),symbol_map\n\n elif args[1] == ProblemFormat.bar:\n problem_filename = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix = '.pyomo.bar')\n if instance is not None:\n if isinstance(instance, IBlock):\n symbol_map_id = instance.write(\n problem_filename,\n format=ProblemFormat.bar,\n _solver_capability=capabilities,\n _called_by_solver=True,\n **io_options)\n else:\n (problem_filename, symbol_map_id) = \\\n instance.write(\n filename=problem_filename,\n format=ProblemFormat.bar,\n solver_capability=capabilities,\n io_options=io_options)\n return (problem_filename,), symbol_map_id\n else:\n\n #\n # I'm simply exposing a fatal issue with\n # this code path. 
How would we convert the\n # collected keywords into command-line\n # arguments that can be sent to the writer?\n #\n if len(io_options):\n raise ValueError(\n \"The following io_options will be ignored \"\n \"(please create a bug report):\\n\\t\" +\n \"\\n\\t\".join(\"%s = %s\" % (k,v)\n for k,v in iteritems(io_options)))\n\n ans = pyomo.scripting.convert.\\\n pyomo2bar(['--output',problem_filename,args[2]])\n if ans.errorcode:\n raise RuntimeError(\"pyomo2bar conversion \"\n \"returned nonzero error code \"\n \"(%s)\" % ans.errorcode)\n model = ans.retval\n problem_filename = model.filename\n symbol_map = model.symbol_map\n return (problem_filename,),symbol_map\n\n elif args[1] in [ProblemFormat.mps, ProblemFormat.nl]:\n if args[1] == ProblemFormat.nl:\n problem_filename = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix = '.pyomo.nl')\n if io_options.get(\"symbolic_solver_labels\", False):\n pyutilib.services.TempfileManager.add_tempfile(\n problem_filename[:-3]+\".row\",\n exists=False)\n pyutilib.services.TempfileManager.add_tempfile(\n problem_filename[:-3]+\".col\",\n exists=False)\n else:\n assert args[1] == ProblemFormat.mps\n problem_filename = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix = '.pyomo.mps')\n if instance is not None:\n if isinstance(instance, IBlock):\n symbol_map_id = instance.write(\n problem_filename,\n format=args[1],\n _solver_capability=capabilities,\n _called_by_solver=True,\n **io_options)\n else:\n (problem_filename, symbol_map_id) = \\\n instance.write(\n filename=problem_filename,\n format=args[1],\n solver_capability=capabilities,\n io_options=io_options)\n return (problem_filename,), symbol_map_id\n else:\n\n #\n # I'm simply exposing a fatal issue with\n # this code path. How would we convert the\n # collected keywords into command-line\n # arguments that can be sent to the writer?\n #\n if len(io_options):\n raise ValueError(\n \"The following io_options will be ignored \"\n \"(please create a bug report):\\n\\t\" +\n \"\\n\\t\".join(\"%s = %s\" % (k,v)\n for k,v in iteritems(io_options)))\n\n ans = pyomo.scripting.convert.\\\n pyomo2nl(['--output',problem_filename,args[2]])\n if ans.errorcode:\n raise RuntimeError(\"pyomo2nl conversion \"\n \"returned nonzero error \"\n \"code (%s)\" % ans.errorcode)\n model = ans.retval\n problem_filename = model.filename\n symbol_map = model.symbol_map\n\n if args[1] == ProblemFormat.nl:\n return (problem_filename,),symbol_map\n #\n # Convert from NL to MPS\n #\n # TBD: We don't support a variable map file when going\n # from NL to MPS within the PICO converter.\n # NOTE: this is a problem with the MPS writer that is\n # provided by COIN-OR\n # NOTE: we should generalize this so it doesn't strictly\n # depend on the PICO converter utility.\n #\n ans = self.pico_converter.apply(ProblemFormat.nl,\n ProblemFormat.mps,\n problem_filename)\n os.remove(problem_filename)\n return ans\n\n elif args[1] == ProblemFormat.osil:\n if False:\n problem_filename = pyutilib.services.TempfileManager.\\\n create_tempfile(suffix='pyomo.osil')\n if instance:\n if isinstance(instance, IBlock):\n symbol_map_id = instance.write(\n problem_filename,\n format=ProblemFormat.osil,\n _solver_capability=capabilities,\n _called_by_solver=True,\n **io_options)\n else:\n (problem_filename, symbol_map_id) = \\\n instance.write(\n filename=problem_filename,\n format=ProblemFormat.osil,\n solver_capability=capabilities,\n io_options=io_options)\n return (problem_filename,), None\n else:\n raise NotImplementedError(\n 
\"There is currently no \"\n \"script conversion available from \"\n \"Pyomo to OSiL format.\")", "title": "" } ]
[ { "docid": "67db92a1a3821a68008dc249af915482", "score": "0.5871873", "text": "def create_model(self):\r\n\r\n self.try_load_model()\r\n\r\n self.set_correct_problem_instance_name()\r\n\r\n self.add_objective_and_budget_constraints()\r\n self.add_starting_ending_nodes_constraints()\r\n\r\n self.docplex_model.print_information()\r\n\r\n # DEBUG\r\n # for constraint in self.docplex_model.iter_constraints():\r\n # print(constraint)\r", "title": "" }, { "docid": "95e1197a8a29532e1bb1bc651f420051", "score": "0.5701343", "text": "def formula_generator(known_args, other_args): # pylint: disable=W0613\n poly = Poly(*known_args.format)\n\n if known_args.smt2:\n with open(known_args.smt2, 'w') as smt2_file:\n with redirect_stdout(smt2_file):\n poly.problem(known_args.degree)\n else:\n poly.problem(known_args.degree)", "title": "" }, { "docid": "1717949860fb45cd73863fd5f97acc0e", "score": "0.56868994", "text": "def __init__(self, model, output_path, wanted_lorentz = [],\n wanted_couplings = [], replace_dict={}):\n\n self.model = model\n self.model_name = export_cpp.ProcessExporterCPP.get_model_name(model['name'])\n self.aloha_model = create_aloha.AbstractALOHAModel(self.model_name)\n\n\n self.dir_path = output_path\n self.default_replace_dict = dict(replace_dict)\n # List of needed ALOHA routines\n self.wanted_lorentz = wanted_lorentz\n self.wanted_couplings = wanted_couplings\n\n # For dependent couplings, only want to update the ones\n # actually used in each process. For other couplings and\n # parameters, just need a list of all.\n self.coups_dep = {} # name -> base_objects.ModelVariable\n self.coups_indep = [] # base_objects.ModelVariable\n self.params_dep = [] # base_objects.ModelVariable\n self.params_indep = [] # base_objects.ModelVariable\n self.p_to_cpp = None \n\n # Prepare parameters and couplings for writeout in C++\n self.prepare_parameters()\n self.prepare_couplings(wanted_couplings)", "title": "" }, { "docid": "c5525bfc1f28e0a667830ec014bdb725", "score": "0.56353825", "text": "def convertProcessToLcia(O,p):\r\n \r\n # the players\r\n P = O.findUuid(os.path.splitext(p)[0])\r\n print 'Importing: ' + P.El('baseName').text\r\n L = O.createLciaFromTemplate()\r\n \r\n lcia_ns = L.getroot().nsmap\r\n \r\n # the basic data\r\n L.El('name','common').text = P.El('baseName').text\r\n L.El('methodology').text = 'TRACI 2.0'\r\n L.El('impactCategory').text = selectFromList(impactCategories)\r\n L.El('impactIndicator').text = raw_input('Impact Indicator: ')\r\n L.El('referenceYear').text = P.El('referenceYear','common').text\r\n L.El('duration').text = 'indefinite'\r\n L.El('interventionLocation').text = 'US'\r\n L.El('impactLocation').text = 'US'\r\n\r\n L.El('typeOfDataSet').text = selectFromList(indicatorTypes)\r\n L.El('normalisation').text= 'false'\r\n L.El('weighting').text = 'false'\r\n \r\n QR=L.El('quantitativeReference')\r\n CF=L.El('characterisationFactors')\r\n \r\n # the exchanges\r\n Xs = P.El('exchanges').getchildren()\r\n for x in Xs:\r\n flow_el = nonemap(x,'referenceToFlowDataSet')\r\n flow_uuid = flow_el.attrib['refObjectId']\r\n flow_comment = flow_el.getchildren()[0]\r\n if nonemap(x, 'exchangeDirection').text == 'Input':\r\n # Input exchange = reference flow\r\n ref_uuid = getReferenceFlow(O.findUuid(flow_uuid), nsmap = lcia_ns)\r\n RQ = ET.Element('referenceQuantity',\r\n attrib = {'type':'flow property data set',\r\n 'refObjectId':ref_uuid,\r\n 'uri':'../flowproperties/{0}.xml'.format(ref_uuid)},\r\n nsmap = lcia_ns)\r\n RQ.append(flow_comment)\r\n QR.append(RQ)\r\n print 
'reference flow: {0} {1}'.format(ref_uuid,flow_comment.text)\r\n \r\n else:\r\n # Output exchange = new factor\r\n mean_value = 1 / float(nonemap(x, 'resultingAmount').text) # note inversion!\r\n F = ET.Element('factor',nsmap = lcia_ns)\r\n FL = ET.Element('referenceToFlowDataSet', \r\n attrib={'type':'flow data set',\r\n 'refObjectId':flow_uuid,\r\n 'uri':'../flows/{0}.xml'.format(flow_uuid)},\r\n nsmap = lcia_ns)\r\n FL.append(flow_comment)\r\n F.append(FL)\r\n ET.SubElement(F, 'exchangeDirection',nsmap = lcia_ns).text='Output'\r\n ET.SubElement(F, 'meanValue', nsmap = lcia_ns).text = str( mean_value ) \r\n CF.append(F)\r\n print 'exchange: {0} {1}'.format(flow_uuid,flow_comment.text)\r\n\r\n return L", "title": "" }, { "docid": "b0f2020c341a65e6214adebd68e7903e", "score": "0.56281185", "text": "def generate_and_save_all_interpretations_for_model(pci_obj, X, y, mdl, mdl_name):\n \n ## Generate dataset-level interpretation for Logistic Regression model\n print('Dataset-level interpretation generation is in process for ' + mdl_name + '.')\n dataset_level_interpretation = pci_obj.dataset_level_interpretation(X, y, mdl)\n np.savetxt('./result/' + 'ACES_dataset_level_interpretation_' + mdl_name + '.txt', dataset_level_interpretation, fmt='%1.8e')\n print('Dataset-level interpretation generation is finished for' + mdl_name + '.')\n \n \n ## Generate whole dataset signed interpretation for Logistic Regression model\n print('Whole dataset signed interpretation generation is in process for ' + mdl_name + '.')\n whole_dataset_signed_interpretation = pci_obj.whole_dataset_signed_interpretation(X, y, mdl)\n np.savetxt('./result/' + 'ACES_whole_dataset_signed_interpretation_' + mdl_name + '.txt', whole_dataset_signed_interpretation, fmt='%1.8e')\n print('Whole dataset signed interpretation generation is finished for ' + mdl_name + '.')\n \n ## Generate whole dataset signed interpretation for Logistic Regression model\n print('Whole dataset unsigned interpretation generation is in process for ' + mdl_name + '.')\n whole_dataset_unsigned_interpretation = pci_obj.whole_dataset_unsigned_interpretation(X, y, mdl)\n np.savetxt('./result/' + 'ACES_whole_dataset_unsigned_interpretation_' + mdl_name + '.txt', whole_dataset_unsigned_interpretation, fmt='%1.8e')\n print('Whole dataset unsigned interpretation generation is finished for ' + mdl_name + '.')", "title": "" }, { "docid": "95c5748803c3d19cc5d921a875e0b086", "score": "0.5581633", "text": "def convert(prototxt_path, caffemodel_path, phase='test', output_model_name='freeze_graph_model.pb'):\n assert_exist_files(prototxt_path, caffemodel_path)\n\n data_output_path = os.path.join(os.path.dirname(prototxt_path), 'output.mat')\n code_output_path = os.path.join(os.path.dirname(prototxt_path), 'output.py')\n standalone_output_path = os.path.join(os.path.dirname(prototxt_path), output_model_name)\n\n try:\n with tf.Session() as sess:\n transformer = TensorFlowTransformer(prototxt_path, caffemodel_path, phase=phase)\n print_stderr('Converting data...')\n if data_output_path is not None:\n data = transformer.transform_data()\n print_stderr('Saving data...')\n with open(data_output_path, 'wb') as data_out:\n np.save(data_out, data)\n if code_output_path is not None:\n print_stderr('Saving source...')\n with open(code_output_path, 'wb') as src_out:\n src_out.write(transformer.transform_source())\n\n if standalone_output_path:\n filename, _ = os.path.splitext(os.path.basename(standalone_output_path))\n temp_folder = 
os.path.join(os.path.dirname(standalone_output_path), 'tmp')\n\n if not os.path.exists(temp_folder):\n os.makedirs(temp_folder)\n\n if data_output_path is None:\n data = transformer.transform_data()\n print_stderr('Saving data...')\n data_output_path = os.path.join(temp_folder, filename) + '.npy'\n with open(data_output_path, 'wb') as data_out:\n np.save(data_out, data)\n\n if code_output_path is None:\n print_stderr('Saving source...')\n code_output_path = os.path.join(temp_folder, filename) + '.py'\n with open(code_output_path, 'wb') as src_out:\n src_out.write(transformer.transform_source())\n\n checkpoint_path = os.path.join(temp_folder, filename + '.ckpt')\n graph_name = os.path.basename(standalone_output_path)\n graph_folder = os.path.dirname(standalone_output_path)\n input_node = transformer.graph.nodes[0].name\n output_node = transformer.graph.nodes[-1].name\n tensor_shape = transformer.graph.get_node(input_node).output_shape\n tensor_shape_list = [tensor_shape.batch_size, tensor_shape.height, tensor_shape.width, tensor_shape.channels]\n\n sys.path.append(os.path.dirname(code_output_path))\n module = os.path.splitext(os.path.basename(code_output_path))[0]\n class_name = transformer.graph.name\n KaffeNet = getattr(__import__(module), class_name)\n\n data_placeholder = tf.placeholder(tf.float32, tensor_shape_list, name=input_node)\n net = KaffeNet({input_node: data_placeholder})\n\n # load weights stored in numpy format\n net.load(data_output_path, sess)\n\n print_stderr('Saving checkpoint...')\n\n saver = tf.train.Saver()\n saver.save(sess, checkpoint_path)\n\n print_stderr('Saving graph definition as protobuf...')\n tf.train.write_graph(sess.graph.as_graph_def(), graph_folder, graph_name, as_text=False)\n\n input_graph_path = standalone_output_path\n input_saver_def_path = \"\"\n input_binary = True\n input_checkpoint_path = checkpoint_path\n output_node_names = output_node\n restore_op_name = 'save/restore_all'\n filename_tensor_name = 'save/Const:0'\n output_graph_path = standalone_output_path\n clear_devices = True\n\n print_stderr('Saving standalone model...')\n freeze_graph(input_graph_path, input_saver_def_path,\n input_binary, input_checkpoint_path,\n output_node_names, restore_op_name,\n filename_tensor_name, output_graph_path,\n clear_devices, '')\n\n shutil.rmtree(temp_folder)\n\n print_stderr('Done.')\n except KaffeError as err:\n fatal_error('Error encountered: {}'.format(err))", "title": "" }, { "docid": "870fd0325523f72544b469d902b7c2cc", "score": "0.5544181", "text": "def convert_odml():\n f_in = join(ROOT, \"tmp.xml\")\n f_out = join(ROOT, \"tmpout.xml\")\n\n o_ver_conv = VersionConverter(f_in)\n\n # either write to file\n o_ver_conv.write_to_file(f_out)\n\n # or pass it to the python xml parser\n doc = ODMLReader(parser='XML').from_string(str(o_ver_conv))\n doc.pprint()", "title": "" }, { "docid": "5d270ed9448ab6ff02368268de72a91b", "score": "0.5542094", "text": "def compare_lp_files(self, filename, ignored=None, my_om=None):\n if my_om is None:\n om = self.get_om()\n else:\n om = my_om\n tmp_filename = filename.replace('.lp', '') + '_tmp.lp'\n new_filename = ospath.join(self.tmppath, tmp_filename)\n om.write(new_filename, io_options={'symbolic_solver_labels': True})\n logging.info(\"Comparing with file: {0}\".format(filename))\n with open(ospath.join(self.tmppath, tmp_filename)) as generated_file:\n with open(ospath.join(ospath.dirname(ospath.realpath(__file__)),\n \"lp_files\",\n filename)) as expected_file:\n\n def chop_trailing_whitespace(lines):\n return 
[re.sub(r'\\s*$', '', ln) for ln in lines]\n\n def remove(pattern, lines):\n if not pattern:\n return lines\n return re.subn(pattern, \"\",\n \"\\n\".join(lines))[0].split(\"\\n\")\n\n expected = remove(ignored,\n chop_trailing_whitespace(\n expected_file.readlines()))\n generated = remove(ignored,\n chop_trailing_whitespace(\n generated_file.readlines()))\n\n def normalize_to_positive_results(lines):\n negative_result_indices = [\n n for n, line in enumerate(lines)\n if re.match(\"^= -\", line)]\n equation_start_indices = [\n [n for n in reversed(range(0, nri))\n if re.match('.*:$', lines[n])][0]+1\n for nri in negative_result_indices]\n for (start, end) in zip(\n equation_start_indices,\n negative_result_indices):\n for n in range(start, end):\n lines[n] = (\n '-'\n if lines[n] and lines[n][0] == '+'\n else '+'\n if lines[n]\n else lines[n]) + lines[n][1:]\n lines[end] = '= ' + lines[end][3:]\n return lines\n\n expected = normalize_to_positive_results(expected)\n generated = normalize_to_positive_results(generated)\n\n eq_(generated, expected,\n \"Failed matching expected with generated lp file:\\n\" +\n \"\\n\".join(unified_diff(expected, generated,\n fromfile=ospath.relpath(\n expected_file.name),\n tofile=ospath.basename(\n generated_file.name),\n lineterm=\"\")))", "title": "" }, { "docid": "bffa7248d281833024e2e2237090aa94", "score": "0.55059963", "text": "def model_to_owl(model, fname):\n io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')\n io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)\n\n try:\n fileOS = autoclass('java.io.FileOutputStream')(fname)\n except JavaException:\n logger.error('Could not open data file %s' % fname)\n return\n l3_factory = autoclass('org.biopax.paxtools.model.BioPAXLevel').L3.getDefaultFactory()\n model_out = l3_factory.createModel()\n for r in model.getObjects().toArray():\n model_out.add(r)\n io.convertToOWL(model_out, fileOS)\n\n fileOS.close()", "title": "" }, { "docid": "016e6167d18373b2c352dac2e0aac10f", "score": "0.54671896", "text": "def run(in_obo_filename=IN_OBO_FILENAME, in_pkl_filename=IN_PKL_FILENAME, in_tsv_filename=IN_TSV_FILELANE,\n in_monomers_filename=IN_MONOMERS_FILENAME,\n max_num_proteins=None,\n out_pickle_filename=OUT_PICKLE_FILENAME, out_pickle_filename_2=OUT_PICKLE_FILENAME_2,\n out_tsv_filename=OUT_TSV_FILENAME, out_fasta_filename=OUT_FASTA_FILENAME,\n out_fig_filename=OUT_FIG_FILENAME, out_structure_dirname=OUT_STRUCTURE_DIRNAME,\n out_viz_dirname=OUT_VIZ_DIRNAME):\n # get the PRO ontology and extract the modified proteins from the ontology\n # proteins = get_pro_from_obo(obo_filename=in_obo_filename, pkl_filename=in_pkl_filename, max_num_proteins=max_num_proteins)\n proteins = get_pro_from_tsv(in_tsv_filename, max_num_proteins=max_num_proteins)\n\n # parse the modified proteins and retrieve their sequences\n if not os.path.isfile(out_pickle_filename):\n # parse the modified proteins and retrieve their sequences\n parsed_proteins = []\n for i_protein, protein in enumerate(proteins):\n if i_protein % 100 == 0:\n print('Parsing protein {} of {}'.format(i_protein + 1, len(proteins)))\n parsed_proteins.append(parse_protein(protein))\n\n # save the parsed proteins in pickle format\n with open(out_pickle_filename, 'wb') as file:\n pickle.dump(parsed_proteins, file)\n else:\n # load saved parsed proteins in pickle format\n with open(out_pickle_filename, 'rb') as file:\n parsed_proteins = pickle.load(file)\n\n # read list of monomers\n monomers = {}\n with open(in_monomers_filename, 'r') as file:\n 
reader = csv.DictReader(file, dialect='excel')\n for row in reader:\n monomers[row['PRO id']] = {\n 'mod': bpforms.protein_alphabet.monomers.get(row['BpForms code'], None),\n 'origin': [],\n }\n if row['Base monomer']:\n monomers[row['PRO id']]['origin'] = row['Base monomer'].split(', ')\n\n # generate list of modified monomeric forms\n for protein in parsed_proteins:\n for modification in protein['modifications']:\n if modification['monomer'] not in monomers:\n monomers[modification['monomer']] = {\n 'mod': None,\n 'origin': [],\n }\n\n # print list of unmapped monomers\n unmapped_monomers = []\n for monomer, code in monomers.items():\n if not code['mod']:\n unmapped_monomers.append(monomer)\n unmapped_monomers.sort()\n if unmapped_monomers:\n print('Several PRO monomeric forms have not been mapped to BpForms monomeric forms:\\n {}'.format(\n '\\n '.join(unmapped_monomers)))\n\n # check for inconsistencies between residue and modified monomeric form\n monomer_codes = {}\n for code, monomer in bpforms.protein_alphabet.monomers.items():\n monomer_codes[monomer] = code\n\n for protein in parsed_proteins:\n for modification in protein.get('modifications', []):\n if modification['residue'] and modification['monomer']:\n monomer = monomers.get(modification['monomer'], None)\n if (monomer['mod'] and monomer['mod'].get_canonical_code(monomer_codes) != modification['residue']) \\\n or (monomer['origin'] and modification['residue'] not in monomer['origin']):\n codes = set(monomer['origin'])\n if monomer['mod']:\n codes.add(monomer['mod'].get_canonical_code(monomer_codes))\n msg = 'Modified monomeric form {} potentially inconsistent with residue {} != {}'.format(\n modification['monomer'], modification['residue'],\n ', '.join(codes))\n print(protein['id'] + ': ' + msg)\n\n # generate BpForms for each protein\n if not os.path.isdir(out_structure_dirname):\n os.mkdir(out_structure_dirname)\n\n if not os.path.isdir(out_viz_dirname):\n os.mkdir(out_viz_dirname)\n\n if not os.path.isfile(out_pickle_filename_2):\n for i_protein, protein in enumerate(parsed_proteins):\n if i_protein % 100 == 0:\n print('Generating BpForms {} of {}'.format(i_protein + 1, len(parsed_proteins)))\n\n protein['modified_seq'] = None\n if not protein['uniprot_id']:\n continue\n if not protein['seq']:\n continue\n if protein['pro_errors']:\n continue\n\n processed_form = gen_bpform(protein, monomers, monomer_codes, apply_modifications=False)\n protein['processed_seq'] = str(processed_form)\n if not processed_form.validate():\n processed_formula = processed_form.get_formula()\n protein['processed_formula'] = str(processed_formula)\n protein['processed_mol_wt'] = processed_form.get_mol_wt()\n protein['processed_charge'] = processed_form.get_charge()\n\n if not protein['modifications']:\n continue\n\n modified_form = gen_bpform(protein, monomers, monomer_codes, include_annotations=False)\n protein['modified_seq'] = str(modified_form)\n\n modified_form = gen_bpform(protein, monomers, monomer_codes)\n if not modified_form.validate():\n modified_formula = modified_form.get_formula()\n protein['modified_full_seq'] = str(modified_form)\n protein['modified_formula'] = str(modified_formula)\n protein['modified_mol_wt'] = modified_form.get_mol_wt()\n protein['modified_charge'] = modified_form.get_charge()\n\n protein['modifications_formula'] = str(modified_formula - processed_formula)\n protein['modifications_mol_wt'] = protein['modified_mol_wt'] - protein['processed_mol_wt']\n protein['modifications_charge'] = protein['modified_charge'] - 
protein['processed_charge']\n\n # with open(os.path.join(out_structure_dirname, protein['id'] + '.cml'), 'w') as file:\n # file.write(modified_form.export('cml'))\n\n form = gen_bpform(protein, monomers, monomer_codes,\n apply_processing=False, include_annotations=True)\n seq_features = []\n if protein['processing']:\n seq_features.append({\n 'label': 'Processed',\n 'color': '#cccccc',\n 'positions': [],\n })\n last = 0\n for p in protein['processing']:\n seq_features[0]['positions'].append([last + 1, p['start'] - 1])\n last = p['end']\n seq_features[0]['positions'].append([\n protein['processing'][-1]['end'] + 1,\n len(form.seq),\n ])\n\n if protein['processing'][0]['start'] == 1:\n seq_features[0]['positions'].pop(0)\n if protein['processing'][-1]['end'] == len(form.seq):\n seq_features[0]['positions'].pop(len(seq_features[0]['positions']) - 1)\n\n with open(os.path.join(out_viz_dirname, protein['id'] + '.svg'), 'w') as file:\n file.write(form.get_genomic_image(seq_features, width=910))\n\n if modified_form.get_canonical_seq(monomer_codes) != protein['processed_seq']:\n protein['pro_errors'].append('Modified sequence for {} not compatible with the processed sequence'.format(\n protein['id']))\n\n # save the parsed proteins in pickle format\n with open(out_pickle_filename_2, 'wb') as file:\n pickle.dump(parsed_proteins, file)\n else:\n with open(out_pickle_filename_2, 'rb') as file:\n parsed_proteins = pickle.load(file)\n\n # save the proteoforms in TSV format\n with open(out_tsv_filename, 'w') as file:\n writer = csv.writer(file, dialect='excel-tab')\n writer.writerow(['PRO id', 'UniProt id', 'Organism',\n 'Unmodified sequence (IUBMB)',\n 'Processing', 'Deletions', 'Processsed sequence (IUBMB)', 'Processsed formula',\n 'Processsed molecular weight', 'Processsed charge',\n 'Modifications', 'Crosslinks', 'Modified sequence (abbreviated BpForms)', 'Modified sequence (BpForms)',\n 'Is modified sequence concrete', 'Modified formula', 'Modified molecular weight', 'Modified charge',\n 'Modifications formula', 'Modifications molecular weight', 'Modifications charge',\n 'PRO issues', 'Monomeric form issues'])\n\n for parsed_protein in parsed_proteins:\n if parsed_protein.get('pro_errors', None):\n pro_errors = '. '.join(parsed_protein['pro_errors']) + '.'\n else:\n pro_errors = None\n\n if parsed_protein.get('modified_errors', None):\n modified_errors = '. 
'.join(parsed_protein['modified_errors']) + '.'\n else:\n modified_errors = None\n\n writer.writerow([\n parsed_protein['id'],\n parsed_protein.get('uniprot_id', None),\n parsed_protein.get('organism', None),\n parsed_protein.get('seq', None),\n ', '.join('{}-{}'.format(p['start'], p['end']) for p in parsed_protein['processing']),\n ', '.join('{}-{}'.format(deletion[0], deletion[1]) for deletion in parsed_protein.get('deletions', [])),\n parsed_protein.get('processed_seq', None),\n parsed_protein.get('processed_formula', None),\n parsed_protein.get('processed_mol_wt', None),\n parsed_protein.get('processed_charge', None),\n ', '.join('{} --> {} ({})'.format(m['residue'] or '?', m['monomer'], ', '.join(str(p) for p in m['positions']))\n for m in parsed_protein['modifications']),\n ', '.join('{}{}-{}{}'.format(xlink[0][1], xlink[0][0], xlink[1][1], xlink[1][0])\n for xlink in parsed_protein.get('crosslinks', [])),\n parsed_protein.get('modified_seq', None),\n parsed_protein.get('modified_full_seq', None),\n parsed_protein.get('modified_concrete', False),\n parsed_protein.get('modified_formula', None),\n parsed_protein.get('modified_mol_wt', None),\n parsed_protein.get('modified_charge', None),\n parsed_protein.get('modifications_formula', None),\n parsed_protein.get('modifications_mol_wt', None),\n parsed_protein.get('modifications_charge', None),\n pro_errors,\n modified_errors,\n ])\n\n # save the proteoforms in FASTA format\n seqs = (SeqRecord(id='{} | {}'.format(protein['id'], protein['uniprot_id']),\n seq=Seq(protein['modified_seq']),\n description='')\n for protein in parsed_proteins\n if protein['modified_seq'])\n SeqIO.write(seqs, out_fasta_filename, \"fasta\")\n\n # analyze frequency of modifications\n plot_modifications(parsed_proteins, fig_filename=out_fig_filename)\n\n # return proteins\n return proteins, parsed_proteins", "title": "" }, { "docid": "776ae1cf149e78ae0a4114b21438d138", "score": "0.54315245", "text": "def build_LM(in_file):\r\n print 'building language models...'\r\n # This is an empty method\r\n # Pls implement your code in below\r\n\r\n # Open in_file and split into a list of sentences\r\n input_file = open(in_file, 'r')\r\n train_input_list = input_file.read().split(\"\\r\\n\")\r\n\r\n # Set up dictionaries to store the 4grams generated from input.train\r\n # ngram_count stores the total number of ngrams each language has generated\r\n four_gram_table = {}\r\n ngram_count = {'malaysian': 0, 'indonesian': 0, 'tamil': 0}\r\n\r\n '''\r\n NGRAM GENERATION\r\n Multiple for loops through each sentence and then each character for\r\n 4gram generation. 
Each sentence is padded with 3 'START's and 3 'END's\r\n '''\r\n for sentence in train_input_list:\r\n sentence_parts = sentence.split(None, 1)\r\n # To handle possible empty strings or weird strings in training input\r\n if (len(sentence_parts) != 2):\r\n break\r\n language = sentence_parts[0]\r\n sentence_body = sentence_parts[1]\r\n char_list = list(sentence_body)\r\n for index in range(3):\r\n char_list.insert(0, 'START')\r\n char_list.append('END')\r\n for index in range(len(char_list) - 4):\r\n four_gram = (char_list[index], char_list[index + 1],\r\n char_list[index + 2], char_list[index + 3])\r\n if four_gram in four_gram_table:\r\n four_gram_table[four_gram][language] += 1\r\n ngram_count[language] += 1\r\n else:\r\n # Add-one smoothing for new ngrams\r\n four_gram_table[four_gram] = {'malaysian': 1, 'indonesian': 1,\r\n 'tamil': 1}\r\n for value in ngram_count.itervalues():\r\n value += 1\r\n four_gram_table[four_gram][language] += 1\r\n ngram_count[language] += 1\r\n return [four_gram_table, ngram_count]\r\n\r\n input_file.close()", "title": "" }, { "docid": "8776c3c0636ee64e48b9236afde29d96", "score": "0.54051876", "text": "def run(args: argparse.Namespace) -> None:\n assert WaymoDataFileReader is not None, (\n \"Please install the requirements in scripts/optional.txt to use\"\n \"Waymo conversion.\"\n )\n if not os.path.exists(args.output):\n os.makedirs(args.output)\n result = from_waymo(\n args.input,\n args.output,\n args.save_images,\n args.use_lidar_labels,\n args.nproc,\n )\n save(os.path.join(args.output, \"scalabel_anns.json\"), result)", "title": "" }, { "docid": "89aa08012b425e9517160341b3b4c2b8", "score": "0.5397848", "text": "def __init__(self,pedfname = None,title='title',outfname='rgMakeGenModel.phe',\n xtradir=\"/foo/bar\", linkage=True, logf = None, model=additive,\n labels=['FounderNonAff','FounderAff','OffspringNonAff','OffspringAff']):\n ehet = '1'\n ehom1 = '0'\n ehom2 = '2'\n missval = '.'\n missing = ['N','0','.','-']\n if model == additive: # we have ehom2 = 2 major alleles\n modeldict = {ehom1:'2',ehet:'1',ehom2:'0',missval:missval}\n elif model == zadditive:\n modeldict = {ehom1:'1',ehet:'0',ehom2:'-1',missval:missval}\n elif model == dominant:\n modeldict = {ehom1:'1',ehet:'1',ehom2:'0',missval:missval}\n else:\n modeldict = {ehom1:'2',ehet:'0',ehom2:'0',missval:missval}\n # model code translation dicts\n\n swapdict = {ehom1:ehom2,ehom2:ehom1,ehet:ehet,missval:missval} # if initial ref allele was wrong, use these to swap\n mdict = dict(zip(missing,missing))\n pedf = '%s.ped' % pedfname\n f = file(pedf,'r')\n if linkage: # read map file\n map = readMap(pedfname)\n rslist = [x[1] for x in map] # get rs numbers\n else:\n head = f.next().strip()\n rslist = head.split()\n nrs = len(rslist) # number of markers\n elen = 2*nrs + 6 # expected # elements on each line\n logf.write('%s %s: found %d for nrs\\n' % (thisprog,timenow(),nrs))\n gm = {}\n gm['founders'] = [] # array.array('c',[]) for x in xrange(nrs)] # marker rows, subject cols\n gm['offspring'] = [] # [array.array('c',[]) for x in xrange(nrs)] # marker rows, subject cols\n adicts = [{} for x in xrange(nrs)] # count of alleles in a dict for each marker\n refallele = [None for x in xrange(nrs)] # list of first observed alleles\n nsubj = 0\n indiv = {'founders':[],'offspring':[]}\n for lnum,l in enumerate(f):\n ll = l.strip().split()\n if (lnum+1) % 200 == 0:\n logf.write('%s %s: Processing line %d\\n' % (thisprog, timenow(),lnum+1))\n if len(ll) < elen: # ? 
short ?\n logf.write('%s %s: Line %d is %d long, expected %d\\n' % (thisprog, timenow(),\n lnum,len(ll),elen))\n else:\n garray = array.array('c',[missval,]*nrs)\n nsubj += 1\n sid = '%s_%s' % (ll[0],ll[1])\n if sid == '1_1': # eesh\n sid = '%d_%d' % (nsubj,nsubj)\n isFounder = isOff = False\n status = labels[0] # founder unaff\n if ll[2] <> '0' and ll[3] <> '0': # has parent ids\n iclass = 'offspring'\n status = labels[2] # unaffected offspring\n if ll[5] == '2':\n status = labels[3] # affected offspring\n else:\n iclass = 'founders'\n if ll[5] == '2':\n status = labels[1] #change from unaff to aff founder label\n gender = 'M'\n if ll[4] == '2':\n gender = 'F'\n ped = ll[:6]\n indiv[iclass].append(ped) # for row wise output\n for snp in xrange(nrs):\n pos = 2*snp + 6 # first\n g1,g2 = ll[pos],ll[pos+1] # pair of genos\n if mdict.get(g1,None) or mdict.get(g2,None): # one or both missing\n esnp = missval # missing value\n else:\n if not refallele[snp]:\n refallele[snp] = g1 # first one we saw!\n for g in (g1,g2):\n n = adicts[snp].get(g,0)\n n += 1\n adicts[snp][g] = n\n if g1 == g2: # hom\n if g1 == refallele[snp]:\n esnp = ehom2 # 2 copies of current reference allele\n else:\n esnp = ehom1 # no copies\n else:\n esnp = ehet # het - always has one copy of reference allele\n garray[snp] = esnp\n gm[iclass].append(garray) # append genos for this new subject\n for ek in gm.keys():\n lek = len(gm[ek])\n if len(gm[ek]) > 0:\n lek0 = len(gm[ek][0])\n s = 'for %s, have %d subjects with %d markers' % (ek,lek,lek0)\n print s\n logf.write(s)\n for x in range(lek):\n if len(gm[ek][x]) <> lek0:\n s = 'for row %d, len = %d, not %d' % (x, len(gm[ek][x]),lek0)\n print s\n logf.write(s)\n logf.write('%s %s: Now checking major allele assignment and fixing as needed\\n' % (thisprog,timenow()))\n for iclass in gm.keys():\n for subject in xrange(len(gm[iclass])): # for each subject\n for snp in xrange(nrs): # now check to see if reference = major allele\n major = majAllele(adicts[snp])\n if major <> refallele[snp]: # either None or we need to change all the codes\n if major <> None:\n gm[iclass][subject][snp] = swapdict[gm[iclass][subject][snp]]\n for iclass in gm.keys():\n for subject in xrange(len(gm[iclass])): # now convert to genetic model wanted\n gm[iclass][subject] = [modeldict[x] for x in gm[iclass][subject]] # translate to model\n self.gm = gm\n self.model = model\n self.indiv = indiv\n self.nrs = nrs\n self.nsubj = nsubj\n self.logf = logf\n self.basename = title\n self.outfname = outfname\n self.rslist = rslist\n pedhead = 'famid\\tiid\\tfid\\tmid\\tgender\\taffection\\t'\n rs = '\\t'.join(rslist)\n self.outhead = '%s%s\\n' % (pedhead,rs)", "title": "" }, { "docid": "ce63391b0ba53484bcdbfa3c177f5093", "score": "0.5392235", "text": "def test_LM(in_file, out_file, LM):\n print(\"testing language models...\")\n\n # Parse inputs\n data = read_by_line(in_file)\n model = LM[0]\n languages = LM[1]\n\n # Create an empty probability reference sheet\n basic_probability_ref = dict()\n for l in languages:\n basic_probability_ref[l] = np.float64(1)\n\n # Clear file of any results, to prepare the output file for eval.py\n open(out_file, 'w').close()\n \n with open(out_file, 'a') as f:\n # Process each sentence and make a guess\n for sentence in data:\n p_ref = basic_probability_ref.copy()\n tokens = tokenise(sentence, token_size)\n num_undetected = 0\n\n # Multiple token probabilities for each token\n for t in tokens:\n if t in model:\n # Multiply the respective probability of each language\n counts = model[t]\n 
for l in languages:\n p_ref[l] += counts[l]\n else:\n num_undetected += 1\n\n # Find the best prediction using the maximum product\n if num_undetected / len(tokens) > other_threshold:\n prediction = other_language\n else:\n prediction = max(p_ref.items(), key=operator.itemgetter(1))[0]\n\n # Write the prediction to the output file\n f.write(prediction + \" \" + sentence + \"\\n\")", "title": "" }, { "docid": "11937e2179eb9e5be7829ede94d45adb", "score": "0.5387432", "text": "def test_issue4707():\n nlp = English()\n nlp.add_pipe(\"sentencizer\")\n nlp.add_pipe(\"entity_ruler\")\n assert nlp.pipe_names == [\"sentencizer\", \"entity_ruler\"]\n exclude = [\"tokenizer\", \"sentencizer\"]\n with make_tempdir() as tmpdir:\n nlp.to_disk(tmpdir, exclude=exclude)\n new_nlp = load_model_from_path(tmpdir, disable=exclude)\n assert \"sentencizer\" not in new_nlp.pipe_names\n assert \"entity_ruler\" in new_nlp.pipe_names", "title": "" }, { "docid": "fe5676fbca6769a80c88f1d8e1109454", "score": "0.5377364", "text": "def main(argv):\n parser = argparse.ArgumentParser(\n description='Create all possible determinizations of this domain.')\n parser.add_argument('-d', '--domain', required=True)\n parser.add_argument('-o', '--output', required=True) \n args = parser.parse_args()\n domain_file_name = args.domain\n output_file_name = args.output\n \n # Reading domain file.\n domain_str = ''\n try:\n with open(domain_file_name, 'r') as domain_file:\n for line in domain_file:\n if line.startswith(';;') or line.isspace():\n continue;\n domain_str += line\n except IOError:\n print \"Could not read file:\", domain_file_name\n sys.exit(-1) \n \n # Parsing the domain tree.\n domain_tree = ppddl_util.parse_sexp(domain_str)\n all_prob_effects_list = []\n \n # Getting all possible probabilistic effects.\n ppddl_util.get_all_probabilistic_effects(domain_tree, all_prob_effects_list)\n \n # Getting the most likely outcome determinization.\n mlo_determinization = []\n for probabilistic_effect_info in all_prob_effects_list:\n mlo_determinization.append(\n ppddl_util.get_mlo_determinization_effect(probabilistic_effect_info))\n \n # Writing the MLO determinization to a PPDDL file.\n determinization_ppddl_tree = copy.deepcopy(domain_tree)\n description_text = \"\"\n for effect_info in mlo_determinization:\n description_text += \"%s %d\\n\" % (effect_info[0], effect_info[1][0])\n ppddl_util.determinize_tree(mlo_determinization, determinization_ppddl_tree)\n ppddl_util.clean_up_tree(determinization_ppddl_tree)\n f = open('%s_mlo_det.pddl' % (output_file_name), 'w')\n fd = open('%s_mlo_det.desc' % (output_file_name), 'w')\n f.write(ppddl_util.make_str(determinization_ppddl_tree[0]))\n fd.write(description_text)\n f.close()\n fd.close()\n \n print 'Created MLO determinization for %s' % domain_file_name", "title": "" }, { "docid": "2b6f6722ce9ac5c08f4612e25ef7aec0", "score": "0.5376005", "text": "def make_model(pbm):\n for stmt in pbm.statements:\n try:\n # Skip statements with no subject\n if stmt.agent_list()[0] is None and \\\n not isinstance(stmt, Conversion):\n continue\n # Assemble statements\n if isinstance(stmt, Modification):\n pbm._assemble_modification(stmt)\n elif isinstance(stmt, RegulateActivity):\n pbm._assemble_regulate_activity(stmt)\n elif isinstance(stmt, RegulateAmount):\n pbm._assemble_regulate_amount(stmt)\n elif isinstance(stmt, Gef):\n pbm._assemble_gef(stmt)\n elif isinstance(stmt, Gap):\n pbm._assemble_gap(stmt)\n elif isinstance(stmt, ActiveForm):\n pbm._assemble_active_form(stmt)\n elif isinstance(stmt, 
Complex):\n pbm._assemble_complex(stmt)\n elif isinstance(stmt, Conversion):\n pbm._assemble_conversion(stmt)\n elif isinstance(stmt, Autophosphorylation):\n pbm._assemble_autophosphorylation(stmt)\n elif isinstance(stmt, Transphosphorylation):\n pbm._assemble_transphosphorylation(stmt)\n else:\n logger.info('Unhandled statement: %s' % stmt)\n except Exception:\n pass\n return pbm.model", "title": "" }, { "docid": "9045c7c73ca55f814b38834d2149df8b", "score": "0.5342216", "text": "def try_load_model(self):\r\n filename = 'constraints_problem_size_{0}'.format(self.get_real_n())\r\n path = '../cache_constraints/'\r\n\r\n solution_is_loop = self.sol_is_loop()\r\n\r\n if solution_is_loop:\r\n filename += '_loop'\r\n path_filename = path + filename + '.lp'\r\n\r\n model_reader = ModelReader()\r\n\r\n self.outer_model_interface.check_dir(path)\r\n\r\n try:\r\n # Load the constraints\r\n self.docplex_model = model_reader.read_model(path_filename)\r\n except:\r\n # If they are not cached, create them\r\n print(\"\\nFile {} not found :(\\nI'm gonna create it! :D\".format(path_filename))\r\n self.docplex_model = self.generate_general_constraints(path, filename)", "title": "" }, { "docid": "3c879101600ab140261ce79e742c8479", "score": "0.5335524", "text": "def generateModel(self):\n\t\tglobal startLoadModel, doneLoadModel\n\t\tprint(\"model type:\" + self.modeltype)\n\t\tstartLoadModel = time.time()\n\t\tif not os.path.exists(self.dictionaryfile):\n\t\t\tprint(\"creating dictionary \" + self.dictionaryfile)\n\t\t\tself.dictionary = self.makeDictionaryMemoryFriendly(self.corpusfile)\n\t\telse:\n\t\t\tprint(\"reading dictionary \" + self.dictionaryfile)\n\t\t\tself.dictionary = self.loadDictionary(self.dictionaryfile)\n\t\tif not os.path.exists(self.corpusmatrixfile):\n\t\t\tprint(\"serializing corpus \" + self.corpusmatrixfile)\n\t\t\tself.serializeCorpusMemoryFriendly(self.corpusfile, self.dictionary)\n\t\tif self.modeltype == \"LDA\":\n\t\t\tprint(\"creating lda model with \" + str(self.topics) + \" topics.\")\n\t\t\tif self.online:\n\t\t\t\tprint(\"Running in online mode with chunksize: \" + str(self.chunksize))\n\t\t\telse:\n\t\t\t\tprint(\"Running in offline mode with \" + str(self.passes) + \" passes\")\n\t\t\tif self.distributed:\n\t\t\t\tprint(\"Running in distributed mode\")\n\t\t\tself.generateLDAModel(self.topics, self.passes, self.distributed)\n\t\telif self.modeltype == \"LSI\":\n\t\t\tprint(\"creating lsi model\")\n\t\t\tself.generateLSIModel(topics)\n\t\telif self.modeltype == \"HDP\":\n\t\t\tprint(\"creating hdp model\")\n\t\t\tself.generateHDPModel()\n\t\tdoneLoadModel = time.time()\n\t\tprint(\"Time to generate model:\" + str(doneLoadModel - startLoadModel))", "title": "" }, { "docid": "91c5431c7473abefa61a82391d7c11c8", "score": "0.5323186", "text": "def test_LM(in_file, out_file, LM):\r\n print \"testing language models...\"\r\n # This is an empty method\r\n # Pls implement your code in below\r\n\r\n # Setting up data provided from given LM\r\n four_gram_table = LM[0]\r\n ngram_count = LM[1]\r\n\r\n # Open in_file and split into a list of sentences\r\n # Open out_file for writing predictions\r\n input_file = open(in_file, 'r')\r\n test_input_list = input_file.read().split(\"\\r\\n\")\r\n output_file = open(out_file, 'w')\r\n\r\n for sentence in test_input_list:\r\n # To handle possible empty strings or invalid input\r\n if sentence == \"\":\r\n break\r\n # Ngram probability score for current sentence\r\n ngram_score = {'malaysian': 0, 'indonesian': 0, 'tamil': 0}\r\n # Counters to 
keep track of number of unencountered ngrams generated by\r\n # the current sentence.\r\n invalid_ngram_count = 0\r\n total_ngram_count = 0\r\n # Sentence is padded with 3 'START's and 3 'END's\r\n char_list = list(sentence)\r\n for index in range(3):\r\n char_list.insert(0, 'START')\r\n char_list.append('END')\r\n for index in range(len(char_list) - 4):\r\n four_gram = (char_list[index], char_list[index + 1],\r\n char_list[index + 2], char_list[index + 3])\r\n # Log10 is used to prevent underflow of individual ngram_scores\r\n if four_gram in four_gram_table:\r\n for language, count in four_gram_table[four_gram].iteritems():\r\n ngram_score[language] += math.log10(count/ngram_count[language])\r\n else:\r\n invalid_ngram_count += 1\r\n total_ngram_count += 1\r\n # invalid_ngram_ratio represents the proportion of unencountered ngrams\r\n # out of all ngrams generated from current sentence\r\n invalid_ngram_ratio = invalid_ngram_count / total_ngram_count\r\n # The set threshold of 0.6 indicates that the sentence might not be a\r\n # valid language in the given LM\r\n if max(ngram_score.itervalues()) is 0 or invalid_ngram_ratio > 0.6:\r\n predicted_language = 'other'\r\n else:\r\n predicted_language = max(ngram_score.iterkeys(), key=(lambda language: ngram_score[language]))\r\n output_file.write(predicted_language + \" \" + sentence + \"\\n\")\r\n input_file.close\r\n output_file.close", "title": "" }, { "docid": "3c8230e8f56aabea2a2d18b33a22dab9", "score": "0.5304721", "text": "def generate(args):\n if args.cuda and torch.cuda.is_available():\n device = 0\n use_cuda = True\n elif args.cuda and not torch.cuda.is_available():\n print(\"You do not have CUDA, turning cuda off\")\n device = -1 \n use_cuda = False\n else:\n device = -1\n use_cuda=False\n\n #Load the vocab\n vocab = du.load_vocab(args.vocab)\n eos_id = vocab.stoi[EOS_TOK]\n pad_id = vocab.stoi[PAD_TOK]\n\n if args.ranking: # default is HARD one, the 'Inverse Narrative Cloze' in the paper\n dataset = du.NarrativeClozeDataset(args.valid_data, vocab, src_seq_length=MAX_EVAL_SEQ_LEN, min_seq_length=MIN_EVAL_SEQ_LEN, LM=False)\n # Batch size during decoding is set to 1\n batches = BatchIter(dataset, 1, sort_key=lambda x:len(x.actual), train=False, device=-1)\n else:\n dataset = du.SentenceDataset(args.valid_data, vocab, src_seq_length=MAX_EVAL_SEQ_LEN, min_seq_length=MIN_EVAL_SEQ_LEN, add_eos=False) #put in filter pred later\n # Batch size during decoding is set to 1\n batches = BatchIter(dataset, args.batch_size, sort_key=lambda x:len(x.text), train=False, device=-1)\n\n data_len = len(dataset)\n\n #Create the model\n with open(args.load, 'rb') as fi:\n if not use_cuda:\n model = torch.load(fi, map_location=lambda storage, loc : storage)\n else:\n model = torch.load(fi, map_location=torch.device('cuda'))\n\n if not hasattr(model.latent_root, 'nohier'):\n model.latent_root.set_nohier(args.nohier) #for backwards compatibility\n\n model.decoder.eval()\n model.set_use_cuda(use_cuda)\n\n #For reconstruction\n if args.perplexity:\n loss = calc_perplexity(args, model, batches, vocab, data_len)\n print(\"Loss = {}\".format(loss))\n elif args.schema:\n generate_from_seed(args, model, batches, vocab, data_len)\n elif args.ranking:\n do_ranking(args, model, batches, vocab, data_len, use_cuda)\n else:\n# sample_outputs(model, vocab)\n reconstruct(args, model, batches, vocab)", "title": "" }, { "docid": "6b20588231ce0fcb6d4f7434d73e68c3", "score": "0.52846843", "text": "def generateModel(modelName, pack):\n\n (params, run, cost, optim_date, 
var_set) = pack\n\n with open('%s_optimized.mo' % modelName, 'w') as file:\n header = \"\"\"model {mn}_optimized \"Generated by PostProcess/postprocess_optim.py {d} with cost {c} lowest at run {r}\"\n extends {mn} (\n \\n\"\"\".format(mn=modelName, d = optim_date, c = '%.6f' % cost, r = '%d' % run)\n footer = ');\\nend %s_optimized;\\n' % modelName\n\n param_lines = (' %s = %.6e' % param for param in params)\n # use join to avoid placing the comma after the last\n lines = ',\\n'.join(param_lines)\n\n\n file.write(header)\n file.writelines(lines)\n file.write(footer)", "title": "" }, { "docid": "4d6897ba3959406b0ee3b87b094e823e", "score": "0.5258285", "text": "def load_model(length=MAX_UTTERENCE_LENGTH):\n return torch.load(os.path.join(PATH_TO_SAVE, 'generator_{}.pt'.format(length))).to(DEVICE)", "title": "" }, { "docid": "078acbdaf7a42c95ccc2c6c684279376", "score": "0.5254087", "text": "def compile_po_file(po_path: Path) -> None:\n output_path = po_path.parent\n\n target_json = po_path.with_suffix(\".json\")\n if target_json.exists():\n target_json.unlink()\n convert_catalog_to_json(po_path, output_path, po_path.stem)\n\n target_mo = po_path.with_suffix(\".mo\")\n if target_mo.exists():\n target_mo.unlink()\n compile_to_mo(po_path)", "title": "" }, { "docid": "b3a0443fbd71bfab66c479b31441d52a", "score": "0.52386135", "text": "def test_LM(in_file, out_file, LM):\r\n print \"testing language models...\"\r\n # This is an empty method\r\n # Pls implement your code in below\r\n malay_LM = LM['malaysian']\r\n indon_LM = LM['indonesian']\r\n tamil_LM = LM['tamil']\r\n\r\n FD_in = open(in_file, 'r')\r\n FD_out = open(out_file, 'w')\r\n\r\n lines = FD_in.read()\r\n lines_array = lines.replace('\\r', ' ').split('\\n')\r\n lines_array.pop(len(lines_array) - 1)\r\n\r\n for line in lines_array:\r\n malay_count = 1\r\n indon_count = 1\r\n tamil_count = 1\r\n\r\n total_count = 0\r\n miss_count = 0\r\n\r\n four_char_array = get_four_char_array(line)\r\n\r\n for four_char in four_char_array:\r\n # key in malayLM = key in all LMs due to add one smoothing\r\n if four_char in malay_LM:\r\n malay_count += math.log10(malay_LM[four_char])\r\n indon_count += math.log10(indon_LM[four_char])\r\n tamil_count += math.log10(tamil_LM[four_char])\r\n else:\r\n miss_count += 1\r\n total_count += 1\r\n\r\n if (miss_count / total_count > 0.7):\r\n FD_out.write(\"other \" + line + \"\\n\")\r\n elif malay_count > indon_count and malay_count > tamil_count:\r\n FD_out.write(\"malaysian \" + line + \"\\n\")\r\n elif indon_count > malay_count and indon_count > tamil_count:\r\n FD_out.write(\"indonesian \" + line + \"\\n\")\r\n else:\r\n FD_out.write(\"tamil \" + line + \"\\n\")\r\n\r\n FD_in.close()\r\n FD_out.close()", "title": "" }, { "docid": "5a76f0d38ddb8ab17ec18597b44bc149", "score": "0.5236154", "text": "def to_lalinference(self, **kwargs):\n return self.write(file_format=\"lalinference\", package=\"gw\", **kwargs)", "title": "" }, { "docid": "35a86959eb9fc6a9c3356b7be538a93d", "score": "0.5229652", "text": "def make_input(\n wel_qin,\n wel_qin_1,\n wel_qout,\n wel_qout_1,\n wel_coordsin,\n wel_coordsin_1,\n wel_coordsout,\n wel_coordsout_1,\n model_path,\n name,\n exe_name='mf6',\n verbosity_level=0):\n # created to load, build and save the simulation\n sim = flopy.mf6.MFSimulation(\n sim_name=name,\n sim_ws=model_path, #working folder\n exe_name=exe_name,\n verbosity_level=verbosity_level, #standard output to be written - error messages should be included (=1)\n )\n # time discretization package\n times = (10.0, 120, 
1.0)\n tdis_rc = [(1.0, 1, 1.0), times, times, times]\n flopy.mf6.ModflowTdis(\n sim, pname=\"tdis\",\n time_units=\"DAYS\",\n nper=4, # number of stress periods\n perioddata=tdis_rc, # [perlen(len stress period), nstp(time-steps in the stressperiod), tsmult(multiplier of of the lenght of sucessive time-steps]\n )\n #iterative model solution package\n flopy.mf6.ModflowIms(sim) # default solver - nearly linear models - confined or a single unconfined layer that is tick enough to contain the water table within a single layer\n # groundwater flow model\n gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)\n # spatial discretization package\n flopy.mf6.ModflowGwfdis(gwf, nrow=10, ncol=10) # number ir rows and cols since the model has only one layer\n # Initial conditions package\n flopy.mf6.ModflowGwfic(gwf) #models using steady state in the beginning won't be affected\n # node property flow package pane - default is harmonic mean\n flopy.mf6.ModflowGwfnpf(\n gwf,\n save_flows=True,\n save_specific_discharge=True,\n icelltype=[0], # 0 unconfined, -1 confined\n k=[0.5], # initial value of k\n k33=[0.1], # vertical anisotropy\n )\n # storage package specifying sy - specific yield bigger than 0 means the cell is convertible\n sy = flopy.mf6.ModflowGwfsto.sy.empty(\n gwf,\n default_value=0.2 # convertible cells\n )\n #stirage package specifying ss - specific storage\n ss = flopy.mf6.ModflowGwfsto.ss.empty(\n gwf, default_value=0.000001\n )\n # storage package\n flopy.mf6.ModflowGwfsto(\n gwf,\n pname=\"sto\",\n save_flows=True,\n save_specific_discharge=True,\n iconvert=1, # flag for each cell that specifies whether or not a cell is convertible for the storage calculation. 0 indicates confined storage is used. >0 indicates confined storage is used when head is above cell top and a mixed formulation of unconfined and confined storage is used when head is below cell top\n ss=ss,\n sy=sy,\n transient={0: True}, # keyword to indicate that stress period IPER is transient. Transient conditions will apply until the STEADY-STATE keyword is specified in a subsequent BEGIN PERIOD block\n )\n #dictionary of boundaries Each well is defined through definition of layer (int), row (int), column (int), flux (float). The simplest form is a dictionary with a lists of boundaries for each stress period, where each list of boundaries itself is a list of boundaries. 
Indices of the dictionary are the numbers of the stress period\n # if the number of stress periods is larger than the dic than the last speficied will apply until the end\n stress_period_data = {\n 1: _make_wel_stress_period(gwf, wel_qin=wel_qin, wel_qin_1=wel_qin_1, wel_coordsin=wel_coordsin, wel_coordsin_1=wel_coordsin_1,\n wel_qout=wel_qout, wel_qout_1=wel_qout_1, wel_coordsout=wel_coordsout, wel_coordsout_1=wel_coordsout_1)[0],\n 2: _make_wel_stress_period(gwf, wel_qin=wel_qin, wel_qin_1=wel_qin_1, wel_coordsin=wel_coordsin, wel_coordsin_1=wel_coordsin_1,\n wel_qout=wel_qout, wel_qout_1=wel_qout_1, wel_coordsout=wel_coordsout, wel_coordsout_1=wel_coordsout_1)[0],\n 3: _make_wel_stress_period(gwf, wel_qin=wel_qin, wel_qin_1=wel_qin_1, wel_coordsin=wel_coordsin, wel_coordsin_1=wel_coordsin_1,\n wel_qout=wel_qout, wel_qout_1=wel_qout_1, wel_coordsout=wel_coordsout, wel_coordsout_1=wel_coordsout_1)[0]\n }\n # defines the well package\n flopy.mf6.ModflowGwfwel(\n gwf,\n stress_period_data=stress_period_data,\n )\n # constant head package\n flopy.mf6.ModflowGwfchd(\n gwf,\n stress_period_data=[\n [(0, 0, 0), 1.], # top left cell [cell_id, head, aux, boundname]\n [(0, 9, 9), 1.]], # bottom right cell\n )\n # variable for the output control\n budget_file = name + '.bud'\n head_file = name + '.hds'\n # output control package\n flopy.mf6.ModflowGwfoc(\n gwf,\n budget_filerecord=budget_file,\n head_filerecord=head_file,\n saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')])\n sim.write_simulation()", "title": "" }, { "docid": "ef942e4bbae34ac5e0e916536c3fd820", "score": "0.5222133", "text": "def main():\n args = parse_arguments()\n\n if args.output_name:\n output_name = args.output_name\n else:\n output_name = args.xodr_file.rpartition(\".\")[0]\n output_name = f\"{output_name}.xml\" # only name of file\n\n if os.path.isfile(output_name) and not args.force_overwrite:\n print(\n \"Not converting because file exists and option 'force-overwrite' not active\",\n file=sys.stderr,\n )\n sys.exit(-1)\n\n with open(\"{}\".format(args.xodr_file), \"r\") as file_in:\n opendrive = parse_opendrive(etree.parse(file_in).getroot())\n\n scenario = convert_opendrive(opendrive)\n\n if not args.osm:\n writer = ExtendedCommonRoadFileWriter(\n scenario, source=\"OpenDRIVE 2 Lanelet Converter\"\n )\n\n with open(f\"{output_name}\", \"w\") as file_out:\n writer.write_scenario_to_file_io(file_out)\n\n else:\n l2osm = OSMConverter(args.osm)\n osm = l2osm(scenario)\n with open(f\"{output_name}\", \"w\") as file_out:\n file_out.write(etree.tostring(osm, encoding=\"unicode\", pretty_print=True))", "title": "" }, { "docid": "f23cd82b5a7d336e4b8f27a862f78256", "score": "0.52209806", "text": "def main(model=Path(\"L:\\\\BigData\\\\Env\\\\SavedModels\"), output_dir=Path(\"L:\\\\BigData\\\\Env\\\\SavedModels\")):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n\n ner = nlp.get_pipe(\"ner\")\n\n ner.add_label(\"Email\")# add new entity label to entity recognizer\n ner.add_label(\"Full Name\")\n ner.add_label(\"First Name\")\n ner.add_label(\"Updatedtime\")\n ner.add_label(\"City\")\n ner.add_label(\"Postcode\")\n ner.add_label(\"Date\")\n ner.add_label(\"Phone\")\n ner.add_label(\"State\")\n ner.add_label(\"Country\")\n ner.add_label(\"Street\")\n ner.add_label(\"Id\")\n ner.add_label(\"Last Name\")\n ner.add_label(\"username\")\n ner.add_label(\"Transaction Id\")\n\n move_names = list(ner.move_names)\n\n # test the trained model\n test_text = \"[email protected] , 
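# Registering extra entity labels on an existing NER pipe, the same steps
# the surrounding snippet performs; the pipeline name is hypothetical and
# spaCy plus a pretrained model must be installed for this to run.
import spacy

nlp = spacy.load("en_core_web_sm")   # any pretrained pipeline with an "ner" pipe
ner = nlp.get_pipe("ner")
ner.add_label("Email")               # new label, exactly as add_label is used above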
Glenna Olson , Corbin , 1999-07-05T12:33:47.066Z , Kingshire 77787 1/8/2013 | (695)464-6165 x9467 , Iowa Indonesia 038 Schumm Walks 0 Ortiz Frida 362 \"\n doc = nlp(test_text)\n print(\"Entities in '%s'\" % test_text)\n for ent in doc.ents:\n print(ent.label_, ent.text)\n\n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir() # rename model\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n # Check the classes have loaded back consistently\n assert nlp2.get_pipe(\"ner\").move_names == move_names\n doc2 = nlp2(test_text)\n for ent in doc2.ents:\n print(ent.label_, ent.text)", "title": "" }, { "docid": "ba506d586a69c49c3403983dbd1f5025", "score": "0.52202463", "text": "def GenFile(DSLModel, Table, View):", "title": "" }, { "docid": "8666e838db88654723d809adce0d3d19", "score": "0.52056324", "text": "def generateModel(self,startPoint,start,end,tol,vel):\r\n\t\t\r\n\t\t(code,levels,cantPoints) = Loader().load(self.path,startPoint)\r\n\t\thaveParser = False\r\n\r\n\t\tadded = 0\r\n\t\tself.start = start\r\n\t\tself.end = end\r\n\t\t\r\n\t\tif( tol > 0):\t\t\r\n\t\t\tself.parser = PointsAdder(code,levels,cantPoints,tol)\r\n\t\t\t((code,levels,cantPoints),(self.start,self.end)) = self.parser.parse(self.start,self.end)\r\n\t\t\thaveParser = True\r\n\t\t\t\r\n\t\tif( vel > 0):\t\t\r\n\t\t\tself.parser = VelocityChanger(code,levels,cantPoints,vel)\r\n\t\t\t((code,levels,cantPoints),(self.start,self.end)) = self.parser.parse(self.start,self.end)\r\n\t\t\thaveParser = True\r\n\t\t\t\r\n\t\tif (not haveParser):\r\n\t\t\tself.parser = Parser(code,levels,cantPoints)\r\n\t\t\t(code,levels,cantPoints) = self.parser.parse()\t\r\n\t\r\n\t\tself.modifiedGCode = code\r\n\t\tself.levels = levels\r\n\t\tself.cantPoints = cantPoints", "title": "" }, { "docid": "e9a39b116920aebf857a1c3b136837b0", "score": "0.5203123", "text": "def compile():\n\n m = dir(modules)\n\n words = []\n for module_name in m:\n try:\n eval(\"words.extend(modules.%s.WORDS)\" % module_name)\n except:\n pass # module probably doesn't have the property\n\n words = list(set(words))\n\n # for spotify module\n words.extend([\"MUSIC\",\"SPOTIFY\"])\n\n # create the dictionary\n pronounced = g2p.translateWords(words)\n zipped = zip(words, pronounced)\n lines = [\"%s %s\" % (x, y) for x, y in zipped]\n\n with open(\"../client/dictionary.dic\", \"w\") as f:\n f.write(\"\\n\".join(lines) + \"\\n\")\n\n # create the language model\n with open(\"../client/sentences.txt\", \"w\") as f:\n f.write(\"\\n\".join(words) + \"\\n\")\n f.write(\"<s> \\n </s> \\n\")\n f.close()\n\n # make language model\n os.system(\n \"text2idngram -vocab ../client/sentences.txt < ../client/sentences.txt -idngram temp.idngram\")\n os.system(\n \"idngram2lm -idngram temp.idngram -vocab ../client/sentences.txt -arpa ../client/languagemodel.lm\")", "title": "" }, { "docid": "49e798a22449fe4ec327a89f2378b7cd", "score": "0.51998425", "text": "def main() -> None:\n\n args = get_args()\n package = Package()\n ontology = get_ontology(args.ontology_file)\n\n for i, fh in enumerate(args.data_file, start=1):\n print(f'{i:3}: {os.path.basename(fh.name)}')\n fh.close()\n package.infer(fh.name)\n\n for res in package.resources:\n if args.missing_values:\n res.descriptor['schema']['missingValues'].extend(\n args.missing_values)\n\n for field in res.descriptor['schema']['fields']:\n name = 
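# A self-contained sketch (hypothetical field name and URL) of the
# ontology-override pattern used in the surrounding loop: overwrite an
# inferred Frictionless schema entry only when the ontology supplies a
# replacement value.
field = {"name": "water_temp", "type": "string"}
ontology = {"water_temp": {"frictionless type": "number",
                           "rdf type purl": "http://example.org/onto/water_temperature"}}
ont = ontology.get(field["name"]) or {}
field["type"] = ont.get("frictionless type", field["type"])
if ont.get("rdf type purl"):
    field["rdfType"] = ont["rdf type purl"]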
field.get('name')\n if not name:\n continue\n\n ont = ontology.get(name)\n if not ont:\n continue\n\n data_type = ont.get('frictionless type', field.get('type'))\n if data_type:\n field['type'] = data_type\n\n data_format = ont.get('frictionless format', field.get('format'))\n if data_format:\n field['format'] = data_format\n\n rdf_type = ont.get('rdf type purl', field.get('rdfType'))\n if rdf_type:\n field['rdfType'] = rdf_type\n\n units = ont.get('units purl', field.get('pm:unitRdfType'))\n if units:\n field['pm:unitRdfType'] = units\n\n source_url = ont.get('pm:source url', field.get('pm:sourceUrl'))\n if source_url:\n field['pm:source url'] = source_url\n\n measure_source = ont.get('measurement source purl',\n field.get('pm:measurementSourceRdfType'))\n if measure_source:\n field['pm:measurementSourceRdfType'] = measure_source\n\n measure_url = ont.get('pm:measurement source protocol',\n field.get('pm:measurement source protocol'))\n if measure_url:\n field['pm:measurementSourceProtocolUrl'] = measure_url\n\n # TODO: Not sure about this\n # is_searchable = ont.get('pm:searchable', field.get('pm:searchable'))\n # if is_searchable:\n # field['pm:searchable'] = is_searchable\n\n package.remove_resource(res.name)\n package.add_resource(res.descriptor)\n\n package.save(args.out_file)\n\n print(f'Done, see \"{args.out_file}\"')", "title": "" }, { "docid": "a3fe6b4a1d97a21eaef6d6f4d02a796d", "score": "0.51963997", "text": "def actualSolve(self, lp):\n\n if not self.executable(self.path):\n raise PulpSolverError(\"PuLP: cannot execute \" + self.path)\n tmpLp, tmpSol, tmpMst = self.create_tmp_files(lp.name, \"lp\", \"sol\", \"mst\")\n vs = lp.writeLP(tmpLp, writeSOS=1)\n try:\n os.remove(tmpSol)\n except:\n pass\n cmd = self.path\n options = self.options + self.getOptions()\n if self.timeLimit is not None:\n options.append((\"TimeLimit\", self.timeLimit))\n cmd += \" \" + \" \".join([f\"{key}={value}\" for key, value in options])\n cmd += f\" ResultFile={tmpSol}\"\n if self.optionsDict.get(\"warmStart\", False):\n self.writesol(filename=tmpMst, vs=vs)\n cmd += f\" InputFile={tmpMst}\"\n\n if lp.isMIP():\n if not self.mip:\n warnings.warn(\"GUROBI_CMD does not allow a problem to be relaxed\")\n cmd += f\" {tmpLp}\"\n if self.msg:\n pipe = None\n else:\n pipe = open(os.devnull, \"w\")\n\n return_code = subprocess.call(cmd.split(), stdout=pipe, stderr=pipe)\n\n # Close the pipe now if we used it.\n if pipe is not None:\n pipe.close()\n\n if return_code != 0:\n raise PulpSolverError(\"PuLP: Error while trying to execute \" + self.path)\n if not os.path.exists(tmpSol):\n # TODO: the status should be infeasible here, I think\n status = constants.LpStatusNotSolved\n values = reducedCosts = shadowPrices = slacks = None\n else:\n # TODO: the status should be infeasible here, I think\n status, values, reducedCosts, shadowPrices, slacks = self.readsol(tmpSol)\n self.delete_tmp_files(tmpLp, tmpMst, tmpSol, \"gurobi.log\")\n if status != constants.LpStatusInfeasible:\n lp.assignVarsVals(values)\n lp.assignVarsDj(reducedCosts)\n lp.assignConsPi(shadowPrices)\n lp.assignConsSlack(slacks)\n lp.assignStatus(status)\n return status", "title": "" }, { "docid": "148556ac6767aa89118518bfbccc312b", "score": "0.5196136", "text": "def create_pyomo_model(self, debug_mode=False, debug_cost=2e7):\n\n # work on a local copy of the dataframe\n if not debug_mode and self.df.index.str.contains('DBUG').any():\n # previously ran in debug mode, but now done\n df = self.remove_debug_links()\n df.to_csv(self.linksfile + 
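# The same Pyomo shape used in the method below (arc Set, mutable bound
# Params, arc Vars, cost Objective, capacity Constraint), shrunk to one
# hypothetical arc so it runs on its own.
from pyomo.environ import (ConcreteModel, Set, Param, Var, Objective,
                           Constraint, Reals, minimize)

m = ConcreteModel()
m.A = Set(initialize=[("s", "t")], dimen=2)
m.u = Param(m.A, initialize={("s", "t"): 10.0}, mutable=True)
m.c = Param(m.A, initialize={("s", "t"): 1.0})
m.X = Var(m.A, within=Reals)
m.total = Objective(expr=sum(m.c[a] * m.X[a] for a in m.A), sense=minimize)
m.cap = Constraint(m.A, rule=lambda m, i, j: m.X[i, j] <= m.u[i, j])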
'-final.csv')\n else:\n df = self.df\n\n self.log.info('Creating Pyomo Model (debug=%s)' % debug_mode)\n\n model = ConcreteModel()\n\n model.N = Set(initialize=self.nodes)\n model.k = Set(initialize=range(15))\n model.A = Set(within=model.N*model.N*model.k, \n initialize=self.links, ordered=True)\n model.source = Param(initialize='SOURCE')\n model.sink = Param(initialize='SINK')\n\n def init_params(p):\n if p == 'cost' and debug_mode:\n return (lambda model,i,j,k: debug_cost \n if ('DBUG' in str(i)+'_'+str(j))\n else 1.0)\n else:\n return lambda model,i,j,k: df.loc[str(i)+'_'+str(j)+'_'+str(k)][p]\n\n model.u = Param(model.A, initialize=init_params('upper_bound'), mutable=True)\n model.l = Param(model.A, initialize=init_params('lower_bound'), mutable=True)\n model.a = Param(model.A, initialize=init_params('amplitude'))\n model.c = Param(model.A, initialize=init_params('cost'))\n\n # The flow over each arc\n model.X = Var(model.A, within=Reals)\n\n # Minimize total cost\n def obj_fxn(model):\n return sum(model.c[i,j,k]*model.X[i,j,k] for (i,j,k) in model.A)\n model.total = Objective(rule=obj_fxn, sense=minimize)\n\n # Enforce an upper bound limit on the flow across each arc\n def limit_rule_upper(model, i, j, k):\n return model.X[i,j,k] <= model.u[i,j,k]\n model.limit_upper = Constraint(model.A, rule=limit_rule_upper)\n\n # Enforce a lower bound limit on the flow across each arc\n def limit_rule_lower(model, i, j, k):\n return model.X[i,j,k] >= model.l[i,j,k]\n model.limit_lower = Constraint(model.A, rule=limit_rule_lower)\n\n # To speed up creating the mass balance constraints, first\n # create dictionaries of arcs_in and arcs_out of every node\n # These are NOT Pyomo data, and Pyomo does not use \"model._\" at all\n arcs_in = {}\n arcs_out = {}\n\n def arc_list_hack(model, i,j,k):\n if j not in arcs_in:\n arcs_in[j] = []\n arcs_in[j].append((i,j,k))\n\n if i not in arcs_out:\n arcs_out[i] = []\n arcs_out[i].append((i,j,k))\n return [0]\n\n model._ = Set(model.A, initialize=arc_list_hack)\n\n # Enforce flow through each node (mass balance)\n def flow_rule(model, node):\n if node in [value(model.source), value(model.sink)]:\n return Constraint.Skip\n outflow = sum(model.X[i,j,k]/model.a[i,j,k] for i,j,k in arcs_out[node])\n inflow = sum(model.X[i,j,k] for i,j,k in arcs_in[node])\n return inflow == outflow\n model.flow = Constraint(model.N, rule=flow_rule)\n\n model.dual = Suffix(direction=Suffix.IMPORT)\n\n self.model = model", "title": "" }, { "docid": "f1d525b72f37243510024f58395907b3", "score": "0.5173822", "text": "def makeLibSource(self):\n\n # Make vector field (and event) file for compilation\n # This sets the field self._eventNames\n self._prepareEventSpecs()\n\n # Write the model.m file\n allfilestr = self._prepareModelContents()\n modelfile = os.path.join(self._target_dir, self._model_file)\n try:\n file = open(modelfile, 'w')\n file.write(allfilestr)\n file.close()\n except IOError as e:\n print(\"Error opening file \"+self._model_file+\" for writing\")\n raise IOError(e)\n\n # Write the events.m file\n if len(self._eventNames) > 0:\n allfilestr = self._prepareEventsFileContents() + self._prepareAuxContents()\n eventsfile = os.path.join(self._target_dir, self._events_file)\n try:\n file = open(eventsfile, 'w')\n file.write(allfilestr)\n file.close()\n except IOError as e:\n print(\"Error opening file \"+self._events_file+\" for writing\")\n raise IOError(e)\n\n\n # Write the initialconditions.m file\n allfilestr = self._prepareICContents()\n icfile = 
os.path.join(self._target_dir, self._ic_file)\n try:\n file = open(icfile, 'w')\n file.write(allfilestr)\n file.close()\n except IOError as e:\n print(\"Error opening file \"+self._ic_file+\" for writing\")\n raise IOError(e)\n\n # Write the pars.m file\n allfilestr = self._prepareParamContents()\n paramfile = os.path.join(self._target_dir, self._param_file)\n try:\n file = open(paramfile, 'w')\n file.write(allfilestr)\n file.close()\n except IOError as e:\n print(\"Error opening file \"+self._param_file+\" for writing\")\n raise IOError(e)\n\n # Write the get.m file\n allfilestr = self._prepareGetFileContents()\n getfile = os.path.join(self._target_dir, self._get_file)\n try:\n file = open(getfile, 'w')\n file.write(allfilestr)\n file.close()\n except IOError as e:\n print(\"Error opening file \"+self._get_file+\" for writing\")\n raise IOError(e)\n\n # Write the set.m file\n allfilestr = self._prepareSetFileContents()\n setfile = os.path.join(self._target_dir, self._set_file)\n try:\n file = open(setfile, 'w')\n file.write(allfilestr)\n file.close()\n except IOError as e:\n print(\"Error opening file \"+self._set_file+\" for writing\")\n raise IOError(e)\n\n # Write the vfield.m file\n# vfdefines = self._prepareVfieldDefines()\n# allfilestr = self._prepareVfieldContents(vfdefines)\n allfilestr = self.funcspec.spec[0] + self._prepareAuxContents()\n vffile = os.path.join(self._target_dir, self._vfield_file)\n try:\n file = open(vffile, 'w')\n file.write(allfilestr)\n file.close()\n except IOError as e:\n print(\"Error opening file \"+self._vfield_file+\" for writing\")\n raise IOError(e)", "title": "" }, { "docid": "870c6fd27ca456de9582a6aa9f045d53", "score": "0.51735485", "text": "def buildSolverModel(self, lp):\n log.debug(\"create the gurobi model\")\n lp.solverModel = gurobipy.Model(lp.name)\n log.debug(\"set the sense of the problem\")\n if lp.sense == constants.LpMaximize:\n lp.solverModel.setAttr(\"ModelSense\", -1)\n if self.timeLimit:\n lp.solverModel.setParam(\"TimeLimit\", self.timeLimit)\n gapRel = self.optionsDict.get(\"gapRel\")\n logPath = self.optionsDict.get(\"logPath\")\n if gapRel:\n lp.solverModel.setParam(\"MIPGap\", gapRel)\n if logPath:\n lp.solverModel.setParam(\"LogFile\", logPath)\n\n log.debug(\"add the variables to the problem\")\n for var in lp.variables():\n lowBound = var.lowBound\n if lowBound is None:\n lowBound = -gurobipy.GRB.INFINITY\n upBound = var.upBound\n if upBound is None:\n upBound = gurobipy.GRB.INFINITY\n obj = lp.objective.get(var, 0.0)\n varType = gurobipy.GRB.CONTINUOUS\n if var.cat == constants.LpInteger and self.mip:\n varType = gurobipy.GRB.INTEGER\n var.solverVar = lp.solverModel.addVar(\n lowBound, upBound, vtype=varType, obj=obj, name=var.name\n )\n if self.optionsDict.get(\"warmStart\", False):\n # Once lp.variables() has been used at least once in the building of the model.\n # we can use the lp._variables with the cache.\n for var in lp._variables:\n if var.varValue is not None:\n var.solverVar.start = var.varValue\n\n lp.solverModel.update()\n log.debug(\"add the Constraints to the problem\")\n for name, constraint in lp.constraints.items():\n # build the expression\n expr = gurobipy.LinExpr(\n list(constraint.values()), [v.solverVar for v in constraint.keys()]\n )\n if constraint.sense == constants.LpConstraintLE:\n relation = gurobipy.GRB.LESS_EQUAL\n elif constraint.sense == constants.LpConstraintGE:\n relation = gurobipy.GRB.GREATER_EQUAL\n elif constraint.sense == constants.LpConstraintEQ:\n relation = gurobipy.GRB.EQUAL\n 
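# A table-driven alternative to the sense dispatch in this method (a
# sketch; assumes the same pulp `constants` and `gurobipy` imports the
# solver wrapper already uses):
import gurobipy
from pulp import constants

SENSE_MAP = {constants.LpConstraintLE: gurobipy.GRB.LESS_EQUAL,
             constants.LpConstraintGE: gurobipy.GRB.GREATER_EQUAL,
             constants.LpConstraintEQ: gurobipy.GRB.EQUAL}
# relation = SENSE_MAP[constraint.sense]  # a KeyError replaces the manual raise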
else:\n raise PulpSolverError(\"Detected an invalid constraint type\")\n constraint.solverConstraint = lp.solverModel.addConstr(\n expr, relation, -constraint.constant, name\n )\n lp.solverModel.update()", "title": "" }, { "docid": "f807685d80245425900a66f5f81872f5", "score": "0.5164706", "text": "def import_model(self, file):\n felems = os.path.splitext(file)\n ext = felems[1].lower()\n if ext in (\".gz\", \".zip\",):\n ext = os.path.splitext(felems[0])[1].lower()\n\n if ext == \".cpo\":\n import docplex.cp.cpo.cpo_parser as cpo_parser\n prs = cpo_parser.CpoParser(self)\n prs.parse(file)\n\n elif ext == \".fzn\":\n import docplex.cp.fzn.fzn_parser as fzn_parser\n prs = fzn_parser.FznParser(self)\n prs.parse(file)\n # Get model to force compilation\n prs.get_model()\n\n elif ext == \".lp\":\n import docplex.cp.lp.lp_parser as lp_parser\n prs = lp_parser.LpParser(self)\n prs.parse(file)\n\n else:\n raise CpoException(\"Unknown '{}' file format. Only .cpo, .fzn and .lp are supported.\".format(ext))", "title": "" }, { "docid": "a3c04ce9f517817a518f872f20d83633", "score": "0.51574755", "text": "def to_automl(data, path='.', name='autodata'):\n # check if folder exists\n dir = os.path.join(path, name+'_automl')\n if not os.path.exists(dir):\n # create folder\n os.mkdir(dir)\n data.descriptors().to_csv(os.path.join(dir, name+'.info'), header=True) # some information\n if data.has_class():\n pd.DataFrame(data.indexes['X']).to_csv(os.path.join(dir, name+'_feat.name'), index=False, header=False) # feat name\n pd.DataFrame(data.indexes['y']).to_csv(os.path.join(dir, name+'_label.name'), index=False, header=False) # label name\n if 'train' in data.indexes: # train/test and X/y splits\n data.get_data('X_train').to_csv(os.path.join(dir, name+'_train.data'), sep=' ', index=False, header=False) # train data\n data.get_data('X_test').to_csv(os.path.join(dir, name+'_test.data'), sep=' ', index=False, header=False) # test data\n data.get_data('y_train').to_csv(os.path.join(dir, name+'_train.solution'), sep=' ', index=False, header=False) # train solution\n data.get_data('y_test').to_csv(os.path.join(dir, name+'_test.solution'), sep=' ', index=False, header=False) # test solution\n else: # only X/y split\n data.get_data('X').to_csv(os.path.join(dir, name+'.data'), sep=' ', index=False, header=False) # data\n data.get_data('y').to_csv(os.path.join(dir, name+'.solution'), sep=' ', index=False, header=False) # solution\n else:\n pd.DataFrame(data.columns).to_csv(os.path.join(dir, name+'_feat.name'), index=False, header=False) # feat name\n if 'train' in data.indexes: # only train/test split\n data.get_data('train').to_csv(os.path.join(dir, name+'_train.data'), sep=' ', index=False, header=False) # train data\n data.get_data('test').to_csv(os.path.join(dir, name+'_test.data'), sep=' ', index=False, header=False) # test data\n else: # no split at all\n data.to_csv(os.path.join(dir, name+'.data'), sep=' ', index=False, header=False) # data", "title": "" }, { "docid": "311b06e5d22497504730712619f4da06", "score": "0.5155293", "text": "def _make_input_file(self, teff_lims, teff_step, logg_lims, logg_step,\n feh_lims, feh_step, vsini_lims, vsini_step, \n vmicro_lims, vmicro_step, resolution):\n output_string = '{:.1f} {:.0f} {:.1f}\\n'.format(teff_lims[0], \n teff_step, \n teff_lims[-1])\n\n output_string += '{:.1f} {:.1f} {:.1f}\\n'.format(logg_lims[0],\n logg_step,\n logg_lims[1])\n\n output_string += '{:.1f} {:.1f} {:.1f}\\n'.format(vmicro_lims[0],\n vmicro_step,\n vmicro_lims[1])\n\n output_string += '{:.1f} 
{:.1f} {:.1f}\\n'.format(vsini_lims[0],\n vsini_step,\n vsini_lims[1])\n\n output_string += \"skip 0.03 0.02 0.07 !dilution factor\\n\"\n\n output_string += 'skip {:.1f} {:.1f} {:.1f}\\n'.format(feh_lims[0],\n feh_step,\n feh_lims[1])\n \n output_string += 'He 0.04 0.005 0.06 ! Individual abundance\\n'\n\n output_string += '0.0 {:.0f}\\n'.format(resolution)\n\n output_string += '{}\\n{}\\n'.format(self.abundance_table, self.model_dir)\n\n output_string += '2 1 !atmosphere model vmicro and mass\\n'\n output_string += 'ST ! model atmosphere chemical composition flag\\n'\n\n dx = self.data.x[1] - self.data.x[0]\n output_string += '1 {:.5f} fit\\n'.format(dx)\n\n output_string += 'data_sets/{}.txt\\n'.format(self.output_basename)\n\n output_string += '0.5 0.99 0.0 adjust ! RV determination stuff\\n'\n\n xmin, xmax = self.data.x[0]-1, self.data.x[-1]+1\n output_string += '{:.1f} {:.1f}\\n'.format(xmin, xmax)\n\n outfilename = '{}.inp'.format(self.output_basename)\n with open(outfilename, 'w') as outfile:\n outfile.write(output_string)\n\n return outfilename", "title": "" }, { "docid": "8ae6ddb4fe2249629c719de91f2cabee", "score": "0.51552427", "text": "def write(self, filename='pflotran.in'):\n\n if filename:\n self.filename = filename\n outfile = open(self.filename, 'w')\n\n # Presumes simulation.simulation_type is required\n if self.simulation.simulation_type:\n self._write_simulation(outfile)\n else:\n raise PyFLOTRAN_ERROR(\n 'simulation is required, it is currently reading as empty')\n\n if self.simulation.subsurface_flow or \\\n self.simulation.subsurface_transport:\n self._write_subsurface_simulation_begin(outfile)\n\n if self.reference_pressure:\n self._write_reference_pressure(outfile)\n\n if self.reference_temperature:\n self._write_reference_temperature(outfile)\n\n if self.co2_database:\n self._write_co2_database(outfile)\n\n if self.regression.cells \\\n or self.regression.cells_per_process \\\n or self.regression.all_cells:\n self._write_regression(outfile)\n\n if self.uniform_velocity.value_list:\n self._write_uniform_velocity(outfile)\n\n if self.specified_velocity:\n self._write_specified_velocity(outfile)\n\n if self.update_flow_permeability:\n self._write_update_flow_permeability(outfile)\n\n if self.reference_liquid_density:\n self._write_reference_liquid_density(outfile)\n\n if self.minimum_hydrostatic_pressure:\n self._write_minimum_hydrostatic_pressure(outfile)\n\n if self.reference_stress_state.value_list:\n self._write_reference_stress_state(outfile)\n\n if self.eoslist:\n self._write_eos(outfile)\n\n if self.klinkenberg_effect:\n self._write_klinkenberg_effect(outfile)\n\n if self.creep_closure_table:\n self._write_creep_closure_table(outfile)\n\n if self.nonuniform_velocity.filename:\n self._write_nonuniform_velocity(outfile)\n\n if self.simulation.mode.upper() == 'MPHASE' and self.co2_database:\n self._write_co2_database(outfile)\n\n if self.multiple_continuum:\n self._write_multiple_continuum(outfile)\n\n if self.overwrite_restart_flow_params:\n self._write_overwrite_restart_flow(outfile)\n\n if self.overwrite_restart_transport:\n self._write_overwrite_restart_transport(outfile)\n\n if self.initialize_flow_from_file is not None:\n self._write_initialize_flow_from_file(outfile)\n\n if self.initialize_transport_from_file is not None:\n self._write_initialize_transport_from_file(outfile)\n\n if self.isothermal:\n self._write_isothermal(outfile)\n\n if self.reference_porosity:\n self._write_reference_porosity(outfile)\n\n if self.datasetlist:\n 
self._write_dataset(outfile)\n # else: print 'info: dataset name not detected\\n'\n\n if self.chemistry:\n self._write_chemistry(outfile)\n # else: print 'info: chemistry not detected\\n'\n\n if self.grid:\n self._write_grid(outfile)\n else:\n raise PyFLOTRAN_ERROR(\n 'grid is required, it is currently reading as empty!')\n\n if self.timestepper_flow or self.timestepper_transport:\n self._write_timestepper(outfile)\n # else: print 'info: timestepper not detected\\n'\n\n if self.time:\n self._write_time(outfile)\n else:\n raise PyFLOTRAN_ERROR(\n 'time is required, it is currently reading as empty!')\n\n if self.proplist:\n self._write_prop(outfile)\n else:\n PyFLOTRAN_WARNING(\n 'material property list is empty! ' +\n ' Using default material property settings')\n self.add(pmaterial())\n self._write_prop(outfile)\n\n if self.lsolverlist:\n self._write_lsolver(outfile)\n # else: print 'info: lsolverlist (linear solver list) not detected\\n'\n\n if self.nsolverlist:\n self._write_nsolver(outfile)\n # else: print 'info: nsolverlist (newton solver list) not detected\\n'\n\n if self.output:\n self._write_output(outfile)\n else:\n raise PyFLOTRAN_ERROR(\n 'output is required, it is currently reading as empty!')\n\n if self.fluidlist:\n self._write_fluid(outfile)\n\n if self.saturationlist:\n self._write_saturation(outfile)\n elif self.charlist:\n self._write_characteristic_curves(outfile)\n else:\n if self.simulation.subsurface_flow:\n self.add(pcharacteristic_curves())\n self._write_characteristic_curves(outfile)\n PyFLOTRAN_WARNING(\n 'characteristic_curves list or saturation list ' +\n 'is required, it is currently reading as empty! ' +\n ' Using default characteristic_curves settings')\n\n if self.regionlist:\n self._write_region(outfile)\n else:\n raise PyFLOTRAN_ERROR(\n 'regionlist is required, it is currently reading as empty!')\n\n if self.integral_flux_list:\n self._write_integral_flux(outfile)\n\n if self.observation_list:\n self._write_observation(outfile)\n\n if self.flowlist:\n self._write_flow(outfile)\n\n if self.transportlist:\n self._write_transport(outfile)\n\n if self.initial_condition_list:\n self._write_initial_condition(outfile)\n else:\n raise PyFLOTRAN_ERROR(\n 'initial_condition_list is required,' +\n 'it is currently reading as empty!')\n\n if self.boundary_condition_list:\n self._write_boundary_condition(outfile)\n\n if self.source_sink_list:\n self._write_source_sink(outfile)\n\n if self.source_sink_sandbox_list:\n self._write_source_sink_sandbox(outfile)\n\n if self.reference_saturation:\n self._write_reference_saturation(outfile)\n\n if self.strata_list:\n self._write_strata(outfile)\n else:\n PyFLOTRAN_WARNING(\n 'stratigraphy_coupler is required, ' +\n 'it is currently reading as empty! 
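# The writer's "default when missing" idiom in a runnable miniature
# (card contents hypothetical): warn, append a default object, then fall
# through to the single shared write path used for user-supplied data.
import io
import warnings

strata_list = []                              # user supplied nothing
out = io.StringIO()
if not strata_list:
    warnings.warn("strata list empty; using default settings")
    strata_list.append({"region": "all"})     # stand-in default object
for s in strata_list:
    out.write("STRATA region=%s\n" % s["region"])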
' +\n 'Using default settings')\n self.add(pstrata())\n self._write_strata(outfile)\n\n if self.constraint_list:\n self._write_constraint(outfile)\n\n if self.rt_mass_transfer_list:\n self._write_rt_mass_transfer(outfile)\n\n if self.flow_mass_transfer_list:\n self._write_flow_mass_transfer(outfile)\n\n if self.simulation.subsurface_flow or \\\n self.simulation.subsurface_transport:\n self._write_subsurface_simulation_end(outfile)\n\n\n # -----------------------------------------------\n\n if self.simulation.surface_subsurface:\n self._write_surface_subsurface_begin(outfile)\n\n if self.surface_flow:\n self._write_surface_flow(outfile)\n\n self._write_surface_subsurface_end(outfile)\n\n # -----------------------------------------------\n\n if self.simulation.simulation_type == 'hydroquake':\n self._write_hydroquake(outfile)\n\n if self.simulation.simulation_type.lower() == 'geomechanics_subsurface':\n self._write_geomechanics(outfile)\n\n if self.waste_form_general:\n self._write_wasteform_general(outfile)\n\n if self.ufd_decay:\n self._write_ufd_decay(outfile)\n\n if self.ufd_biosphere:\n self._write_ufd_biosphere(outfile)\n\n if self.wipp_source_sink:\n self._write_wipp_source_sink(outfile)\n\n outfile.close()", "title": "" }, { "docid": "6217bff10682c1129e9efb6eb05446d3", "score": "0.5149605", "text": "def write(solution):\n trimmed_solution = solution\n input_file_name_stripped = trimmed_solution.name\n cwd = os.getcwd()\n output_file_name = os.path.join(cwd,\n 'pym_' +\n input_file_name_stripped +\n '.inp')\n f = open(output_file_name, 'w+')\n\n #Work functions\n\n calories_constant = 4184.0 #number of calories in 1000 Joules of energy\n def eliminate(input_string, char_to_replace, spaces='single'):\n \"\"\"\n Eliminate characters from a string\n\n :param input_string\n string to be modified\n :param char_to_replace\n array of character strings to be removed\n \"\"\"\n for char in char_to_replace:\n input_string = input_string.replace(char, \"\")\n if spaces == 'double':\n input_string = input_string.replace(\" \", \" \")\n return input_string\n\n def section_break(title):\n \"\"\"\n Insert break and new section title into cti file\n\n :param title:\n title string for next section_break\n \"\"\"\n f.write('!'+ \"-\"*75 + '\\n')\n f.write('! 
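# section_break condensed into a pure helper that returns the same
# comment banner the writer emits (a sketch, not the original helper):
def section_banner(title, width=75):
    bar = "!" + "-" * width + "\n"
    return bar + "! " + title + "\n" + bar

print(section_banner("Species data"))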
' + title +'\\n')\n f.write('!'+ \"-\"*75 + '\\n')\n\n def replace_multiple(input_string, replace_list):\n \"\"\"\n Replace multiple characters in a string\n\n :param input_string\n string to be modified\n :param replace list\n list containing items to be replaced (value replaces key)\n \"\"\"\n for original_character, new_character in replace_list.items():\n input_string = input_string.replace(original_character,\n new_character)\n return input_string\n\n def build_arrhenius(equation_object, equation_type):\n \"\"\"\n Builds Arrhenius coefficient string\n\n :param equation_objects\n cantera equation object\n :param equation_type:\n string of equation type\n \"\"\"\n coeff_sum = sum(equation_object.reactants.values())\n pre_exponential_factor = equation_object.rate.pre_exponential_factor\n temperature_exponent = '{:.3f}'.format(equation_object.rate.temperature_exponent)\n activation_energy = '{:.2f}'.format(equation_object.rate.activation_energy / calories_constant)\n if equation_type == 'ElementaryReaction':\n if coeff_sum == 1:\n pre_exponential_factor = str(\n '{:.3E}'.format(pre_exponential_factor))\n if coeff_sum == 2:\n pre_exponential_factor = str(\n '{:.3E}'.format(pre_exponential_factor*10**3))\n if coeff_sum == 3:\n pre_exponential_factor = str(\n '{:.3E}'.format(pre_exponential_factor*10**6))\n if equation_type == 'ThreeBodyReaction':\n if coeff_sum == 1:\n pre_exponential_factor = str(\n '{:.3E}'.format(pre_exponential_factor*10**3))\n if coeff_sum == 2:\n pre_exponential_factor = str(\n '{:.3E}'.format(pre_exponential_factor*10**6))\n if (equation_type != 'ElementaryReaction'\n and equation_type != 'ThreeBodyReaction'):\n pre_exponential_factor = str(\n '{:.3E}'.format(pre_exponential_factor))\n arrhenius = [pre_exponential_factor,\n temperature_exponent,\n activation_energy]\n return arrhenius\n\n def build_modified_arrhenius(equation_object, t_range):\n \"\"\"\n Builds Arrhenius coefficient strings for high and low temperature ranges\n\n :param equation_objects\n cantera equation object\n :param t_range:\n simple string ('high' or 'low') to designate temperature range\n \"\"\"\n if t_range == 'high':\n pre_exponential_factor = equation_object.high_rate.pre_exponential_factor\n temperature_exponent = '{:.3f}'.format(equation_object.high_rate.temperature_exponent)\n activation_energy = '{:.2f}'.format(equation_object.high_rate.activation_energy/calories_constant)\n if len(equation_object.products) == 1:\n pre_exponential_factor = str(\n '{:.5E}'.format(pre_exponential_factor*10**3))\n else:\n pre_exponential_factor = str(\n '{:.5E}'.format(pre_exponential_factor))\n arrhenius_high = [pre_exponential_factor,\n temperature_exponent,\n activation_energy]\n return arrhenius_high\n if t_range == 'low':\n\n pre_exponential_factor = equation_object.low_rate.pre_exponential_factor\n temperature_exponent = '{:.3f}'.format(equation_object.low_rate.temperature_exponent)\n activation_energy = '{:.2f}'.format(equation_object.low_rate.activation_energy/calories_constant)\n if len(equation_object.products) == 1:\n pre_exponential_factor = str(\n '{:.5E}'.format(pre_exponential_factor*10**6))\n else:\n pre_exponential_factor = str(\n '{:.5E}'.format(pre_exponential_factor*10**3))\n\n arrhenius_low = [pre_exponential_factor,\n temperature_exponent,\n activation_energy]\n return arrhenius_low\n\n\n def build_nasa(nasa_coeffs, row):\n \"\"\"\n Creates string of nasa polynomial coefficients\n\n :param nasa_coeffs\n cantera species thermo coefficients object\n :param row\n which row to write 
coefficients in\n \"\"\"\n line_coeffs = ''\n lines = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14]]\n line_index = lines[row-2]\n for ix, c in enumerate(nasa_coeffs):\n if ix in line_index:\n if c >= 0:\n line_coeffs += ' '\n line_coeffs += str('{:.8e}'.format(c))\n return line_coeffs\n\n def build_species_string():\n \"\"\"\n formats species string for writing\n \"\"\"\n species_list_string = ''\n line = 1\n for sp_index, sp_string in enumerate(trimmed_solution.species_names):\n sp = ' '\n #get length of string next species is added\n length_new = len(sp_string)\n length_string = len(species_list_string)\n total = length_new + length_string + 3\n #if string will go over width, wrap to new line\n if total >= 70*line:\n species_list_string += '\\n'\n line += 1\n species_list_string += sp_string + ((16-len(sp_string))*sp)\n return species_list_string\n\n #Write title block to file\n section_break('Chemkin File converted from Solution Object by pyMARS')\n\n #Write phase definition to file\n element_names = eliminate(str(trimmed_solution.element_names),\n ['[', ']', '\\'', ','])\n element_string = Template(\n 'ELEMENTS\\n' +\n '$element_names\\n' +\n 'END\\n')\n f.write(element_string.substitute(element_names=element_names))\n species_names = build_species_string()\n species_string = Template(\n 'SPECIES\\n' +\n '$species_names\\n'+\n 'END\\n')\n f.write(species_string.substitute(species_names=species_names))\n\n #Write species to file\n section_break('Species data')\n f.write('THERMO ALL' + '\\n' +\n ' 300.000 1000.000 5000.000' +'\\n')\n phase_unknown_list = []\n\n #write data for each species in the Solution object\n for sp_index in xrange(len(trimmed_solution.species_names)):\n d = 3.33564e-30 #1 debye = d coulomb-meters\n species = trimmed_solution.species(sp_index)\n name = str(trimmed_solution.species(sp_index).name)\n nasa_coeffs = trimmed_solution.species(sp_index).thermo.coeffs\n #Species attributes from trimmed solution object\n t_low = '{0:.3f}'.format(species.thermo.min_temp)\n t_max = '{0:.3f}'.format(species.thermo.max_temp)\n t_mid = '{0:.3f}'.format(species.thermo.coeffs[0])\n temp_range = str(t_low) + ' ' + str(t_max) + ' ' + t_mid\n species_comp = ''\n for atom in species.composition:\n species_comp += '{:<4}'.format(atom)\n species_comp += str(int(species.composition[atom]))\n if type(species.transport).__name__ == 'GasTransportData':\n species_phase = 'G'\n else:\n phase_unknown_list.append(name)\n species_phase = 'G'\n line_1 = (\n '{:<18}'.format(name) +\n '{:<6}'.format(' ') +\n '{:<20}'.format(species_comp) +\n '{:<4}'.format(species_phase) +\n '{:<31}'.format(temp_range) +\n '{:<1}'.format('1') +\n '\\n')\n f.write(line_1)\n line_2_coeffs = build_nasa(nasa_coeffs, 2)\n line_2 = line_2_coeffs + ' 2\\n'\n f.write(line_2)\n line_3_coeffs = build_nasa(nasa_coeffs, 3)\n line_3 = line_3_coeffs + ' 3\\n'\n f.write(line_3)\n line_4_coeffs = build_nasa(nasa_coeffs, 4)\n line_4 = line_4_coeffs + ' 4\\n'\n f.write(line_4)\n\n f.write('END\\n')\n\n #Write reactions to file\n section_break('Reaction Data')\n f.write('REACTIONS\\n')\n #write data for each reaction in the Solution Object\n for reac_index in xrange(len(trimmed_solution.reaction_equations())):\n equation_string = str(trimmed_solution.reaction_equation(reac_index))\n equation_string = eliminate(equation_string, ' ', 'single')\n equation_object = trimmed_solution.reaction(reac_index)\n equation_type = type(equation_object).__name__\n m = str(reac_index+1)\n if equation_type == 'ThreeBodyReaction':\n arrhenius = 
build_arrhenius(equation_object, equation_type)\n main_line = (\n '{:<51}'.format(equation_string) +\n '{:>9}'.format(arrhenius[0]) +\n '{:>9}'.format(arrhenius[1]) +\n '{:>11}'.format(arrhenius[2]) +\n '\\n')\n f.write(main_line)\n #trimms efficiencies list\n efficiencies = equation_object.efficiencies\n trimmed_efficiencies = equation_object.efficiencies\n for s in efficiencies:\n if s not in trimmed_solution.species_names:\n del trimmed_efficiencies[s]\n replace_list_2 = {\n '{':'',\n '}':'/',\n '\\'':'',\n ':':'/',\n ',':'/'}\n efficiencies_string = replace_multiple(\n str(trimmed_efficiencies),\n replace_list_2)\n secondary_line = str(efficiencies_string) + '\\n'\n if bool(efficiencies) is True:\n f.write(secondary_line)\n if equation_type == 'ElementaryReaction':\n arrhenius = build_arrhenius(equation_object, equation_type)\n main_line = (\n '{:<51}'.format(equation_string) +\n '{:>9}'.format(arrhenius[0]) +\n '{:>9}'.format(arrhenius[1]) +\n '{:>11}'.format(arrhenius[2]) +\n '\\n')\n f.write(main_line)\n if equation_type == 'FalloffReaction':\n arr_high = build_modified_arrhenius(equation_object, 'high')\n main_line = (\n '{:<51}'.format(equation_string) +\n '{:>9}'.format(arr_high[0]) +\n '{:>9}'.format(arr_high[1]) +\n '{:>11}'.format(arr_high[2]) +\n '\\n')\n f.write(main_line)\n arr_low = build_modified_arrhenius(equation_object, 'low')\n second_line = (\n ' LOW /' +\n ' ' + arr_low[0] +\n ' ' + arr_low[1] +\n ' ' + arr_low[2] + '/\\n')\n f.write(second_line)\n j = equation_object.falloff.parameters\n #If optional Arrhenius data included:\n try:\n third_line = (\n ' TROE/' +\n ' ' + str(j[0]) +\n ' ' + str(j[1]) +\n ' ' + str(j[2]) +\n ' ' + str(j[3]) +' /\\n')\n f.write(third_line)\n except IndexError:\n pass\n #trimms efficiencies list\n efficiencies = equation_object.efficiencies\n trimmed_efficiencies = equation_object.efficiencies\n for s in efficiencies:\n if s not in trimmed_solution.species_names:\n del trimmed_efficiencies[s]\n replace_list_2 = {\n '{':'',\n '}':'/',\n '\\'':'',\n ':':'/',\n ',':'/'}\n efficiencies_string = replace_multiple(\n str(trimmed_efficiencies),\n replace_list_2)\n\n fourth_line = str(efficiencies_string) + '\\n'\n if bool(efficiencies) is True:\n f.write(fourth_line)\n #dupluicate option\n if equation_object.duplicate is True:\n duplicate_line = ' DUPLICATE' +'\\n'\n f.write(duplicate_line)\n f.write('END')\n f.close()\n\n #Test mechanism file\n\n original_solution = solution\n #convert written chemkin file to cti, and get solution\n parser = ck2cti.Parser()\n outName = 'test_file.cti'\n parser.convertMech(output_file_name, outName=outName)\n new_solution = ct.Solution(outName)\n\n #test new solution vs original solutoin\n #test(original_solution, new_solution)\n os.remove(outName)\n return output_file_name\n\n\n \"\"\"\n def build_falloff(j):\n\n Creates falloff reaction Troe parameter string\n\n param j:\n Cantera falloff parameters object\n\n falloff_str = str(',\\n falloff = Troe(' +\n 'A = ' + str(j[0]) +\n ', T3 = ' + str(j[1]) +\n ', T1 = ' + str(j[2]) +\n ', T2 = ' + str(j[3]) +') )\\n\\n')\n return falloff_str\n \"\"\"", "title": "" }, { "docid": "bb9fe475543f8a500b17238e2c01e2a8", "score": "0.5131358", "text": "def writeDataFile(self):\n model = pe.PEChainBuilder(self.lenPa, self.numPa, self.lenPc, self.numPc,\\\n self.volRatio, self.chargeFraction)\n model.genChains()\n \n \n model.writeFiles(self.directory)\n self.dataFileName = \"data.pe.la{0}.na{1}.lc{2}.nc{3}.rho{4}.r{5}.lammps\".\\\n format(self.lenPa, self.numPa, self.lenPc, 
self.numPc, \\\n self.volRatio, self.chargeRepeat)", "title": "" }, { "docid": "dc59f59e013fe69fd099020e5707bc62", "score": "0.51224095", "text": "def createPrismModel(states, transitions, target, mode, nr_actions, reset_transitions, reset_cost):\n out_file = open(TMP_MODEL_PATH, 'w')\n # module\n out_file.write(\"mdp\\n\\nmodule tmp\\n\\n\")\n\n # number of state and initial state\n out_file.write(\"\\ts : [0..\" + str(len(states)) + \"] init \" + str(abs(mode)) + \";\\n\\n\")\n\n # transitions\n for state in range(len(transitions)):\n for action in range(len(transitions[state])):\n if sum(transitions[state][action]) != 0:\n out_file.write(\"\\t[\" + chr(97 + action) + \"] s=\" + str(state) + \"-> \")\n destinations = []\n for dest in range(len(transitions[state][action])):\n if transitions[state][action][dest] != 0:\n destinations.append(str(transitions[state][action][dest]) + \":(s'=\" + str(dest) + \")\")\n out_file.write(\" + \".join(destinations))\n out_file.write(\";\\n\")\n out_file.write(\"\\nendmodule\\n\\n\")\n\n # label target\n out_file.write('label \"target\" = ')\n target = list(set(target))\n target = [\"(s=\" + str(x) + \")\" for x in target]\n if len(target) == 0:\n out_file.write(\"(s=\" + str(len(states) + 1) + \");\\n\")\n else:\n out_file.write(\" | \".join(target))\n out_file.write(\";\\n\")\n\n # label states\n if mode == MODE_MAX:\n out_file.write('label \"sink\" = (s=0);\\n')\n\n for i in range(mode, len(states)):\n out_file.write('label \"s' + str(states[i][0]) + '_obs' + str(states[i][1]) + '\" = (s=' + str(i) + ');\\n')\n\n if mode == MODE_MIN:\n out_file.write(\"\\nrewards\\n\")\n if reset_cost == 1:\n out_file.write(\"\\ttrue:1;\\n\")\n else:\n for i in range(len(states)):\n for j in range(nr_actions):\n if j in reset_transitions[i]:\n out_file.write(\"\\t[\" + chr(97 + j) + \"] (s=\" + str(i) + \") : \" + str(reset_cost) + \";\\n\")\n else:\n out_file.write(\"\\t[\" + chr(97 + j) + \"] (s=\" + str(i) + \") : 1;\\n\")\n\n out_file.write(\"endrewards\\n\")\n\n out_file.close()", "title": "" }, { "docid": "f391c50f49d5b0eae5ebbf6c5f12961a", "score": "0.5118555", "text": "def build_LM(in_file):\r\n print 'building language models...'\r\n # This is an empty method\r\n # Pls implement your code in below\r\n LMs = {'malaysian': {}, 'indonesian': {}, 'tamil': {}}\r\n malay_LM = LMs['malaysian']\r\n indon_LM = LMs['indonesian']\r\n tamil_LM = LMs['tamil']\r\n\r\n FD = open(in_file, 'r')\r\n\r\n lines = FD.read()\r\n lines_array = lines.replace('\\r', ' ').split('\\n')\r\n lines_array.pop(len(lines_array) - 1)\r\n\r\n for line in lines_array:\r\n line_array = line.split(' ', 1)\r\n lang = line_array[0]\r\n text = line_array[1]\r\n current_LM = LMs[lang]\r\n four_char_array = get_four_char_array(text)\r\n\r\n for four_char in four_char_array:\r\n if four_char not in current_LM:\r\n # performs add one smoothing\r\n malay_LM[four_char] = 1\r\n indon_LM[four_char] = 1\r\n tamil_LM[four_char] = 1\r\n current_LM[four_char] += 1\r\n\r\n # calculates ratio of set frequency over total frequency\r\n # for all sets in each of the three LMs\r\n for LM in LMs.values():\r\n count = 0\r\n for freq_value in LM.values():\r\n count += freq_value\r\n for four_char_key in LM.keys():\r\n LM[four_char_key] /= count\r\n return LMs\r\n\r\n FD.close()", "title": "" }, { "docid": "c862647fcd5ba7f408d748abaf7ab617", "score": "0.5115003", "text": "def test_ImportUFONoSideEffectNLO(self):\n ufo_model = ufomodels.load_model(import_ufo.find_ufo_path('loop_sm'),False)\n original_all_particles = 
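# The snapshot/compare pattern this test applies to the UFO model, as a
# self-contained miniature: shallow-copy the mutable state first, then
# assert it is untouched after the operation under test.
import copy

registry = {"particles": [21, 2, 1]}
snapshot = copy.copy(registry["particles"])
_ = sorted(registry["particles"])            # pure operation under test
assert snapshot == registry["particles"]     # no side effect on the registry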
copy.copy(ufo_model.all_particles)\n original_all_vertices = copy.copy(ufo_model.all_vertices)\n original_all_couplings = copy.copy(ufo_model.all_couplings)\n original_all_lorentz = copy.copy(ufo_model.all_lorentz)\n original_all_parameters = copy.copy(ufo_model.all_parameters)\n original_all_orders = copy.copy(ufo_model.all_orders)\n original_all_functions = copy.copy(ufo_model.all_functions)\n original_all_CTvertices = copy.copy(ufo_model.all_CTvertices)\n original_all_CTparameters = copy.copy(ufo_model.all_CTparameters)\n\n\n ufo2mg5_converter = import_ufo.UFOMG5Converter(ufo_model)\n model = ufo2mg5_converter.load_model()\n parameters, couplings = import_ufo.OrganizeModelExpression(ufo_model).main() \n\n self.assertEqual(original_all_particles,ufo_model.all_particles)\n self.assertEqual(original_all_vertices,ufo_model.all_vertices)\n# self.assertEqual(original_all_couplings,ufo_model.all_couplings)\n self.assertEqual(original_all_lorentz,ufo_model.all_lorentz)\n self.assertEqual(original_all_parameters,ufo_model.all_parameters)\n self.assertEqual(original_all_orders,ufo_model.all_orders)\n self.assertEqual(original_all_functions,ufo_model.all_functions)\n self.assertEqual(original_all_CTvertices,ufo_model.all_CTvertices)\n self.assertEqual(original_all_CTparameters,ufo_model.all_CTparameters)", "title": "" }, { "docid": "dfe119951be54a560fcefe9057a921e1", "score": "0.5112634", "text": "def main(models, output_dir):\n\tfor model in models:\n\t\tobj = ParseOmsa(model)\n\t\tobj.bios_version()\n\t\tobj.perc_versions()\n\t\tobj.write_yaml(output_dir)", "title": "" }, { "docid": "00e7eaf7ed485ca9233fa3bbe1a1b96e", "score": "0.5110691", "text": "def solve_lp(self):\n self._initialize_model()\n self._initialize_node_variables_lp()\n self._initialize_edge_variables_lp()\n self._initialize_objective()\n self._initialize_contraint_nodes()\n self._initialize_constraint_edges()\n self._initialize_constraint_terminals()\n self._run_solver()\n self._calculate_cut_value()\n self._calculate_source_sets()\n self._calculate_possible_terminals_by_node_weak()\n self._calculate_possible_terminals_by_node_strong()", "title": "" }, { "docid": "6586aa3830f43b54dce72f69cbc0963d", "score": "0.5104416", "text": "def loadLoopToyModel():\n \n mypartlist = base_objects.ParticleList()\n myinterlist = base_objects.InteractionList()\n myloopmodel = loop_base_objects.LoopModel()\n\n # A gluon\n mypartlist.append(base_objects.Particle({'name':'g',\n 'antiname':'g',\n 'spin':3,\n 'color':8,\n 'mass':'ZERO',\n 'width':'ZERO',\n #'texname':'G',\n #'antitexname':'G',\n 'line':'curly',\n 'charge':0.,\n 'pdg_code':21,\n #'propagating':True,\n 'propagator':0, \n 'is_part':True,\n 'counterterm':{('QCD', ((6,),)): {0: 'UVWfct_G_1', -1: 'UVWfct_G_1_1eps'}, ('QCD', ((5,),)): {0: 'UVWfct_G_0', -1: 'UVWfct_G_0_1eps'}},\n 'self_antipart':True}))\n \n # A quark U and its antiparticle\n mypartlist.append(base_objects.Particle({'name':'u',\n 'antiname':'u~',\n 'spin':2,\n 'color':3,\n 'mass':'ZERO',\n 'width':'ZERO',\n #'texname':'u',\n #'antitexname':'u',\n 'line':'straight',\n 'charge':2. / 3.,\n 'pdg_code':2,\n #'propagating':True,\n 'propagator':'', \n 'is_part':True,\n 'self_antipart':False}))\n antiu = copy.copy(mypartlist[1])\n antiu.set('is_part', False)\n\n # A quark D and its antiparticle\n mypartlist.append(base_objects.Particle({'name':'d',\n 'antiname':'d~',\n 'spin':2,\n 'color':3,\n 'mass':'ZERO',\n 'width':'ZERO',\n #'texname':'d',\n #'antitexname':'d',\n 'line':'straight',\n 'charge':-1. 
/ 3.,\n 'pdg_code':1,\n #'propagating':True,\n 'propagator':'',\n 'is_part':True,\n 'self_antipart':False}))\n antid = copy.copy(mypartlist[2])\n antid.set('is_part', False)\n\n myloopmodel.set('particles', mypartlist)\n myloopmodel.set('couplings', ['QCD']) \n myloopmodel.set('interactions', myinterlist)\n myloopmodel.set('perturbation_couplings', ['QCD'])\n myloopmodel.set('order_hierarchy', {'QCD':1})\n\n return myloopmodel", "title": "" }, { "docid": "695e98b499f902207ba35ccac8891bda", "score": "0.5104137", "text": "def generate(genconf_model, project_folder):\n\n gendesc = get_generator_desc(genconf_model.gen_name)\n meta = get_language(gendesc.lang)\n\n # Path for templates overrides\n templates_path = os.path.join(project_folder, 'templates',\n genconf_model.gen_name)\n\n output_root = os.path.join(project_folder, genconf_model.output)\n\n def _create_folder(output_file):\n try:\n os.makedirs(os.path.dirname(output_file))\n except FileExistsError:\n pass\n\n # For each model configured in the current genconf\n for model_path in genconf_model.models:\n\n click.echo('Processing model \"{}\"'.format(model_path))\n model = meta.model_from_file(os.path.join(project_folder, 'model',\n model_path))\n\n # Validate model using generator specific validation.\n if gendesc.validate:\n gendesc.validate(model)\n\n params = {}\n # Adding generator params\n for p in genconf_model.params:\n params[p.name] = p.value\n\n # Processing all rules\n for rule in genconf_model.rules:\n\n # Sanity check\n if len(rule.types) != len(rule.var_names):\n raise TextXToolsError('Number of variables don\\'t match'\n ' number of types in rule \"{}\"'\n .format(rule.name))\n\n # Collect all object of each given type\n type_objs = []\n for t in rule.types:\n type_objs.append(children_of_type(model, t))\n\n if rule.all:\n context = {'__builtins__':{}}\n for ind, obj in enumerate(type_objs):\n params[rule.var_names[ind]] = obj\n context.update(params)\n for t in rule.trans:\n target_path = eval(t.python_path_expr, context)\n output_file = os.path.join(output_root, target_path)\n\n click.echo(\"Generating {}\".format(output_file))\n\n _create_folder(output_file)\n with open(output_file, 'w') as f:\n f.write(gendesc.render(t.template_path, params,\n templates_path))\n else:\n if len(rule.types) > 1:\n raise TextXToolsError('Multiple types/variables are not'\n ' possible for \"non-all\" rules.')\n for obj in type_objs[0]:\n params[rule.var_names[0]] = obj\n context = {'__builtins__': {}}\n context.update(params)\n for t in rule.trans:\n target_path = eval(t.python_path_expr, context)\n output_file = os.path.join(output_root, target_path)\n click.echo(\"Generating {}\".format(output_file))\n _create_folder(output_file)\n\n with open(output_file, 'w') as f:\n f.write(gendesc.render(t.template_path, params,\n templates_path))", "title": "" }, { "docid": "8a1835d042cf66486ed099ce14f71906", "score": "0.51036656", "text": "def generate_pymods(the_parsed_topology_xml, xml_filename, opt):\n if the_parsed_topology_xml.get_namespace():\n if VERBOSE:\n print(\n \"Generating pymods for topology %s::%s\"\n % (\n the_parsed_topology_xml.get_namespace(),\n the_parsed_topology_xml.get_name(),\n )\n )\n else:\n if VERBOSE:\n print(\n \"Generating pymods for topology %s\"\n % (the_parsed_topology_xml.get_name())\n )\n model = TopoFactory.TopoFactory.getInstance()\n topology_model = model.create(the_parsed_topology_xml)\n\n # create list of used parsed component xmls\n parsed_xml_dict = {}\n for comp in 
the_parsed_topology_xml.get_instances():\n if comp.get_type() in topology_model.get_base_id_dict():\n parsed_xml_dict[comp.get_type()] = comp.get_comp_xml()\n else:\n if VERBOSE:\n print(\n \"Components with type {} aren't in the topology model.\".format(\n comp.get_type()\n )\n )\n\n #\n # Hack to set up deployment path for instanced dictionaries (if one exists remove old one)\n #\n if opt.dict_dir is None:\n os.environ[\"DICT_DIR\"] = os.getcwd()\n else:\n os.environ[\"DICT_DIR\"] = opt.dict_dir\n\n xml_list = []\n for parsed_xml_type in parsed_xml_dict:\n if parsed_xml_dict[parsed_xml_type] is None:\n print(\n \"ERROR: XML of type {} is being used, but has not been parsed correctly. Check if file exists or add xml file with the 'import_component_type' tag to the Topology file.\".format(\n parsed_xml_type\n )\n )\n raise Exception()\n xml_list.append(parsed_xml_dict[parsed_xml_type])\n temp_comp = parsed_xml_dict[parsed_xml_type].get_component()\n if VERBOSE:\n print(\n \"Generating component dicts for %s::%s\"\n % (temp_comp.get_namespace(), temp_comp.get_name())\n )\n write_pymods_from_comp(parsed_xml_dict[parsed_xml_type], opt, topology_model)\n if VERBOSE:\n print(\n \"Generated component dicts for %s::%s\"\n % (temp_comp.get_namespace(), temp_comp.get_name())\n )\n\n topology_model.set_instance_xml_list(xml_list)\n\n if the_parsed_topology_xml.get_namespace():\n if VERBOSE:\n print(\n \"Generated pymods for topology %s::%s\"\n % (\n the_parsed_topology_xml.get_namespace(),\n the_parsed_topology_xml.get_name(),\n )\n )\n else:\n if VERBOSE:\n print(\n \"Generated pymods for topology %s\"\n % (the_parsed_topology_xml.get_name())\n )", "title": "" }, { "docid": "8b7d04b25bee59ebf17f61215a787437", "score": "0.51035786", "text": "def main(input_dir: str, output_dir: str) -> None:\n input_dir = Path(input_dir)\n output_dir = Path(output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n\n spacy_formatted_data = {\"train\": [], \"test\": []}\n\n for partition in spacy_formatted_data:\n note_dir = input_dir / partition / f\"{partition}_note\"\n if partition == \"train\":\n norm_dir = input_dir / partition / f\"{partition}_norm\"\n else:\n # TODO (John): Is this the name of the directory in the official download?\n norm_dir = input_dir / partition / f\"{partition}_norm_cui_replaced_with_unk\"\n\n for note_filepath in note_dir.iterdir():\n if not note_filepath.name.endswith(\".txt\"):\n continue\n\n norm_filepath = norm_dir / f\"{note_filepath.stem}.norm\"\n\n with open(note_filepath, \"r\") as f:\n spacy_formatted_data[partition].append((f.read(), {\"entities\": []}))\n with open(norm_filepath, \"r\") as f:\n for line in f:\n start, end = tuple(map(int, line.strip().split(\"||\")[-2:]))\n spacy_formatted_data[partition][-1][-1][\"entities\"].append((start, end, ENTITY_LABEL))\n\n with open(output_dir / \"spacy_formatted_data.pickle\", \"wb\") as f:\n pickle.dump(spacy_formatted_data, f)", "title": "" }, { "docid": "df99f942c3c7b8f03f573780597898e3", "score": "0.5099381", "text": "def pmodel(self):\r\n if not hasattr(self, '_pmodel'):\r\n pmodelname = self.get('pmodel-name')\r\n if pmodelname == 'ISO9613':\r\n # create environment\r\n G = tuple([float(x) for x in self.get('pmodel-iso9613-ground-coeffs').strip('()').split(',')])\r\n p = self.getFloat('pmodel-iso9613-atmospheric-pressure')\r\n t = self.getFloat('pmodel-iso9613-atmospheric-temperature')\r\n r = self.getFloat('pmodel-iso9613-atmospheric-humidity')\r\n self._environment = ISO9613Environment(G = G, p = p, t = t, r = r)\r\n # 
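# pmodel's compute-once caching, reduced to a runnable sketch: build the
# expensive object on first access, cache it on the instance, and return
# the cached object on every later access.
class LazyModel:
    @property
    def pmodel(self):
        if not hasattr(self, "_pmodel"):
            self._pmodel = object()          # expensive construction stands in here
        return self._pmodel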
create propagation model\r\n self._pmodel = ISO9613Model(environment = self._environment)\r\n self._pmodel.correction['geometricDivergence'] = self.getBool('pmodel-iso9613-flag-geometric-divergence')\r\n self._pmodel.correction['atmosphericAbsorption'] = self.getBool('pmodel-iso9613-flag-atmospheric-absorption')\r\n self._pmodel.correction['groundEffect'] = self.getBool('pmodel-iso9613-flag-ground-effect')\r\n self._pmodel.correction['sourceDirectivity'] = self.getBool('pmodel-iso9613-flag-source-directivity')\r\n else:\r\n raise Exception('configuration file: Propagation model \"%s\" not known - use \"ISO9613\"' % pmodelname)\r\n return self._pmodel", "title": "" }, { "docid": "7bb2d23548d39b59677cfcfc9082e026", "score": "0.50863206", "text": "def main():\n\n parser = argparse.ArgumentParser(description='Duolingo shared task baseline model')\n parser.add_argument('--language', default='en_es', help='choose from [es_en, en_es, fr_en]', required=False)\n parser.add_argument('--dataset_path', default='../data/%s/', required=False)\n parser.add_argument('--outputs_path', default='./outputs/', required=False)\n args = parser.parse_args()\n\n dataset_path = args.dataset_path % args.language\n assert os.path.isdir(dataset_path)\n\n train_path = dataset_path + '%s.slam.20190204.train' % args.language\n dev_path = dataset_path + '%s.slam.20190204.dev' % args.language\n test_path = dataset_path + '%s.slam.20190204.test' % args.language\n assert os.path.isfile(train_path)\n assert os.path.isfile(dev_path)\n assert os.path.isfile(test_path)\n\n if not os.path.isdir(args.outputs_path): os.mkdir(args.outputs_path)\n\n # ============================== Hyper Parameter ==============================\n dbg = False\n from_path = None\n # from_path = './saved_model/seq2seq_nomlp_20'\n # from_path = './saved_model/seq2seq_exp_20'\n # from_path = './saved_model/attention_v2_20'\n # from_path = './saved_model/cnn_3'\n # from_path = './saved_model/seq2seq_c_7'\n # from_path = './saved_model/transformer_1'\n epochs = 10 if dbg else 10\n lang = Lang()\n\n # ============================== Data Loading ==============================\n\n print('Begin Data Loading')\n start_time = time.time()\n training_data, training_labels = load_data(train_path, lang, dbg=dbg, use_all_features=True)\n dev_data = load_data(dev_path, lang, use_all_features=True)\n test_data = load_data(test_path, lang, use_all_features=True)\n users = list(get_users(training_data).union(get_users(dev_data)).union(get_users(test_data)))\n lang.addUsers(users)\n for i in range(len(lang.letters)): lang.letter2Index[lang.letters[i]] = i\n end_time = time.time()\n print('Data Loaded\\t Time Taken %0.2fm' % ((end_time - start_time)/60))\n\n model = Model(lang)\n\n # ============================== Training ==============================\n if from_path == None:\n print('Begin Training')\n train_loader = get_dataloader(training_data, lang, training_labels)\n model.train(train_loader, epochs)\n\n # ============================== Inference ==============================\n # print('Begin Inference-Dev', end=' ')\n # start_time = time.time()\n # dev_loader = get_dataloader(dev_data, lang)\n # predictions = model.predict_for_set(dev_loader, from_path)\n # with open(args.outputs_path + '%s_dev_predictions.pred' % args.language, 'wt') as f:\n # for instance_id, prediction in iteritems(predictions):\n # f.write(instance_id + ' ' + str(prediction) + '\\n')\n # end_time = time.time()\n # print('| %0.2fm' % ((end_time-start_time)/60))\n \n print('Begin Inference-Test', 
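# The elapsed-time reporting idiom used around each load/train/inference
# phase above, as a standalone sketch:
import time

start_time = time.time()
sum(range(10 ** 6))                          # stand-in for the timed work
print('| %0.2fm' % ((time.time() - start_time) / 60))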
end=' ')\n start_time = time.time()\n test_loader = get_dataloader(test_data, lang)\n predictions = model.predict_for_set(test_loader, from_path)\n with open(args.outputs_path + '%s_test_predictions.pred' % args.language, 'wt') as f:\n for instance_id, prediction in predictions.items():\n f.write(instance_id + ' ' + str(prediction) + '\\n')\n end_time = time.time()\n print('| %0.2fm' % ((end_time-start_time)/60))", "title": "" }, { "docid": "aa7982cf4b80468889131323fa94e9d7", "score": "0.50660026", "text": "def generate_language_model(**kwargs):\n\n lm = Session.query(model.MorphemeLanguageModel).get(kwargs['morpheme_language_model_id'])\n trie_path = lm.get_file_path('trie')\n trie_mod_time = lm.get_modification_time(trie_path)\n lm.generate_succeeded = False\n try:\n lm.write_corpus()\n except Exception, e:\n lm.generate_message = u'Error writing the corpus file. %s' % e\n try:\n lm.write_vocabulary()\n except Exception, e:\n lm.generate_message = u'Error writing the vocabulary file. %s' % e\n try:\n lm.write_arpa(kwargs['timeout'])\n except Exception, e:\n lm.generate_message = u'Error writing the ARPA file. %s' % e\n try:\n lm.generate_trie()\n except Exception, e:\n lm.generate_message = u'Error generating the LMTrie instance. %s' % e\n else:\n if lm.get_modification_time(trie_path) != trie_mod_time:\n lm.generate_succeeded = True\n lm.generate_message = u'Language model successfully generated.'\n else:\n lm.generate_message = u'Error generating the LMTrie instance.'\n lm.generate_attempt = unicode(uuid4())\n lm.modifier_id = kwargs['user_id']\n lm.datetime_modified = h.now()\n Session.commit()", "title": "" }, { "docid": "2cff1d19062392a49928573882a530bd", "score": "0.5064492", "text": "def load_model(model_path):\r\n nlp = spacy.blank('en')\r\n if 'ner' not in nlp.pipe_names:\r\n ner = nlp.create_pipe('ner')\r\n nlp.add_pipe(ner)\r\n\r\n ner = nlp.from_disk(model_path)\r\n return ner", "title": "" }, { "docid": "fd82b28979eb3edddc68ff36415c4e0c", "score": "0.5058911", "text": "def _write_imports(\n cls,\n pickle_type: Optional[str] = None,\n mojo_model: Optional[bool] = False,\n binary_h2o_model: Optional[bool] = False,\n tf_model: Optional[bool] = False,\n binary_string: Optional[str] = None,\n ) -> None:\n pickle_type = pickle_type if pickle_type else \"pickle\"\n cls.score_code += (\n f\"import math\\nimport {pickle_type}\\nimport pandas as pd\\n\"\n \"import numpy as np\\nfrom pathlib import Path\\n\\n\"\n )\n \"\"\"\nimport math\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\n\n\n \"\"\"\n\n try:\n if current_session().version_info() != 3.5:\n cls.score_code += \"import settings\\n\\n\"\n \"\"\"\nimport settings\n \n \n \"\"\"\n except AttributeError:\n warn(\n \"No current session connection was found to a SAS Viya server. 
Score \"\n \"code will be written under the assumption that the target server is \"\n \"SAS Viya 4.\"\n )\n\n if mojo_model or binary_h2o_model:\n cls.score_code += \"import h2o\\n\\nh2o.init()\\n\\n\"\n \"\"\"\nimport h2o\n\nh2o.init()\n\n \"\"\"\n elif tf_model:\n cls.score_code += \"import tensorflow as tf\\n\\n\"\n \"\"\"\nimport tensorflow as tf\n\n \"\"\"\n elif binary_string:\n cls.score_code += (\n f'import codecs\\n\\nbinary_string = \"{binary_string}\"'\n f\"\\nmodel = {pickle_type}.loads(codecs.decode(binary_string\"\n '.encode(), \"base64\"))\\n\\n'\n )\n \"\"\"\nimport codecs\n\nbinary_string = \"<binary string>\"\nmodel = pickle.load(codecs.decode(binary_string.encode(), \"base64\"))\n\n \"\"\"", "title": "" }, { "docid": "2c1108513704f5cbba7b473aa2906d50", "score": "0.50567424", "text": "def write_pymods_from_comp(the_parsed_component_xml, opt, topology_model):\n global BUILD_ROOT\n global DEPLOYMENT\n global VERBOSE\n\n parsed_port_xml_list = []\n parsed_serializable_xml_list = []\n # uses the topology model to process the items\n # checks if the topology model exists\n if topology_model is None:\n PRINT.info(\n \"Topology model was not specified. Please also input a topology model when running this command.\"\n )\n raise OSError\n\n port_type_files_list = the_parsed_component_xml.get_port_type_files()\n\n for port_file in port_type_files_list:\n port_file = search_for_file(\"Port\", port_file)\n xml_parser_obj = XmlPortsParser.XmlPortsParser(port_file)\n parsed_port_xml_list.append(xml_parser_obj)\n del xml_parser_obj\n\n serializable_type_files_list = (\n the_parsed_component_xml.get_serializable_type_files()\n )\n for serializable_file in serializable_type_files_list:\n serializable_file = search_for_file(\"Serializable\", serializable_file)\n xml_parser_obj = XmlSerializeParser.XmlSerializeParser(\n serializable_file\n ) # Telemetry/Params can only use generated serializable types\n # check to make sure that the serializables don't have things that channels and parameters can't have\n # can't have external non-xml members\n if len(xml_parser_obj.get_include_header_files()):\n print(\n \"ERROR: Component include serializables cannot use user-defined types. 
file: %s\"\n                % serializable_file\n            )\n            sys.exit(-1)\n\n        parsed_serializable_xml_list.append(xml_parser_obj)\n        del xml_parser_obj\n\n    model = CompFactory.CompFactory.getInstance()\n    component_model = model.create(\n        the_parsed_component_xml, parsed_port_xml_list, parsed_serializable_xml_list\n    )\n\n    instChannelWriter = InstChannelWriter.InstChannelWriter()\n    instCommandWriter = InstCommandWriter.InstCommandWriter()\n    instEventWriter = InstEventWriter.InstEventWriter()\n\n    if opt.dict_dir is None:\n        if VERBOSE:\n            print(\"Dictionary output directory not specified!, defaulting to cwd\")\n        opt.dict_dir = os.getcwd()\n    os.environ[\"DICT_DIR\"] = opt.dict_dir\n\n    # iterate through command instances\n    for command_model in component_model.get_commands():\n        if VERBOSE:\n            print(\"Generating command dict %s\" % command_model.get_mnemonic())\n        instCommandWriter.DictStartWrite(command_model, topology_model)\n        instCommandWriter.DictHeaderWrite(command_model, topology_model)\n        instCommandWriter.DictBodyWrite(command_model, topology_model)\n\n    for parameter_model in component_model.get_parameters():\n        if VERBOSE:\n            print(\"Generating parameter dict %s\" % parameter_model.get_name())\n        instCommandWriter.DictStartWrite(parameter_model, topology_model)\n        instCommandWriter.DictHeaderWrite(parameter_model, topology_model)\n        instCommandWriter.DictBodyWrite(parameter_model, topology_model)\n\n    # iterate through command instances\n    for event_model in component_model.get_events():\n        if VERBOSE:\n            print(\"Generating event dict %s\" % event_model.get_name())\n        instEventWriter.DictStartWrite(event_model, topology_model)\n        instEventWriter.DictHeaderWrite(event_model, topology_model)\n        instEventWriter.DictBodyWrite(event_model, topology_model)\n\n    # iterate through command instances\n    for channel_model in component_model.get_channels():\n        if VERBOSE:\n            print(\"Generating channel dict %s\" % channel_model.get_name())\n        instChannelWriter.DictStartWrite(channel_model, topology_model)\n        instChannelWriter.DictHeaderWrite(channel_model, topology_model)\n        instChannelWriter.DictBodyWrite(channel_model, topology_model)", "title": "" }, { "docid": "dbac7e9dbefed924c07d9e4bb763e9cb", "score": "0.5051691", "text": "def convert(self, path, version, target):\n        source = self.comparer.get_representation(path)\n        lines = [ '# <fortpy version=\"{}\"></fortpy>\\n'.format(version) ]\n\n        for line in self.comparer.template.contents[version].preamble:\n            lines.append(line.write(source.preamble, source.version, source.stored) + \"\\n\")\n\n        for line in self.comparer.template.contents[version].body:\n            for valueset in source.body:\n                lines.append(line.write(valueset, source.version, source.stored) + \"\\n\")\n\n        with open(os.path.expanduser(target), 'w') as f:\n            f.writelines(lines)", "title": "" }, { "docid": "34c2bef9339db6de5f9e061ddef3fb47", "score": "0.5050992", "text": "def convert_to_onnx(pytorch_model, input_shape, output_file, input_names, output_names):\n\n    pytorch_model.eval()\n    onnx_input_shape = torch.randn(input_shape)\n    torch.onnx.export(pytorch_model, onnx_input_shape, output_file,\n                      verbose=True, input_names=input_names, output_names=output_names)\n\n    # Model check after conversion\n    import onnx\n    model_from_onnx = onnx.load(output_file)\n    try:\n        onnx.checker.check_model(model_from_onnx)\n        print('ONNX check passed successfully.')\n    except onnx.onnx_cpp2py_export.checker.ValidationError as exc:\n        sys.exit('ONNX check failed with error: ' + str(exc))", "title": "" }, { "docid": "5f4a2a2fd203b35f20cc76a1f1693031", "score": "0.5048336", 
"text": "def _ppo(self, m):\n\n m[\"ppo1\"] = m[\"MemoryOp <: po_loc :> Write\"]\n m[\"ppo2\"] = m[\"rdw\"]\n m[\"ppo3\"] = m[\"(AMO + StoreConditional) <: (rf & po)\"]\n m[\"ppo4rr\"] = m[\"(Read <: po :> (FencePR & FenceSR)).(po :> Read)\"]\n m[\"ppo4rw\"] = m[\"(Read <: po :> (FencePR & FenceSW)).(po :> Write)\"]\n m[\"ppo4wr\"] = m[\"(Write <: po :> (FencePW & FenceSR)).(po :> Read)\"]\n m[\"ppo4ww\"] = m[\"(Write <: po :> (FencePW & FenceSW)).(po :> Write)\"]\n m[\"ppo4tso1\"] = m[\"(Read <: po :> (FenceTSO)).(po :> MemoryOp)\"]\n m[\"ppo4tso2\"] = m[\"(MemoryOp <: po :> (FenceTSO)).(po :> Write)\"]\n m[\"ppo4\"] = m[\"ppo4rr + ppo4rw + ppo4wr + ppo4ww + ppo4tso1 + ppo4tso2\"]\n m[\"ppo5\"] = m[\"Aq <: po :> MemoryOp\"]\n m[\"ppo6\"] = m[\"MemoryOp <: po :> Rl\"]\n m[\"ppo7\"] = m[\"RCsc <: po :> RCsc\"]\n m[\"ppo8\"] = m[\"pair\"]\n m[\"ppo9\"] = m[\"addrdep\"]\n m[\"ppo10\"] = m[\"datadep\"]\n m[\"ppo11\"] = m[\"ctrldep.po :> Write\"]\n m[\"ppo12\"] = m[\"(addrdep + datadep).(rf & po)\"]\n m[\"ppo13\"] = m[\"addrdep.po :> Write\"]\n\n return m[\n \"ppo1 + ppo2 + ppo3 + ppo4 + ppo5 + ppo6 + ppo7 + \"\n \"ppo8 + ppo9 + ppo10 + ppo11 + ppo12 + ppo13\"\n ]", "title": "" }, { "docid": "7ac89b0a71c9943d58dd2e15032afde8", "score": "0.504813", "text": "def makeFile(m, model):\n tempfile = ['#include \"lime.h\"\\n', '#include \"math.h\"\\n',\n '#include \"stdio.h\"\\n', '#include \"stdlib.h\"\\n',\n '#include \"%s\"\\n\\n' % model.header.fn]\n writeModelProperties(tempfile, model)\n writeWeighting(tempfile, model)\n writeImageParameters(tempfile, m, model)\n writeInterpolationFuncs(tempfile, model)\n writeDensity(tempfile, model)\n writeTemperatures(tempfile, model)\n writeAbundance(tempfile, model)\n if model.dust is not None:\n writeGastoDust(tempfile, model)\n writeDoppler(tempfile, model)\n writeVelocityStructure(tempfile, model)\n with open('model_%d.c' % m, 'w') as tosave:\n for line in tempfile:\n tosave.write('%s' % line)\n return", "title": "" }, { "docid": "06ef0a20cae196e2de5c67f6c87df504", "score": "0.5047686", "text": "def _save_prototxt(self):\n solver_path = osp.dirname(self._solver_prototxt)\n if not osp.exists(solver_path):\n os.makedirs(solver_path)\n with open(self._solver_prototxt, 'w') as f:\n f.write(text_format.MessageToString(self._solver))", "title": "" }, { "docid": "9127df10abf73188e578bcc4dcd4d09c", "score": "0.50475544", "text": "def owl_to_model(fname):\n io_class = autoclass('org.biopax.paxtools.io.SimpleIOHandler')\n io = io_class(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)\n\n try:\n file_is = autoclass('java.io.FileInputStream')(fname)\n except JavaException:\n logger.error('Could not open data file %s' % fname)\n return\n try:\n biopax_model = io.convertFromOWL(file_is)\n except JavaException as e:\n logger.error('Could not convert data file %s to BioPax model' % fname)\n logger.error(e)\n return\n\n file_is.close()\n\n return biopax_model", "title": "" }, { "docid": "b084295606442608e4e95a1201da837c", "score": "0.5046919", "text": "def export_model(self, out=None, **kwargs):\n # Remove all code transformations but respect those provided explicitly\n kwargs.setdefault('length_for_alias', None)\n kwargs.setdefault('name_all_constraints', False)\n\n CpoCompiler(self, **kwargs).write(out)", "title": "" }, { "docid": "380e97eb5048bd83b762af621acf5fd7", "score": "0.5038669", "text": "def main(model=None, output_dir=None, n_iter=100):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n 
else:\n nlp = spacy.blank(\"en\") # create blank Language class\n print(\"Created blank 'en' model\")\n \n # create the built-in pipeline components and add them to the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if \"ner\" not in nlp.pipe_names:\n ner = nlp.create_pipe(\"ner\")\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe(\"ner\")\n \n # add labels\n for _, annotations in TRAIN_DATA:\n for ent in annotations.get(\"entities\"):\n ner.add_label(ent[2])\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != \"ner\"]\n with nlp.disable_pipes(*other_pipes): # only train NER\n # reset and initialize the weights randomly – but only if we're\n # training a new model\n if model is None:\n nlp.begin_training()\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(\n texts, # batch of texts\n annotations, # batch of annotations\n drop=0.5, # dropout - make it harder to memorise data\n losses=losses,\n )\n #print(\"Losses\", losses)\n\n # test the trained model\n for text in TEST_DATA:\n doc = nlp(text)\n print(\"Entities\", [(ent.text, ent.label_) for ent in doc.ents])\n print(\"Tokens\", [(t.text, t.ent_type_, t.ent_iob) for t in doc])\n \n # save model to output directory\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n \n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n for text, _ in TRAIN_DATA:\n doc = nlp2(text)\n print(\"Entities\", [(ent.text, ent.label_) for ent in doc.ents])", "title": "" }, { "docid": "4dfa7950183441b0441fb0c52fd47749", "score": "0.5037608", "text": "def convert2or():\n # parse converter arguments\n args = get_args_convert2or()\n\n # input data parameters\n try:\n workspace_dir, exec_file_name, data_uri, data_path, output_path = get_params()\n entry_filename = os.path.splitext(os.path.basename(exec_file_name))[0]\n except Exception as e:\n print('Invalid arguments, {}'.format(e))\n sys.exit(1)\n\n else:\n # Generate requirements.txt\n try:\n import pipreqs\n except ImportError as e:\n raise RuntimeError('Package installation failed: {}'.format(e))\n\n else:\n time.sleep(1)\n try:\n p = subprocess.Popen([\"pipreqs\", \"--force\", workspace_dir])\n p.wait()\n time.sleep(1)\n\n # fix the bug raising from 'tensorflow', 'tensorflow_gpu'\n filename = os.path.join(workspace_dir, \"requirements.txt\")\n\n rw_file(filename, matplotlib=\"matplotlib\", tensorflow_gpu=\"\", tensorflow=\"tensorflow-gpu\")\n remove_empty_lines(filename)\n print(\"Generated 'requirements.txt' successfully!\")\n\n except Exception as e:\n raise RuntimeError(\"Generating 'requirements.txt' failed: {}\".format(e))\n\n else:\n # Generate params.json\n try:\n exec_file_name_v = os.path.relpath(exec_file_name, start=workspace_dir)\n data_path_v = \"\" if data_path == \"\" else os.path.relpath(data_path, start=workspace_dir)\n output_path_v = \"\" if output_path == \"\" else os.path.relpath(output_path, start=workspace_dir)\n params_json = json.dumps({\"exec_file_name\": exec_file_name_v,\n \"data_uri\": data_uri,\n \"data_path\": data_path_v,\n \"output_path\": 
output_path_v,\n })\n with open(os.path.join(workspace_dir, \"params.json\"), 'w+') as f:\n f.write(params_json)\n print(\"Generated 'params.json' successfully!\")\n\n except Exception as e:\n raise IOError(\"Generating 'params.json' failed: {}\".format(e))\n\n else:\n time.sleep(2)\n try:\n zip_folder_path = os.path.join(workspace_dir, os.pardir, \"task_files\")\n\n if not os.path.exists(zip_folder_path):\n os.makedirs(zip_folder_path)\n\n output_filename = str(entry_filename) + \"_orion.zip\"\n zip_folder(workspace_dir, os.path.join(zip_folder_path, output_filename))\n print('Zipped files successfully!')\n except Exception as e:\n raise RuntimeError('Zipping files failed: {}'.format(e))\n else:\n try:\n os.remove(os.path.join(workspace_dir, \"params.json\"))\n os.remove(os.path.join(workspace_dir, \"requirements.txt\"))\n except Exception as e:\n raise RuntimeError('Removing files failed: {}'.format(e))", "title": "" }, { "docid": "78aca13d4302fac740b8dddb5e1eb7e6", "score": "0.5032639", "text": "def descout_to_input( # noqa: C901 - fxn too complex\n outfile,\n infile,\n objective=\"force\",\n optimizer=\"lsq-exact\",\n header=\"#DESC-generated input file\",\n ftol=1e-2,\n xtol=1e-6,\n gtol=1e-6,\n maxiter=100,\n ):\n f = open(outfile, \"w+\")\n\n f.seek(0)\n\n eq = load(infile)\n try:\n eq0 = eq[-1]\n except TypeError:\n eq0 = eq\n\n f.write(header + \"\\n\")\n\n f.write(\"# global parameters\\n\")\n f.write(\"sym = {:1d} \\n\".format(eq0.sym))\n f.write(\"NFP = {:3d} \\n\".format(int(eq0.NFP)))\n f.write(\"Psi = {:.8f} \\n\".format(eq0.Psi))\n\n f.write(\"\\n# spectral resolution\\n\")\n for key, val in {\n \"L_rad\": \"L\",\n \"M_pol\": \"M\",\n \"N_tor\": \"N\",\n \"L_grid\": \"L_grid\",\n \"M_grid\": \"M_grid\",\n \"N_grid\": \"N_grid\",\n }.items():\n f.write(f\"{key} = {getattr(eq0, val)}\\n\")\n\n f.write(\"\\n\\n# solver tolerances\\n\")\n f.write(f\"ftol = {ftol}\\n\")\n f.write(f\"xtol = {xtol}\\n\")\n f.write(f\"gtol = {gtol}\\n\")\n f.write(f\"maxiter = {maxiter}\\n\")\n\n f.write(\"\\n\\n# solver methods\\n\")\n f.write(f\"optimizer = {optimizer}\\n\")\n f.write(f\"objective = {objective}\\n\")\n f.write(\"spectral_indexing = {}\\n\".format(eq0._spectral_indexing))\n f.write(\"node_pattern = {}\\n\".format(eq0._node_pattern))\n\n f.write(\"\\n# pressure and rotational transform/current profiles\\n\")\n\n if eq0.iota:\n assert (\n eq0.pressure.__class__.__name__ == \"PowerSeriesProfile\"\n and eq0.iota.__class__.__name__ == \"PowerSeriesProfile\"\n ), \"Equilibrium must have power series profiles for ascii io\"\n char = \"i\"\n iseven_pres = int(eq0._pressure.basis.sym == \"even\") + 1\n iseven_iota = int(eq0._iota.basis.sym == \"even\") + 1\n pres_profile = np.zeros((eq0.L + 1,))\n iota_profile = np.zeros((eq0.L + 1,))\n pres_profile[: eq0.L + 1 : iseven_pres] = eq0._pressure.params\n iota_profile[: eq0.L + 1 : iseven_iota] = eq0._iota.params\n\n idxs = np.linspace(0, eq0.L - 1, eq0.L, dtype=int)\n for l in idxs:\n f.write(\n \"l: {:3d}\\tp = {:16.8E}\\t{} = {:16.8E}\\n\".format(\n int(l), pres_profile[l], char, iota_profile[l]\n )\n )\n else:\n assert (\n eq0.pressure.__class__.__name__ == \"PowerSeriesProfile\"\n and eq0.current.__class__.__name__ == \"PowerSeriesProfile\"\n ), \"Equilibrium must have power series profiles for ascii io\"\n char = \"c\"\n iseven_pres = int(eq0._pressure.basis.sym == \"even\") + 1\n iseven_curr = int(eq0._current.basis.sym == \"even\") + 1\n pres_profile = np.zeros((eq0.L + 1,))\n curr_profile = np.zeros((eq0.L + 1,))\n pres_profile[: eq0.L 
+ 1 : iseven_pres] = eq0._pressure.params\n curr_profile[: eq0.L + 1 : iseven_curr] = eq0._current.params\n\n idxs = np.linspace(0, eq0.L - 1, eq0.L, dtype=int)\n for l in idxs:\n f.write(\n \"l: {:3d}\\tp = {:16.8E}\\t{} = {:16.8E}\\n\".format(\n int(l), pres_profile[l], char, curr_profile[l]\n )\n )\n\n f.write(\"\\n\")\n\n f.write(\"\\n# fixed-boundary surface shape\\n\")\n # boundary paramters\n if eq0.sym:\n for k, (l, m, n) in enumerate(eq0.surface.R_basis.modes):\n if abs(eq0.Rb_lmn[k]) > 1e-8:\n f.write(\n \"l: {:3d}\\tm: {:3d}\\tn: {:3d}\\tR1 = {:16.8E}\\t\\\n Z1 = {:16.8E}\\n\".format(\n int(0), m, n, eq0.Rb_lmn[k], 0\n )\n )\n for k, (l, m, n) in enumerate(eq0.surface.Z_basis.modes):\n if abs(eq0.Zb_lmn[k]) > 1e-8:\n f.write(\n \"l: {:3d}\\tm: {:3d}\\tn: {:3d}\\tR1 = {:16.8E}\\t\\\n Z1 = {:16.8E}\\n\".format(\n int(0), m, n, 0, eq0.Zb_lmn[k]\n )\n )\n else:\n for k, (l, m, n) in enumerate(eq0.surface.R_basis.modes):\n if abs(eq0.Rb_lmn[k]) > 1e-8 or abs(eq0.Zb_lmn[k]) > 1e-8:\n f.write(\n \"l: {:3d}\\tm: {:3d}\\tn: {:3d}\\tR1 = {:16.8E}\\t\\\n Z1 = {:16.8E}\\n\".format(\n int(0), m, n, eq0.Rb_lmn[k], eq0.Zb_lmn[k]\n )\n )\n\n f.close()", "title": "" }, { "docid": "aac8d9a34c982c21d64ee69aa9b8aada", "score": "0.50321543", "text": "def models(self):\r\n # make model directory, removing the old one if necessary\r\n rm_rf(self.cfg.model_dir)\r\n mkdir_p(self.cfg.model_dir)\r\n\r\n # run generator script\r\n gen_script = os.path.join(self._prj_root, 'gen.py')\r\n\r\n if 'PYTHON_MSDSL' in os.environ:\r\n python_name = os.environ['PYTHON_MSDSL']\r\n else:\r\n python_name = which('python')\r\n \r\n call([python_name, gen_script, '-o', self.cfg.model_dir, '--dt', str(self.dt)])", "title": "" }, { "docid": "37e59d1ec3cd22e83b1a98ae5bb15330", "score": "0.502148", "text": "def write_model_to_file(self) -> str:\n\n model_file_content = self.interpreter.get_model_file_content()\n\n db_logger.info(f\"Writing {self.model_file_url}\")\n\n with open(self.model_file_location, 'w') as model:\n model.write(model_file_content)\n\n file_name = os.path.basename(self.model_file_location)\n project_dir = self.model_file_location.replace(file_name, '')\n\n helpers.process_includes(\n self.model_file_url, project_dir, self.interpreter\n )\n\n if not self.no_score:\n self.model_file_location = self.setup_protocol(self.score)\n\n return self.model_file_location", "title": "" }, { "docid": "5a4533cb8662178a596b89ed1d73420b", "score": "0.50117064", "text": "def test_ImportUFONoSideEffectLO(self): \n ufo_model = ufomodels.load_model(import_ufo.find_ufo_path('sm'),False)\n original_all_particles = copy.copy(ufo_model.all_particles)\n original_all_vertices = copy.copy(ufo_model.all_vertices)\n original_all_couplings = copy.copy(ufo_model.all_couplings)\n original_all_lorentz = copy.copy(ufo_model.all_lorentz)\n original_all_parameters = copy.copy(ufo_model.all_parameters)\n original_all_orders = copy.copy(ufo_model.all_orders)\n original_all_functions = copy.copy(ufo_model.all_functions)\n\n ufo2mg5_converter = import_ufo.UFOMG5Converter(ufo_model)\n model = ufo2mg5_converter.load_model()\n parameters, couplings = import_ufo.OrganizeModelExpression(ufo_model).main() \n\n self.assertEqual(original_all_particles,ufo_model.all_particles) \n self.assertEqual(original_all_vertices,ufo_model.all_vertices)\n self.assertEqual(original_all_couplings,ufo_model.all_couplings)\n self.assertEqual(original_all_lorentz,ufo_model.all_lorentz)\n self.assertEqual(original_all_parameters,ufo_model.all_parameters)\n 
self.assertEqual(original_all_orders,ufo_model.all_orders)\n self.assertEqual(original_all_functions,ufo_model.all_functions)", "title": "" }, { "docid": "7e8c2e02b0ee94ddd321ca2db4033cd0", "score": "0.50004613", "text": "def exe_dyn_mdl(fv, usr):\n\n exe = os.chdir(prg_path + '/dynamic_mdl-0.1') \n exe = os.getcwd() \n pcs = sbp.Popen([exe + '/a.out', str(fv)], stdout=sbp.PIPE, stderr=sbp.PIPE)\n out, error = pcs.communicate()\n if out:\n print('OK', out)\n if error:\n print('Error', error.strip()) \n if not pcs.poll():\n print(\"Dynamic Module execute finish\")\n p_nme = 'Dynamic'\n dest_pth = fles_dir + usr + '/' + p_nme\n dest_pth = dest_pth + '/' + time.get_date_time_hm()\n dummy_sys_file.crt_dir(dest_pth)\n \"\"\"Creating directory for program execution\"\"\"\n\n \"\"\"Names of the generated files\"\"\"\n fle_name1 = 'sk'\n fle_name2 = 'f_self'\n fle_name3 = 'coefficient'\n \"\"\"full path of the outputs of program\"\"\"\n out_fle1 = exe + '/' + 'sk.dat'\n out_fle2 = exe + '/' + 'fself.dat'\n out_fle3 = exe + '/' + 'coeficiente.dat' \n dummy_sys_file.cpy_file_to_pth_dir(out_fle1, dest_pth, fle_name1)\n dummy_sys_file.cpy_file_to_pth_dir(out_fle2, dest_pth, fle_name2)\n dummy_sys_file.cpy_file_to_pth_dir(out_fle3, dest_pth, fle_name3)", "title": "" }, { "docid": "10bf29315de0dab52975584c75ef2dfb", "score": "0.49911758", "text": "def generate_package(self):\n\n # Create a directory (mymodel)\n cwd = Path(self.dir)\n directory=cwd/'csharp_model'\n if (directory).isdir() :\n self.dir = directory\n else:\n self.dir = directory.mkdir()\n\n files = []\n count = 0\n\n for model in self.models:\n\n\n self.generate_component(model)\n\n ext = '' if count == 0 else str(count)\n filename = self.dir/\"%s.cs\"%signature(model)\n\n with open(filename, \"w\") as python_file:\n python_file.write(self.code.encode('utf-8','ignore'))\n files.append(filename)\n\n model.module_name = str(Path(filename).namebase)\n\n count += 1\n\n return files", "title": "" }, { "docid": "22f27813e63fc901b230aaee14b21dfe", "score": "0.49885112", "text": "def new_IDP_gen(subj,LUT_txt): #,fix4melviewtxt\n\n\n #remove trailing forward slashes in subject paths\n if subj.endswith(\"/\"):\n subj = subj[:-1]\n\n if not os.path.exists(subj + \"/IDP_files/\"):\n os.makedirs(subj + \"/IDP_files/\")\n\n subjName = subj[subj.rfind(\"/\") + 1 :]\n\n\n # IDP_FC_file = os.path.join(subj + \"/IDP_files/\", \"tvb_IDP_FC_dist.txt\")\n # IDP_SC_file = os.path.join(subj + \"/IDP_files/\", \"tvb_IDP_SC_dist.txt\")\n # IDP_MELODIC_file = os.path.join(subj + \"/IDP_files/\", \"tvb_IDP_MELODIC_SNR.txt\")\n # IDP_MCFLIRT_file = os.path.join(subj + \"/IDP_files/\", \"tvb_IDP_MCFLIRT_disp.txt\")\n # IDP_homotopic_file = os.path.join(subj + \"/IDP_files/\", \"tvb_IDP_homotopic.txt\")\n # new_IDP_list_file = os.path.join(subj + \"/IDP_files/\", \"tvb_new_IDPs.txt\")\n\n new_IDP_file = os.path.join(subj + \"/IDP_files/\", \"tvb_new_IDP.txt\")\n\n\n # IDP_output_files = [IDP_FC_file,IDP_SC_file,IDP_MELODIC_file,IDP_MCFLIRT_file,IDP_homotopic_file]\n\n # for file in IDP_output_files:\n # with open(file, 'w') as fp:\n # pass\n\n with open(new_IDP_file, 'w') as fp:\n line = '\\t'.join([\"short\",\"num\",\"category\",\"num_in_cat\",\"long\",\"unit\",\"dtype\",\"description\",\"value\"])\n\n fp.write(line)\n\n\n\n fix4melviewtxt=\"\"\n\n FC_distribution(subj)\n SC_distribution(subj)\n MELODIC_SNR(subj,fix4melviewtxt)\n MCFLIRT_displacement(subj) \n\n homotopic(subj,LUT_txt)\n \n\n\n\n\n #TODO TL/distance, or ts.txt", "title": "" }, { "docid": 
"cde366ecb10bf14e7b028bb5eeb70a26", "score": "0.497974", "text": "def produce_layout(eoi, graph):\n\n subprocess.run((\"/usr/bin/dot\", \"{}\".format(graph.filename), \"-o\", \n \"{}/layout.dot\".format(eoi)))", "title": "" }, { "docid": "42db4bc248db8077aaa00be0cc702c70", "score": "0.4975401", "text": "def load_model_output(text):\r\n # uncomment when working in linux and remove subsequent two lines\r\n # nlp = spacy.load(\"../models/quick-spacy/\")\r\n model_path = Path(__file__).parent.absolute() / \"models/quick-spacy/\"\r\n nlp = spacy.load(model_path)\r\n doc = nlp(text)\r\n sentence = [ent.text for ent in doc.ents]\r\n labels = [ent.label_ for ent in doc.ents]\r\n return sentence, labels", "title": "" }, { "docid": "d71b59de5a8a54084e51dfb95444b719", "score": "0.49555877", "text": "def generate_theory(\n grammar,\n config,\n theory_op_file,\n theorem_prover,\n max_depth,\n min_depth\n):\n # Get Theorem Prover Config and initialize Theorem Prover\n theorem_prover_config = TheoremProverConfig(\n grammar, **config[\"theory\"][\"theorem_prover\"]\n )\n\n num_examples = config[\"theory\"][\"num_examples\"]\n min_num_positive_examples = config[\"theory\"][\"min_num_positive_examples\"]\n max_num_negative_examples = num_examples - min_num_positive_examples\n statement_types = config[\"theory\"][\"statement_types_per_example\"]\n assertion_start_symbol = config[\"assertion\"][\"start_symbol\"]\n example_id_prefix = config.get(\"example_id_prefix\", \"\")\n\n # Generate examples for every required type of statement (Start Symbol type)\n num_true_labels = 0\n num_false_labels = 0\n curr_num_examples = 0\n progress_tracker = tqdm(total=num_examples)\n progress_tracker.set_description(desc=\"Generating Examples...\")\n while (\n curr_num_examples < num_examples and num_true_labels < min_num_positive_examples\n ):\n example = generate_random_example(\n curr_num_examples + 1,\n example_id_prefix,\n grammar,\n theorem_prover_config,\n statement_types,\n assertion_start_symbol,\n theorem_prover,\n max_depth,\n min_depth\n )\n if example is not None:\n if example.theory_assertion_instance.label:\n num_true_labels += 1\n else:\n if num_false_labels == max_num_negative_examples:\n continue\n else:\n num_false_labels += 1\n json.dump(example.to_json(), theory_op_file)\n theory_op_file.write(\"\\n\")\n curr_num_examples += 1\n progress_tracker.update()\n progress_tracker.close()\n print(f\"Generated {curr_num_examples} examples.\")\n print(f\" No. with True label: {num_true_labels}\")\n print(f\" No. 
with False label: {num_false_labels}\")", "title": "" }, { "docid": "6abcb45148b5a15227491491ca106d5b", "score": "0.49505714", "text": "def polish_and_save(model_filename,\n save_filename='',\n suffix='.polished',\n *args,\n **kwargs):\n\n save_filename = save_filename or model_filename.replace(\n '.onnx', suffix + '.onnx')\n\n model = onnx.load(model_filename)\n model = polish_model(model, *args, **kwargs)\n onnx.save(model, save_filename)\n logger.info('polished model saved to: %s', save_filename)\n return save_filename", "title": "" }, { "docid": "c70ae932b14d6c0ac6415942b0994419", "score": "0.49496683", "text": "def output_perplexity(task_letter):\n output_file = open(\"output/group02.perplexity\" + task_letter, mode=\"w\")\n word_dictionary = get_word_dict()\n test_sentences = open(\"data/sentences.eval\", mode=\"r\")\n\n for test_sentence in test_sentences.readlines():\n # predicted_softmax_vecs: 30 X 20.000 X 1 Output Vector with softmax probabilities\n predicted_softmax_vecs = eval_neural_network(test_sentence)\n perp = perplexity(predicted_softmax_vecs, test_sentence, word_dictionary)\n output_file.write(str(perp) + \"\\n\")\n\n output_file.close()", "title": "" }, { "docid": "a208cbaa76b296741b5c20e88440925f", "score": "0.49453568", "text": "def __init__(self,\n fileName,\n realFileName=None,\n prequelFileName=None,\n preErrorMessages=(), #TODO: check the type\n readFileLater=False,\n fillImportBoxLater=False,\n parseFileLater=False,\n noSymbolChecking=False,\n allowedFeatures=(),\n recognizeUSEOCLNativeModelDefinition=False):\n #type: (Text, Optional[Text], Optional[Text], List[Any], bool, bool, bool, bool, List[Text], bool) -> None\n\n\n # if readFileLater or fillImportBoxLater or parseFileLater:\n # assert finalizeLater\n\n # Create an empty model\n # Not to be moved after super\n # This should be done in all case so that\n # the model attribute always exist even if there\n # are some error in reading the file\n self.model = self.emptyModel() # type: Model\n\n\n\n\n # Call the super class, read the file or not\n try:\n # This can raise an exception for instance if\n # there is a problem reading the file\n super(ModelSourceFile, self).__init__(\n fileName=fileName,\n realFileName=realFileName,\n prequelFileName=prequelFileName,\n preErrorMessages=preErrorMessages,\n doNotReadFiles=readFileLater,\n allowedFeatures=allowedFeatures\n )\n except FatalError:\n pass # an error as already been registered\n\n from modelscripts.megamodels import Megamodel\n Megamodel.registerSource(self)\n Megamodel.registerModel(self.model)\n\n\n # Backward link\n self.model.source=self\n\n # Link issue box\n self.model._issueBox.addParent(self._issueBox)\n\n # Source to ModelElement Mapping\n self._modelMapping=_ModelSourceMapping()\n\n\n # Create first an empty ImportBox.\n self.importBox=ImportBox(self)\n\n # Then fill it by reading megamodel statements,\n # unless specified.\n try:\n if not fillImportBoxLater:\n parseToFillImportBox(\n self,\n noSymbolChecking,\n recognizeUSEOCLNativeModelDefinition)\n\n if not parseFileLater:\n self.parseToFillModel()\n self.finalize()\n\n except FatalError:\n pass # nothing to do, the issue has been registered", "title": "" }, { "docid": "2e5aa18489143959b9fdd129d63b51d0", "score": "0.49399826", "text": "def preflight(self):\n self.params[\"translations\"][\"mgf_input_file\"] = os.path.join(\n self.params[\"input_dir_path\"], self.params[\"input_file\"]\n )\n self.params[\"translations\"][\"output_file_incl_path\"] = os.path.join(\n 
self.params[\"output_dir_path\"], self.params[\"output_file\"]\n )\n self.param_file_name = os.path.join(\n self.params[\"translations\"][\"output_file_incl_path\"].strip(\".csv\")\n + \"pipi_params.def\"\n )\n self.created_tmp_files.append(self.param_file_name)\n\n # pprint.pprint(self.params['translations']['_grouped_by_translated_key'])\n # pprint.pprint(self.params)\n # sys.exit(1)\n self.params_to_write = {\n \"version\": self.META_INFO[\"version\"],\n \"output_percolator_input\": 1,\n \"mod10\": \"0.0@X?\",\n \"pepNterm\": 0.0,\n \"pepCterm\": 0.0,\n \"proNterm\": 0.0,\n \"proCterm\": 0.0,\n }\n symbols = [\"~\", \"!\", \"%\", \"^\", \"&\", \"*\", \"+\", \"<\", \">\"]\n for x, element in enumerate(symbols):\n self.params_to_write[\"mod0{0}\".format(x + 1)] = \"0.0@X{0}\".format(element)\n for aa in \"ACDEFGHIKLMNOPQRSTUVWYnc\":\n self.params_to_write[aa] = 0\n\n write_exclusion_list = [\n # 'label',\n \"-Xmx\",\n \"frag_mass_tolerance_unit\",\n \"base_mz\",\n # 'header_translations',\n # 'validation_score_field'\n ]\n\n # additional_15N_modifications = []\n # if self.params['translations']['label'] == '15N':\n # for aminoacid, N15_Diff in ursgal.ukb.DICT_15N_DIFF.items():\n # existing = False\n # for mod_dict in self.params['mods']['fix']:\n # if aminoacid == mod_dict['aa']:\n # mod_dict['mass'] += N15_Diff\n # mod_dict['name'] += '_15N_{0}'.format(aminoacid)\n # existing = True\n # if existing == True:\n # continue\n # else:\n # self.params['mods']['fix'].append(\n # {\n # 'aa': aminoacid,\n # 'mass': N15_Diff,\n # 'name': '_15N_{0}'.format(aminoacid),\n # 'pos': 'any',\n # }\n # )\n\n if self.params[\"translations\"][\"frag_mass_tolerance_unit\"] == \"ppm\":\n self.params[\"translations\"][\"_grouped_by_translated_key\"][\n \"ms2_tolerance\"\n ] = {\n \"frag_mass_tolerance\": ursgal.ucore.convert_ppm_to_dalton(\n self.params[\"translations\"][\"frag_mass_tolerance\"],\n base_mz=self.params[\"base_mz\"],\n )\n }\n\n for pipi_param_name in self.params[\"translations\"][\n \"_grouped_by_translated_key\"\n ].keys():\n for ursgal_param_name, param_value in self.params[\"translations\"][\n \"_grouped_by_translated_key\"\n ][pipi_param_name].items():\n if pipi_param_name in write_exclusion_list:\n continue\n elif pipi_param_name == \"frag_clear_mz_range\":\n min_mz, max_mz = param_value\n self.params_to_write[\"min_clear_mz\"] = min_mz\n self.params_to_write[\"max_clear_mz\"] = max_mz\n elif type(pipi_param_name) is tuple:\n for pn in pipi_param_name:\n self.params_to_write[pn] = param_value\n elif pipi_param_name == \"enzyme\":\n enz, site, aa, inh = param_value.split(\";\")\n self.params_to_write[\"enzyme\"] = \"{0} {1} {2} {3}\".format(\n enz, site, aa, inh\n )\n elif pipi_param_name == \"modifications\":\n \"\"\"\n # specify additional mass and amino acid. DO NOT change the last character.\n # maximum number is 10\n # empty entries must start with 0.0\n mod01 = 79.966331@S~ # Phosphorylation\n mod02 = 79.966331@T! 
# Phosphorylation\n mod03 = 79.966331@Y% # Phosphorylation\n mod04 = 42.010565@K^ # Acetylation\n mod05 = 15.994915@M& # Oxidation\n mod06 = 14.015650@K* # Methylation\n mod07 = 0.0@X+\n mod08 = 0.0@X<\n mod09 = 0.0@X>\n mod10 = 0.0@X?\n \"\"\"\n n = 0\n for mod_dict in self.params[\"mods\"][\"opt\"]:\n \"\"\"\n {'_id': 0,\n 'aa': '*',\n 'composition': {'C': 2, 'H': 2, 'O': 1},\n 'id': '1',\n 'mass': 42.010565,\n 'name': 'Acetyl',\n 'org': '*,opt,Prot-N-term,Acetyl',\n 'pos': 'Prot-N-term',\n 'unimod': True},\n \"\"\"\n if mod_dict[\"pos\"] == \"Prot-N-term\":\n self.params_to_write[\"proNterm\"] = mod_dict[\"mass\"]\n continue\n elif mod_dict[\"pos\"] == \"Prot-C-term\":\n self.params_to_write[\"proCterm\"] = mod_dict[\"mass\"]\n continue\n elif mod_dict[\"pos\"] == \"N-term\":\n self.params_to_write[\"pepNterm\"] = mod_dict[\"mass\"]\n continue\n elif mod_dict[\"pos\"] == \"C-term\":\n self.params_to_write[\"pepCterm\"] = mod_dict[\"mass\"]\n continue\n elif mod_dict[\"pos\"] == \"any\":\n pass\n else:\n print(\n \"\"\"\n Unknown positional argument for given modification:\n {0}\n PIPI cannot deal with this, please use one of the follwing:\n any, Prot-N-term, Prot-C-term, N-term, C-term\n \"\"\".format(\n mod_dict[\"org\"]\n )\n )\n sys.exit(1)\n n += 1\n assert (\n n <= 10\n ), \"\"\"\n A maximum of 10 optional modifications is allowed in PIPI.\n You specified more than 10.\n \"\"\"\n if n < 10:\n mod_n = \"mod0{0}\".format(n)\n elif n == 10:\n mod_n = \"mod{0}\".format(n)\n else:\n print(\n \"\"\"\n A maximum of 10 optional modifications is allowed in PIPI.\n You specified more than 10.\n \"\"\"\n )\n sys.exit(1)\n self.params_to_write[mod_n] = \"{0}@{1}{2}\".format(\n mod_dict[\"mass\"], mod_dict[\"aa\"], symbols[n - 1]\n )\n\n for mod_dict in self.params[\"mods\"][\"fix\"]:\n if \"N-term\" in mod_dict[\"pos\"]:\n self.params_to_write[\"n\"] = mod_dict[\"mass\"]\n elif \"C-term\" in mod_dict[\"pos\"]:\n self.params_to_write[\"c\"] = mod_dict[\"mass\"]\n else:\n self.params_to_write[mod_dict[\"aa\"]] = mod_dict[\"mass\"]\n else:\n self.params_to_write[pipi_param_name] = param_value\n self.write_params_file()\n\n self.params[\"command_list\"] = [\n \"java\",\n \"-Xmx{0}\".format(\n self.params[\"translations\"][\"_grouped_by_translated_key\"][\"-Xmx\"][\n \"-xmx\"\n ]\n ),\n \"-jar\",\n self.exe,\n self.param_file_name,\n self.params[\"translations\"][\"mgf_input_file\"],\n ]\n\n return self.params", "title": "" }, { "docid": "45b7bcbb2c25384bb8bd0c4ba1275413", "score": "0.49382108", "text": "def gen_pov(ldraw_path, pov_path):\n model, parts = get_model(ldraw_path)\n\n with open(pov_path, \"w\") as pov_file:\n pov_file.write(POV_HEADER + \"\\n\")\n writer = POVRayWriter(parts, pov_file)\n writer.write(model)\n pov_file.write(POV_TRAILER + \"\\n\")", "title": "" }, { "docid": "b0f8306823fe036ca17dd5e9585dab03", "score": "0.49359792", "text": "def django_run(target, opt=\"XTAL\"):\r\n # Set up the OpenBaebel conversion modules\r\n sdconv = OBConversion()\r\n ligref = OBMol()\r\n # Define the residues and the proteisn to analyse\r\n if os.path.isfile(os.path.join(os.path.split(sys.argv[0])[0], 'data/res_def.py')):\r\n \tres_d = [trans_res(x) for x in ast.literal_eval(open(os.path.join(os.path.split(sys.argv[0])[0], 'data/res_def.py')).read())[target.title].split()]\r\n print res_d\r\n# Molecules\r\n\t # Now read in the ligand\r\n plif_method = PlifMethod()\r\n plif_method.text= \"PYPLIF\"\r\n feature_list = [\"POLAR\",\"FACE\",\"EDGE\",\"ACCEPTOR\",\"DONOR\",\"NEGATIVE\",\"POSITIVE\"]\r\n 
try:\r\n plif_method.validate_unique()\r\n plif_method.save()\r\n except ValidationError:\r\n \t plif_method = PlifMethod.objects.get(text=\"PYPLIF\")\r\n out_d = {}\r\n counter = 0\r\n# Create a file for the protein\r\n t = tempfile.NamedTemporaryFile(suffix=\".pdb\",delete=False)\r\n my_prot = Protein.objects.get(code=target.title+\"TEMP\")\r\n t.write(my_prot.pdb_info.name)\r\n t.close()\r\n protref = read_prot(t.name, res_d)\r\n t = tempfile.NamedTemporaryFile(suffix=\".sdf\",delete=False)\r\n t.close()\r\n sdconv.SetInFormat(\"sdf\")\r\n if opt == \"XTAL\":\r\n mols = Molecule.objects.exclude(prot_id__code__contains=target.title).filter(prot_id__target_id=target)\r\n elif opt == \"LLOOMMPPAA\":\r\n \t mols = []\r\n \t sps = SynthPoint.objects.filter(target_id=target)\r\n \t for s in sps:\r\n \t \t mols.extend([m for m in s.mol_id.all()])\r\n else:\r\n \t print \"UNKNOWN OPTION\"\r\n \t return\r\n for dj_mol in mols:\r\n \t out_sd = Chem.SDWriter(t.name)\r\n \t out_sd.write(Chem.MolFromMolBlock(str(dj_mol.sdf_info)))\r\n \t out_sd.close()\r\n \t sdconv.ReadFile(ligref, t.name)\r\n \t # Now make the new plif\r\n \t new_plif = Plif()\r\n \t new_plif.mol_id = dj_mol\r\n \t new_plif.prot_id = my_prot\r\n \t new_plif.method_id = plif_method\r\n \t try:\r\n \t \t new_plif.validate_unique()\r\n \t \t new_plif.save()\r\n \t except ValidationError:\r\n \t \t new_plif = Plif.objects.get(mol_id=dj_mol,prot_id=my_prot,method_id=plif_method)\r\n \t lig_name = ligref.GetTitle().strip(\",\")\r\n \t prot_name = lig_name.split(\"_\")[0]\r\n \t ligref.AddHydrogens()\r\n \t counter +=1\r\n \t refresdict = pp.getresiduedict(protref, res_d)\r\n \t new_d = get_fp(protref,ligref, res_d)\r\n \t for res in new_d:\r\n \t \t new_res = PlifRes()\r\n \t \t new_res.res_name = res[:3]\r\n \t \t new_res.res_num = int(res[3:])\r\n \t \t new_res.prot_id = my_prot\r\n \t \t try:\r\n \t \t \t new_res.validate_unique()\r\n \t \t \t new_res.save()\r\n \t \t except ValidationError:\r\n \t \t \t new_res = PlifRes.objects.get(res_name=res[:3],res_num=int(res[3:]),prot_id=my_prot)\r\n \t \t new_plif.res_id.add(new_res)\r\n \t \t for bit_num, bit in enumerate(new_d[res]):\r\n \t \t \t new_bit = PlifBit()\r\n \t \t \t new_bit.feature = feature_list[bit_num]\r\n \t \t \t new_bit.method_id = plif_method\r\n \t \t \t new_bit.res_id = new_res\r\n \t \t \t try:\r\n \t \t \t \t new_bit.validate_unique()\r\n \t \t \t \t new_bit.save()\r\n \t \t \t \t my_fun(dj_mol,new_bit,new_plif,bit)\r\n \t \t \t except ValidationError:\r\n\t\t\t\t\t\t\t\t\t\tnew_bit = PlifBit.objects.get(feature=feature_list[bit_num],method_id=plif_method,res_id=new_res)\r\n\t\t\t\t\t\t\t\t\t\tnew_bit.save()\r\n\t\t\t\t\t\t\t\t\t\tnew_plif.bit_id.add(new_bit)\r\n\t\t\t\t\t\t\t\t\t\tmy_fun(dj_mol,new_bit,new_plif,bit)\r\n\t\t\t\t\t\t\t\t\t\t\r\n \t ligref = OBMol()\r\n \t notatend = sdconv.Read(ligref)", "title": "" }, { "docid": "2cea0abf4708ac80030bc376d139c8b2", "score": "0.49342123", "text": "def writeLP(item_count, capacity, items):\n lpfile = checkpath + '/knapsack.lp'\n f = open(lpfile, 'w')\n f.write(\"Maximize\\n\")\n f.write(\"Objective:\\n\")\n cns = \"\"\n cnt = 0\n for i in range(0, item_count):\n xVar = \"x_\"+str(i)\n cns = cns + \" + \" + str(items[i].value) + \" \" + xVar\n cnt = cnt + 1\n if cnt % 5 == 4:\n f.write(cns + \"\\n\")\n cns = \"\"\n cnt = 0\n f.write(cns + \"\\n\")\n\n f.write(\"Subject To\\n\")\n cns = \"\"\n cnt = 0\n for i in range(0, item_count):\n xVar = \"x_\"+str(i)\n cns = cns + \" + \" + str(items[i].weight) + \" \" + xVar\n cnt 
= cnt + 1\n if cnt % 5 == 4:\n f.write(cns + \"\\n\")\n cns = \"\"\n cnt = 0\n f.write(cns + \" <= \" + str(capacity) + \"\\n\")\n\n f.write(\"Binaries\\n\")\n for i in range(0, item_count):\n xVar = \"x_\"+str(i)\n f.write(xVar + \"\\n\")\n f.write(\"End\")\n f.close()", "title": "" }, { "docid": "aa711305cbfff319951e389f1f16131b", "score": "0.49325135", "text": "def createModelHandler(self):\n className = self.model.name.upper()\n filename = self.model.name + \".pk\"\n\n return f\"\"\"\n# Copyright 2017 Battelle Energy Alliance, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nimport pickle\nimport os\nfrom pythonfmu.fmi2slave import Fmi2Type, Fmi2Slave, Fmi2Causality, Fmi2Variability, Integer, Real, Boolean, String\n\nclass {className}(Fmi2Slave):\n #\n # RAVEN (raven.inl.gov) Model-based Python-driven simulator\n #\n author = \"RAVEN Team\"\n description = \"RAVEN Model-based Python-driven simulator\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.inputVariables = {self.inputVars}\n self.outputVariables = {self.outVars}\n # set path to raven and the serialized model\n self.raven_path = r\"{self.ravenDir}\"\n # model_path is by default the path to this model that is exported as FMU (serialized). It is stored in the resource folder\n self.model_path = self.resources + \"/\" + \"{filename}\"\n #os.path.sep\n sys.path.append(self.model_path)\n # this flag activates the initialization at the begin of the solve\n self.initialized = False\n # register raven_path variables if needed to be changed\n self.register_variable(String(\"raven_path\", causality=Fmi2Causality.parameter, variability=Fmi2Variability.tunable))\n # register input variables\n for var in self.inputVariables:\n # set var\n self.__dict__[var] = 0.0\n self.register_variable(Real(var, causality=Fmi2Causality.input))\n for var in self.outputVariables:\n # set var\n self.__dict__[var] = 0.0\n self.register_variable(Real(var, causality=Fmi2Causality.output))\n\n def setup_experiment(self, start_time: float):\n self.start_time = start_time\n if not self.initialized:\n sys.path.append(self.raven_path)\n # find the RAVEN framework\n if os.path.isdir(os.path.join(self.raven_path,\"ravenframework\")):\n # we import the Driver to load the RAVEN enviroment for the un-pickling\n try:\n import ravenframework.Driver\n except RuntimeError as ae:\n # we try to add the framework directory\n raise RuntimeError(\"Importing or RAVEN failed with error:\" +str(ae))\n else:\n print(\"framework not found in\",self.raven_path)\n # de-serialize the model\n print(\"model_path\", self.model_path)\n self.model = pickle.load(open(self.model_path, mode='rb'))\n self.initialized = True\n\n def do_step(self, current_time: float, step_size: float) -> bool:\n request = dict()\n for var in self.inputVariables:\n request[var] = self.__dict__[var]\n request['current_time'] = current_time\n request['step_size'] = step_size\n\n return_var = self.model.{self.executeMethod}(request)\n outs = return_var if 
isinstance(return_var,dict) else return_var[{self.indexReturnDict}]\n\n for var in outs:\n self.__dict__[var] = outs[var]\n return True\n\n\"\"\"", "title": "" }, { "docid": "bfc1b2a35b7e5fd939b8a36de23ec2a2", "score": "0.49318784", "text": "def buildSolverModel(self, lp):\n log.debug(\"create the glpk model\")\n prob = glpk.glp_create_prob()\n glpk.glp_set_prob_name(prob, lp.name)\n log.debug(\"set the sense of the problem\")\n if lp.sense == constants.LpMaximize:\n glpk.glp_set_obj_dir(prob, glpk.GLP_MAX)\n log.debug(\"add the constraints to the problem\")\n glpk.glp_add_rows(prob, len(list(lp.constraints.keys())))\n for i, v in enumerate(lp.constraints.items(), start=1):\n name, constraint = v\n glpk.glp_set_row_name(prob, i, name)\n if constraint.sense == constants.LpConstraintLE:\n glpk.glp_set_row_bnds(\n prob, i, glpk.GLP_UP, 0.0, -constraint.constant\n )\n elif constraint.sense == constants.LpConstraintGE:\n glpk.glp_set_row_bnds(\n prob, i, glpk.GLP_LO, -constraint.constant, 0.0\n )\n elif constraint.sense == constants.LpConstraintEQ:\n glpk.glp_set_row_bnds(\n prob, i, glpk.GLP_FX, -constraint.constant, -constraint.constant\n )\n else:\n raise PulpSolverError(\"Detected an invalid constraint type\")\n constraint.glpk_index = i\n log.debug(\"add the variables to the problem\")\n glpk.glp_add_cols(prob, len(lp.variables()))\n for j, var in enumerate(lp.variables(), start=1):\n glpk.glp_set_col_name(prob, j, var.name)\n lb = 0.0\n ub = 0.0\n t = glpk.GLP_FR\n if not var.lowBound is None:\n lb = var.lowBound\n t = glpk.GLP_LO\n if not var.upBound is None:\n ub = var.upBound\n t = glpk.GLP_UP\n if not var.upBound is None and not var.lowBound is None:\n if ub == lb:\n t = glpk.GLP_FX\n else:\n t = glpk.GLP_DB\n glpk.glp_set_col_bnds(prob, j, t, lb, ub)\n if var.cat == constants.LpInteger:\n glpk.glp_set_col_kind(prob, j, glpk.GLP_IV)\n assert glpk.glp_get_col_kind(prob, j) == glpk.GLP_IV\n var.glpk_index = j\n log.debug(\"set the objective function\")\n for var in lp.variables():\n value = lp.objective.get(var)\n if value:\n glpk.glp_set_obj_coef(prob, var.glpk_index, value)\n log.debug(\"set the problem matrix\")\n for constraint in lp.constraints.values():\n l = len(list(constraint.items()))\n ind = glpk.intArray(l + 1)\n val = glpk.doubleArray(l + 1)\n for j, v in enumerate(constraint.items(), start=1):\n var, value = v\n ind[j] = var.glpk_index\n val[j] = value\n glpk.glp_set_mat_row(prob, constraint.glpk_index, l, ind, val)\n lp.solverModel = prob\n # glpk.glp_write_lp(prob, None, \"glpk.lp\")", "title": "" }, { "docid": "90dd741a4cce0ae2de8a4b272b8dd9f3", "score": "0.4929237", "text": "def to_modflow(model, fname):\n try:\n import flopy\n except ImportError as e:\n raise type(e)(str(e) + \", modflow output not possible!\")\n # For modflow we want to create a new folder instead of only a file. 
The folder name is the base\n # name of the passed filename\n realization_dir = pathlib.Path(fname).parent\n runname = pathlib.Path(fname).name\n mfdir = realization_dir / 'MODFLOW'\n mfdir.mkdir(parents=True, exist_ok=True)\n mfname = str(mfdir / runname)\n\n # Assign name and create modflow model object\n mf = flopy.modflow.Modflow(mfname, exe_name='mf2005')\n\n # Create the discretization object\n ztop = model.grid.z0 + model.grid.lz\n zbot = model.grid.z0\n botm = np.linspace(ztop, zbot, model.grid.nz + 1)\n dis = flopy.modflow.ModflowDis(mf, model.grid.nz, model.grid.nx, model.grid.ny, \n delr=model.grid.dx, delc=model.grid.dy,\n top=ztop, botm=botm[1:])\n\n # Variables for the BAS package\n ibound = np.ones((model.grid.nz, model.grid.nx, model.grid.ny), dtype=np.int32)\n ibound[:, :, 0] = -1\n ibound[:, :, -1] = -1\n\n strt = np.ones((model.grid.nz, model.grid.nx, model.grid.ny), dtype=np.float32)\n strt[:, :, 0] = model.flowtrans['hin'][0]\n strt[:, :, -1] = model.flowtrans['hout'][0]\n\n bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt)\n\n # Assign hydraulic conductivity\n hyvr_hk = np.transpose(model.data['k_iso'], (2, 0, 1))\n hyvr_layvka = 1 # VKA dataset is ratio of horizontal K\n if 'anirat' in model.data.keys():\n hyvr_vka = np.transpose(model.data['anirat'], (2, 0, 1))\n\n # Add LPF package to the MODFLOW model\n lpf = flopy.modflow.ModflowLpf(mf, # Modflow object\n hk=hyvr_hk, # Horizontal hydraulic conductivity\n layvka=hyvr_layvka, # Flag for each layer of anisotropic ratio\n vka=hyvr_vka) # Anisotropy ratios.\n else:\n # Add LPF package to the MODFLOW model\n lpf = flopy.modflow.ModflowLpf(mf, # Modflow object\n hk=hyvr_hk) # Horizontal hydraulic conductivity\n\n oc = flopy.modflow.ModflowOc(mf) # Add OC package to the MODFLOW model\n pcg = flopy.modflow.ModflowPcg(mf) # Add PCG package to the MODFLOW model\n mf.write_input() # Write the MODFLOW model input files", "title": "" }, { "docid": "486cabbe9fb6d10c75ea06f7ee5cf1a7", "score": "0.49249116", "text": "def write_production_mdp(prefix, rep, temp):\n\n fn = prefix + '_%d.mdp'%rep\n fout = open(fn, 'w')\n fout.write(\"\"\"title = prod_GBSA\ncpp = /lib/cpp\ninclude = -I../top\ndefine = \nintegrator = sd\ndt = 0.002\nnsteps = 25000000 ; 50 ns\nnstxout = 500000 ; every 1 ns \nnstvout = 500000\nnstlog = 5000 ; every 10 ps\nnstxtcout = 5000 ; every 10 ps\nnstenergy = 5000 ; every 10 ps\n\n\nimplicit_solvent = GBSA\ngb_algorithm = OBC\nnstgbradii = 1\ngb_epsilon_solvent = 80.0\nsa_algorithm = Ace-approximation\n\ncomm_mode = ANGULAR\n\nrgbradii = 0.9\ncoulombtype = Cut-off\nrvdw = 0.9\nrlist = 0.9\nrcoulomb = 0.9\n\n; CONSTRAINTS\nconstraints = hbonds\nconstraint_algorithm = LINCS\n\n; other stuff\nbd_fric = 1.0\nnstcomm = 10\ncomm_grps = System\nxtc_grps = System\nenergygrps = System\nnstlist = 10\nns_type = grid\ntc-grps = System \ntau_t = 0.0109\n\nref_t = %d \ncompressibility = 4.5e-5\nref_p = 1.0\ngen_vel = yes\ngen_temp = %d \ngen_seed = 1729\npbc = no\"\"\"%(temp, temp))\n\n fout.close()\n\n return fn", "title": "" }, { "docid": "6bfcf30894e9237e9a45c1cbf4059801", "score": "0.49182045", "text": "def write_aloha_routines(self):\n \n\n self.aloha_model.add_Lorentz_object(self.model.get('lorentz'))\n self.aloha_model.compute_subset(self.wanted_lorentz)\n # Write out the aloha routines in Python\n aloha_routines = []\n \n # First add the default external wavefunction routines\n wavefunction_routines = open(pjoin(MG5DIR,'aloha','template_files','wavefunctions.py'),'r').read() \n 
open(pjoin(self.dir_path,'wavefunctions.py'),'w').write(\n 'from __future__ import division\\n'+wavefunction_routines)\n\n #aloha_routines.append(open(pjoin(MG5DIR,'aloha','template_files','wavefunctions.py'),'r').read())\n\n # Now write the process-depenent Feynman rules ones\n for routine in self.aloha_model.values():\n aloha_routines.append(routine.write(output_dir = None, \n mode='mg5',\n language = 'Python'))\n\n for routine in self.aloha_model.external_routines:\n aloha_routines.append(\n open(self.aloha_model.locate_external(routine, 'Python')).read())\n\n # Now collect imports\n python_imports = []\n new_aloha_routines = []\n for aloha_routine in aloha_routines:\n new_aloha_routine = []\n for line in aloha_routine.split('\\n'):\n if any(line.startswith(token) for token in ['from','import']):\n if line not in python_imports:\n python_imports.append(line)\n else:\n new_aloha_routine.append(line)\n new_aloha_routines.append('\\n'.join(new_aloha_routine))\n aloha_routines = new_aloha_routines\n \n # Veto some imports\n vetoed_imports = ['import aloha.template_files.wavefunctions as wavefunctions']\n python_imports = [pi for pi in python_imports if pi not in vetoed_imports]\n python_imports.insert(0, 'import wavefunctions')\n\n aloha_output = open(pjoin(self.dir_path,'aloha_methods.py'),'w')\n aloha_output.write('from __future__ import division\\n')\n # Write imports\n aloha_output.write('\\n'.join(python_imports))\n aloha_output.write('\\n'*2)\n\n # Write routines\n aloha_output.write('\\n'.join(aloha_routines))\n\n aloha_output.close()", "title": "" }, { "docid": "f73793368d062ed8010c45c9ae955d77", "score": "0.49119902", "text": "def run(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n self._openScriptFile()\n self.scriptf.write(\"from phycas import *\\n\\n\")\n\n if self.opts.seed > 0:\n self.scriptf.write('setMasterSeed(%d)\\n\\n' % self.opts.seed)\n\n if self.opts.model == 'jc':\n self.model_jc(0)\n elif self.opts.model == 'jc+i':\n self.model_jc(1)\n elif self.opts.model == 'jc+g':\n self.model_jc(2)\n elif self.opts.model == 'jc+i+g':\n self.model_jc(3)\n elif self.opts.model == 'hky':\n self.model_hky(0)\n elif self.opts.model == 'hky+i':\n self.model_hky(1)\n elif self.opts.model == 'hky+g':\n self.model_hky(2)\n elif self.opts.model == 'hky+i+g':\n self.model_hky(3)\n elif self.opts.model == 'gtr':\n self.model_gtr(0)\n elif self.opts.model == 'gtr+i':\n self.model_gtr(1)\n elif self.opts.model == 'gtr+g':\n self.model_gtr(2)\n elif self.opts.model == 'gtr+i+g':\n self.model_gtr(3)\n elif self.opts.model == 'codon':\n self.model_codon()\n\n self.model_edgelen()\n\n if self.opts.datafile is None:\n self.scriptf.write(\"mcmc.data_source = None\\n\")\n elif self.opts.datafile == 'sample.nex':\n self.saveSampleData()\n self.scriptf.write(\"mcmc.data_source = '%s'\\n\\n\" % self.opts.datafile)\n else:\n self.scriptf.write(\"mcmc.data_source = '%s'\\n\\n\" % self.opts.datafile)\n\n if self.opts.analysis == 'mcmc':\n self.mcmc_analysis()\n self.sump_analysis()\n self.sumt_analysis()\n if self.opts.analysis == 'poly':\n self.polytomy_analysis()\n elif self.opts.analysis == 'cpo':\n self.cpo_analysis()\n #elif self.opts.analysis == 'idr':\n # self.idr_analysis()\n elif self.opts.analysis == 'ss':\n self.steppingstone_analysis()\n\n self._closeScriptFile()", "title": "" }, { "docid": "c3e42d57ee79ba479dae9b3b96b226d2", "score": "0.49086118", "text": "def build_model(out: ModelOutput = ModelOutput(\"malt/swemalt-1.7.2.mco\"),\n _maltjar: Binary = 
Binary(\"[malt.jar]\")):\n out.download(\"http://maltparser.org/mco/swedish_parser/swemalt-1.7.2.mco\")", "title": "" }, { "docid": "e9a026d51eff7c21f70c849e56481fa5", "score": "0.49045718", "text": "def to_alien(self, file_pointer, format='opb', comments=None):\n\n if self.atms and format == 'smt':\n raise NotImplementedError('SMT-LIB2 does not support PB constraints directly; you may want to use Z3\\'s API instead')\n\n cchars = {'lp': '\\\\', 'opb': '*', 'smt': ';'}\n\n # saving formula's internal comments\n for c in self.comments:\n print(cchars[format], c, file=file_pointer)\n\n # saving externally specified comments\n if comments:\n for c in comments:\n print(cchars[format], c, file=file_pointer)\n\n # normalized soft clauses\n soft, hard = [], []\n topv = self.nv + 1\n for cl in self.soft:\n if len(cl) == 1:\n soft.append(cl)\n else:\n hard.append([topv] + cl)\n soft.append([topv])\n topv += 1\n\n if format == 'opb':\n print('* #variable= {0} #constraint= {1}'.format(self.nv, len(self.hard) + len(hard)),\n file=file_pointer)\n print('min:',\n ' '.join(['{0}{1} x{2}'.format('-' if s[0] > 0 else '+', w, abs(s[0])) for s, w in zip(soft, self.wght)]),\n ';', file=file_pointer)\n elif format == 'lp':\n print('Minimize', file=file_pointer)\n print('obj:',\n ' '.join(['{0}{1} x{2}'.format('-' if s[0] > 0 else '+', w, abs(s[0])) for s, w in zip(soft, self.wght)]),\n file=file_pointer)\n print('Subject To', file=file_pointer)\n elif format == 'smt':\n for v in range(1, self.nv + 1):\n print('(declare-fun x{0} () Bool)'.format(v), file=file_pointer)\n\n for i, cl in enumerate(self.hard + hard, 1):\n line, neg = [], 0\n for l in cl:\n if l > 0:\n if format == 'smt':\n line.append('x{0}'.format(l))\n else:\n line.append('+{0} x{1}'.format('1' if format == 'opb' else '', l))\n else:\n if format == 'smt':\n line.append('(not x{0})'.format(-l))\n else:\n line.append('-{0} x{1}'.format('1' if format == 'opb' else '', -l))\n neg += 1\n\n if format == 'smt':\n print('(assert (or {0}))'.format(' '.join(line)), file=file_pointer)\n else:\n print('{0}{1} >= {2} {3}'.format('' if format == 'opb' else 'c{0}: '.format(i),\n ' '.join(l for l in line),\n 1 - neg, ';' if format == 'opb' else ''),\n file=file_pointer)\n\n for i, am in enumerate(self.atms, len(self.hard) + len(hard) + 1):\n line, neg = [], 0\n for l in am[0]:\n if l > 0:\n line.append('-{0} x{1}'.format('1' if format == 'opb' else '', l))\n neg += 1\n else:\n line.append('+{0} x{1}'.format('1' if format == 'opb' else '', -l))\n\n print('{0} {1} >= {2} {3}'.format('' if format == 'opb' else 'c{0}:'.format(i),\n ' '.join(l for l in line),\n len(am[0]) - am[1] - neg, ';' if format == 'opb' else ''),\n file=file_pointer)\n\n if format == 'lp':\n print('Bounds', file=file_pointer)\n for v in range(1, topv):\n print('0 <= x{0} <= 1'.format(v), file=file_pointer)\n print('Binary', file=file_pointer)\n for v in range(1, topv):\n print('x{0}'.format(v), file=file_pointer)\n print('End', file=file_pointer)\n elif format == 'smt':\n for cl, w in zip(soft, self.wght):\n l = 'x{0}'.format(cl[0]) if cl[0] > 0 else '(not x{0})'.format(-cl[0])\n print('(assert-soft {0} :weight {1})'.format(l, w), file=file_pointer)\n\n print('(check-sat)', file=file_pointer)\n print('(get-model)', file=file_pointer)\n print('(get-objectives)', file=file_pointer)\n print('(exit)', file=file_pointer)", "title": "" }, { "docid": "1bed09b7d57987e01641bd6feb172467", "score": "0.4902589", "text": "def copy_files_export_model(geomodel_dir, geomodeller_file, new_dir, nx, ny, nz, 
**kwds):\n \n \n \n os.chdir(geomodel_dir)\n \n \n # first step: determine all Geomodeller files in original directory\n geomodel_files = []\n for l in os.listdir(\".\"):\n if os.path.splitext(l)[1] == \".sec\":\n geomodel_files.append(l)\n if os.path.splitext(l)[1] == \".s3d\":\n geomodel_files.append(l)\n if os.path.splitext(l)[1] == \".md5\":\n geomodel_files.append(l)\n if os.path.splitext(l)[1] == \".wsp\":\n geomodel_files.append(l)\n if os.path.splitext(l)[1] == \".xml\":\n # This is the original file - keep for later\n geomodel_xml_ori = l\n \n print geomodel_xml_ori\n os.chdir(new_dir)\n\n for f in geomodel_files:\n shutil.copyfile(os.path.join(geomodel_dir, f), os.path.join(\".\",f))\n\n for l2 in os.listdir(\".\"):\n if l2 == geomodeller_file:\n print l2\n shutil.copyfile(l2,os.path.join(\".\", geomodel_xml_ori))\n if \"original\" in l2:\n new_grid = \"exported_grid_original.txt\"\n new_delxyz = \"delxyz_original.txt\"\n else:\n new_grid = \"exported_grid_\" + os.path.splitext(l2)[0] + \".txt\"\n new_delxyz = \"delxyz_\" + os.path.splitext(l2)[0] + \".txt\"\n \n if kwds.has_key('nz_list'): # cell discretization study\n try:\n i = int(l2[-7:-4])\n except ValueError:\n os.chdir('..') \n continue # original model\n subprocess.call(\"/home/flo/bin/export_model %s %d %d %d\" % (geomodel_xml_ori, nx, ny, kwds['nz_list'][i]), shell=True)\n else:\n\n while True:\n proc = subprocess.Popen(\"/home/flo/bin/export_model %s %d %d %d\" % (geomodel_xml_ori, nx, ny, nz), shell=True)\n for i in range(10):\n time.sleep(5)\n if proc.poll() == 0:\n print(\"Process finished successfully\")\n break\n if proc.poll() == None:\n proc.kill()\n print(\"\\n\\n\\tProcess killed, restart!\\n\\n\")\n continue\n elif proc.poll() == 0:\n print(\"Process finished successfully\")\n break\n print(\"\\n\\n\\tProcess not finished correctly!\\n\\n\\n\")\n break\n # subprocess.call(\"/home/flo/bin/export_model %s %d %d %d\" % (geomodel_xml_ori, nx, ny, nz), shell=True)\n shutil.copyfile(\"exported_grid.txt\", os.path.join(\".\", new_grid))\n shutil.copyfile(\"delxyz.txt\", os.path.join(\".\", new_delxyz))", "title": "" }, { "docid": "8dc902536351b3d417ec8d2e8f7e209e", "score": "0.48992693", "text": "def convert2lite(save_dir,\n lite_valid_places=\"arm\",\n lite_model_type=\"naive_buffer\"):\n\n from paddlelite.lite import Opt\n opt = Opt()\n opt.set_model_dir(save_dir + \"/inference_model\")\n opt.set_valid_places(lite_valid_places)\n opt.set_model_type(lite_model_type)\n opt.set_optimize_out(save_dir + \"/opt\")\n opt.run()", "title": "" }, { "docid": "38860f271edd9ad00978731b25db1a2b", "score": "0.48990908", "text": "def makefile2dot(**kwargs):\n\n direction = kwargs.get('direction', \"BT\")\n if direction not in [\"LR\", \"RL\", \"BT\", \"TB\"]:\n raise ValueError('direction must be one of \"BT\", \"TB\", \"LR\", RL\"')\n\n output = kwargs.get('output', '')\n view = kwargs.get('view', False)\n\n graph = build_graph(stream_database(), direction=direction)\n if output == \"\":\n if view:\n graph.view()\n else:\n print(graph)\n else:\n with open(output, 'w') as file:\n file.write(str(graph))\n if view:\n graph.view()", "title": "" }, { "docid": "7c4d103572b36b3f5587fd6cccb6325d", "score": "0.48969853", "text": "def load_model(fp: str):\n pass", "title": "" }, { "docid": "6128a8fd86ce81b50e836d0234155722", "score": "0.48924553", "text": "def writemodelfile(self, classpy=None):\n if not classpy is None:\n classpy.close()\n pyfile = '%s.py' % os.path.join(self.modelspath, self.thismodule)\n classpy = open(pyfile, 'r')\n 
lines = classpy.readlines()\n lines.insert(2, '\\n')\n classpy.close()\n froms = list()\n for line in lines:\n prefix = ''\n if 'ForeignKey' in line or 'OneToOneField' in line \\\n or 'ManyToManyField' in line:\n idx = lines.index(line)\n third = ''\n paren = False\n comma = False\n first, second = line.split('(', 1)\n if not 'self' in second: # do nothing if it is\n if ',' in second: # some options exist\n second, third = second.split(',', 1)\n third = ', %s' % third\n if ')' in second: # that's all in the line\n paren = True # so it can be put back\n second = second.replace(')', '').strip()\n if \"'\" in second:\n second = second.replace(\"'\", '').strip()\n if '\"' in second:\n second = second.replace('\"', '').strip()\n relatedmodel = second # totally naked\n assert len(second.split()) == 1\n module = self.checkthisimport(relatedmodel)\n if not module is None:\n if module.startswith('.'):\n prefix = '#'\n line = \"%s('%s'%s\" % (first, second, third)\n else:\n line = \"%s(%s%s\" % (first, second, third)\n if paren:\n line = '%s)\\n' % line.rstrip()\n if not line.endswith('\\n'):\n line = '%s\\n' % line\n if idx:\n lines[idx] = line\n if module:\n impline = '%sfrom %s import %s\\n' % (prefix, module,\n relatedmodel)\n else:\n impline = 'import %s\\n' % model\n if not impline in froms:\n #print(impline)\n froms.insert(0, impline)\n # now find all the other imported items\n chunk = line.replace('=', ' ')\n chunk = chunk.replace('[', ' ')\n chunk = chunk.replace('(', ' ')\n chunk = chunk.replace(')', ' ')\n chunk = chunk.replace('.', ' ')\n chunk = chunk.replace(']', ' ')\n chunk = chunk.replace(',', ' ')\n bits = chunk.split()\n for bit in bits:\n if self.seemsok(bit, line):\n module = self.checkthisimport(bit)\n if not module is None:\n if module == '':\n impline = 'import %s\\n' % bit\n else:\n impline = 'from %s import %s\\n' % (module, bit)\n if not impline in froms:\n froms.insert(0, impline)\n\n clean = list()\n for item in froms:\n if not '__future__' in item:\n if not item in clean:\n clean.insert(0, item)\n cleaner = self.removethismodule(clean)\n del(clean)\n cleaner.sort()\n cleaner.reverse()\n if aggregate:\n cleanest = self.aggregatefroms(cleaner)\n else:\n cleanest = cleaner\n for item in cleanest:\n lines.insert(self.startline, item)\n classpy = open(pyfile, 'w')\n classpy.writelines(lines)\n classpy.close()", "title": "" }, { "docid": "6b24a82db4e8a7271fd5aaa542794f09", "score": "0.48900214", "text": "def _translate_and_simulate(self, simulate):\n import os\n import shutil\n import jinja2\n import datetime\n\n # Delete output files\n self.deleteOutputFiles()\n\n worDir = self._outputDir_\n\n # Construct the model instance with all parameter values\n # and the package redeclarations\n dec = self._declare_parameters()\n dec.extend(self._modelModifiers_)\n\n if len(dec) == 0:\n model_modifier = \"\"\n else:\n model_modifier = '{dec}'.format(mn=self.modelName, dec=','.join(dec))\n\n file_name = \"{}.py\".format(self.modelName.replace(\".\", \"_\"))\n## self._time_stamp_old_files = datetime.datetime.now()\n with open(os.path.join(worDir, file_name), mode=\"w\", encoding=\"utf-8\") as fil:\n path_to_template = os.path.join(\n os.path.dirname(__file__), os.path.pardir, \"development\")\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(path_to_template))\n\n template = env.get_template(\"optimica_run.template\")\n\n # Note that filter argument must respect glob syntax ([ is escaped with []]) + OPTIMICA mat file\n # stores matrix variables with no space e.g. 
[1,1].\n txt = template.render(\n model=self.modelName,\n model_modifier=model_modifier,\n ncp=self._simulator_.get('numberOfIntervals'),\n rtol=self._simulator_.get('eps'),\n solver=self._simulator_.get('solver'),\n start_time=self._simulator_.get('t0') if self._simulator_.get(\n 't0') is not None else 'mod.get_default_experiment_start_time()',\n final_time=self._simulator_.get('t1') if self._simulator_.get(\n 't1') is not None else 'mod.get_default_experiment_stop_time()',\n result_file_name=f\"{self._simulator_.get('resultFile')}.mat\",\n simulate=simulate,\n time_out=self._simulator_.get('timeout'),\n filter=self._result_filter,\n generate_html_diagnostics=self._generate_html_diagnostics,\n debug_solver=self._debug_solver,\n debug_solver_interactive_mode=self._debug_solver_interactive_mode)\n\n fil.write(txt)\n shutil.copyfile(\n os.path.join(\n os.path.dirname(__file__),\n \"OutputGrabber.py\"),\n os.path.join(\n worDir,\n \"OutputGrabber.py\"))\n\n try:\n super()._runSimulation([\"jm_ipython.sh\", file_name],\n self._simulator_.get('timeout'),\n worDir)\n\n self._check_simulation_errors(worDir=worDir, simulate=simulate)\n# self._copyNewFiles(worDir)\n# self._deleteTemporaryDirectory(worDir)\n\n except Exception as e: # Catch all possible exceptions\n em = f\"Simulation failed in '{worDir}'\\n Exception: {e}.\\n You need to delete the directory manually.\\n\"\n self._reporter.writeError(em)\n raise e\n os.remove(os.path.join(worDir, \"OutputGrabber.py\"))", "title": "" }, { "docid": "989010fe3938e83cdafa116524121a61", "score": "0.4889075", "text": "def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n from pathlib import Path\n\n from stamp_processing.module.yolov5.models.experimental import attempt_load\n from stamp_processing.module.yolov5.models.yolo import Model\n from stamp_processing.module.yolov5.utils.torch_utils import select_device\n\n file = Path(__file__).absolute()\n\n save_dir = Path(\"\") if str(name).endswith(\".pt\") else file.parent\n path = (save_dir / name).with_suffix(\".pt\") # checkpoint path\n try:\n device = select_device((\"0\" if torch.cuda.is_available() else \"cpu\") if device is None else device)\n\n if pretrained and channels == 3 and classes == 80:\n model = attempt_load(path, map_location=device) # download/load FP32 model\n else:\n cfg = list((Path(__file__).parent / \"models\").rglob(f\"{name}.yaml\"))[0] # model.yaml path\n model = Model(cfg, channels, classes) # create model\n if autoshape:\n model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS\n return model.to(device)\n\n except Exception as e:\n help_url = \"https://github.com/ultralytics/yolov5/issues/36\"\n s = \"Cache may be out of date, try `force_reload=True`. See %s for help.\" % help_url\n raise Exception(s) from e", "title": "" }, { "docid": "1163e643e5b668503b7ff3340a328791", "score": "0.48880547", "text": "def make_dot(aut_fname, dot_fname):\n coll = collection.Collection()\n aut = load_automaton(coll, aut_fname, False, False)\n\n make_backup_file(dot_fname)\n\n dot_handle = open(dot_fname, 'w')\n dot_handle.write(aut.to_dot())\n dot_handle.close()", "title": "" } ]
129bf3d37fc164404295d1647290238f
Set an instance variable to test that this method gets called
[ { "docid": "abe8787ed3fa3d0a67f544bcbdd156df", "score": "0.0", "text": "def __parent_setup_2(self):\r\n self.parent_setup_exists += 1", "title": "" } ]
[ { "docid": "de81fdce13d62ec47641ea1a8c6ee8c7", "score": "0.73396957", "text": "def before_tester_run(self) -> None:", "title": "" }, { "docid": "f3b2d6e10065ff6b983c1edd3630c152", "score": "0.7324083", "text": "def classSetUp(self):\r\n assert self.something == True\r\n self.something = False", "title": "" }, { "docid": "51262b0e50f19bfeabbd206ecbc400f8", "score": "0.72828907", "text": "def _test(self):\n pass", "title": "" }, { "docid": "f755fb8af7ee7ec95e19b5587ea6f098", "score": "0.7067964", "text": "def testing(self):", "title": "" }, { "docid": "808425a98f90a1aaa5ee11daef2130fd", "score": "0.70527375", "text": "def _mockme(self):\n pass", "title": "" }, { "docid": "ccc77d904ab17a7c854c8f5789312938", "score": "0.69012713", "text": "def setup_method(self, method):\n assert method", "title": "" }, { "docid": "f7529441c82c36857491679fc0ad7bba", "score": "0.6900921", "text": "def test_(self):\n pass", "title": "" }, { "docid": "0714b55a928f10390dd112e4a71d64e7", "score": "0.6892391", "text": "def test_init(self):\n pass", "title": "" }, { "docid": "6395a0447babd542b1cb1cb13ec3ce90", "score": "0.68894523", "text": "def before_test_run(self) -> None:", "title": "" }, { "docid": "eaa2604b3e54d4eaa66788fed871de75", "score": "0.68563145", "text": "def testInit(self):", "title": "" }, { "docid": "39d1e4b8a393b823fec5ed0de63b0ff3", "score": "0.6824386", "text": "def test(cls):\n pass", "title": "" }, { "docid": "0645a56473c801c578eebf4001ceec3b", "score": "0.68234134", "text": "def test(self):\n # TODO\n pass", "title": "" }, { "docid": "343d0cce8356ac3e92c0de489f4efcd1", "score": "0.67005354", "text": "def test(self):\n pass", "title": "" }, { "docid": "343d0cce8356ac3e92c0de489f4efcd1", "score": "0.67005354", "text": "def test(self):\n pass", "title": "" }, { "docid": "675dfa93100212ef6375295a1a1ce439", "score": "0.66937464", "text": "def setup_method(self):\n self.out = None\n return", "title": "" }, { "docid": "1b1a5d7ff04d8016d90ae97a5edbdd07", "score": "0.6687489", "text": "def after_tester_run(self) -> None:", "title": "" }, { "docid": "82815e8f330845c57d8826a78a373780", "score": "0.664704", "text": "def setUp(self):\n self.obj = SomeClass()", "title": "" }, { "docid": "1996573f8fd08abd8c20cb78f5d337bd", "score": "0.6622737", "text": "def self_test():", "title": "" }, { "docid": "9f668c50b248a875b1d26bf7f593c018", "score": "0.6618984", "text": "def __init__(self):\n self.verbose = False\n self.simulate = False\n self.debug = False", "title": "" }, { "docid": "ad8da9c4f89cf541e07ee70f4417550c", "score": "0.65945566", "text": "def test(self):\n return True", "title": "" }, { "docid": "87ab336d1d39a975629372b0424f8642", "score": "0.6570382", "text": "def setup_method(self):\n\n reload(pysat.instruments.pysat_testing)\n self.name = 'testing'\n self.ref_time = pysat.instruments.pysat_testing._test_dates['']['']\n return", "title": "" }, { "docid": "e3b18d01113aa19349cad235b54ecf01", "score": "0.6537312", "text": "def SetUp(self):\n pass", "title": "" }, { "docid": "f9d58dbcc9463628df8a2b5dcadfc8fb", "score": "0.6458454", "text": "def _on_test_begin(self):\n pass", "title": "" }, { "docid": "12415e51a45655613acc98a6dd303b6a", "score": "0.6443678", "text": "def setUp(self):\n self.result = ''", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.64378744", "text": "def __call__(self):\n pass", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.64378744", "text": "def __call__(self):\n pass", "title": "" }, { "docid": 
"cb74abd832f67445757a9df7a3220729", "score": "0.6430383", "text": "def test_set_instance_field(self):\n pass", "title": "" }, { "docid": "06618ec00d1e59a1aa3d17ad542fd572", "score": "0.6405326", "text": "def setup_method(self):\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n self.ref_time = pysat.instruments.pysat_testing._test_dates['']['']\n self.warn_msgs = []\n self.war = \"\"\n return", "title": "" }, { "docid": "1020f9278cdfaa93e4732c3cd4517798", "score": "0.6402218", "text": "def test_init(self):\n self.assertTrue(hasattr(self.test, \"mouse_is_on\"))\n self.assertTrue(hasattr(self.test, \"hover\"))\n self.assertTrue(hasattr(self.test, \"request_list\"))\n self.assertTrue(hasattr(self.test, \"command_list\"))", "title": "" }, { "docid": "51f35c96fe367ace0d686a1971bd1021", "score": "0.6395603", "text": "def test_instrument_init(self):\n\n assert self.testInst.new_thing\n return", "title": "" }, { "docid": "3304793c6d84b318dab1a82509cae13e", "score": "0.63714534", "text": "def setup_class(self):", "title": "" }, { "docid": "3304793c6d84b318dab1a82509cae13e", "score": "0.63714534", "text": "def setup_class(self):", "title": "" }, { "docid": "4d89e4e206ecb876b22b77a273dfebbe", "score": "0.6368773", "text": "def initiate(self):", "title": "" }, { "docid": "0b0e1f4643bfe57e20fa45988b64a3cc", "score": "0.63375026", "text": "def init(self):\n\t\tpass", "title": "" }, { "docid": "4dcfc871867931eeed4399775081280b", "score": "0.63354313", "text": "def setUp(self):\n self.options = Options()", "title": "" }, { "docid": "3795754c68868d6a9865aab795995b9d", "score": "0.6334769", "text": "def setup(self) -> None:", "title": "" }, { "docid": "b269306d60acc935f8f503117a22bcd6", "score": "0.6329245", "text": "def run(self):\n assert_equal(self, 1, 1, 'test')\n assert_true(self, False, 'test')\n return", "title": "" }, { "docid": "db3d57cbf37ee951907cbac117eeb09b", "score": "0.6328691", "text": "def test_init(self, hotp):\n hotp.assert_not_called()\n hotp.return_value.verify.assert_not_called()", "title": "" }, { "docid": "f0868c574190f7892a5ed88ea2afca7d", "score": "0.63255155", "text": "def init(self):\n return True", "title": "" }, { "docid": "cf419d90fd2a0cb0ae308b3de97c90b5", "score": "0.63226557", "text": "def __init__(self):\r\n self.initialized = False", "title": "" }, { "docid": "cf419d90fd2a0cb0ae308b3de97c90b5", "score": "0.63226557", "text": "def __init__(self):\r\n self.initialized = False", "title": "" }, { "docid": "19eaeecab013f1002b22a8666ddd1919", "score": "0.6319818", "text": "def call(self):\n pass", "title": "" }, { "docid": "801be9e54b91396497442b025caecff4", "score": "0.6318234", "text": "def __init__(self):\n self.initialized = False", "title": "" }, { "docid": "801be9e54b91396497442b025caecff4", "score": "0.6318234", "text": "def __init__(self):\n self.initialized = False", "title": "" }, { "docid": "801be9e54b91396497442b025caecff4", "score": "0.6318234", "text": "def __init__(self):\n self.initialized = False", "title": "" }, { "docid": "ab0f4e081e9a97e6d579fb34b09337ac", "score": "0.6315708", "text": "def test_something(self):\n pass", "title": "" }, { "docid": "ab0f4e081e9a97e6d579fb34b09337ac", "score": "0.6315708", "text": "def test_something(self):\n pass", "title": "" }, { "docid": "20e315badfcd5ea2fbd2adeb30f65c3d", "score": "0.63003427", "text": "def init(self, ):\n pass", "title": "" }, { "docid": "fcafb07835deeb463e5bda11509ea028", "score": "0.62935686", "text": "def test_method(self):", "title": "" }, { "docid": "ff31b1e94630d6d5ec1d74bc327ce48a", 
"score": "0.6287083", "text": "def SetUp(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def]", "title": "" }, { "docid": "ff31b1e94630d6d5ec1d74bc327ce48a", "score": "0.6287083", "text": "def SetUp(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def]", "title": "" }, { "docid": "4034806bcb3ae3ad4c90b6a3eb9844a0", "score": "0.62859535", "text": "def setUp(self):\n print(\"setUp\")\n print(\"===========\")", "title": "" }, { "docid": "cb3cfadca177693c1d29f5e79bdd34cd", "score": "0.6281448", "text": "def setUp(self):\n self.supvisors = MockedSupvisors()", "title": "" }, { "docid": "4acc340f00371ec4a826c5450a6971f1", "score": "0.6276333", "text": "def test_init(self, hotp):\n hotp.assert_not_called\n hotp.return_value.verify.assert_not_called()", "title": "" }, { "docid": "afa0cd3753bfed9ca350c54f4c15ff2c", "score": "0.6259981", "text": "def setUpAttr(cls):\n pass", "title": "" }, { "docid": "3c963aade5e955a41445071162555445", "score": "0.6255766", "text": "def test(self, test):\n\n self._test = test", "title": "" }, { "docid": "3c963aade5e955a41445071162555445", "score": "0.6255766", "text": "def test(self, test):\n\n self._test = test", "title": "" }, { "docid": "2c82da39e2f0c091b2a1b647b101d2ef", "score": "0.6237032", "text": "def verify(self):\n raise Exception(\"abstact class called unexpectedly\")", "title": "" }, { "docid": "b931cb4adffb6ce8764de896d8ea98f8", "score": "0.6232571", "text": "def method(self):\n pass", "title": "" }, { "docid": "19c9f152b244481fe2a33153022c0f29", "score": "0.6231853", "text": "def setUpClass(cls):\n cls.token = generate_v2_test_token(username='bob')\n addr.const = MagicMock()\n addr.const.VLAB_IPAM_LOG_LEVEL = 'INFO'\n addr.const.VLAB_VERIFY_TOKEN = False", "title": "" }, { "docid": "ac72173d9df6ed7e73a1b9e8adc5e667", "score": "0.6227319", "text": "def setUp(cls):\n pass", "title": "" }, { "docid": "d7cf04ce463176249d361d722146083a", "score": "0.6224186", "text": "def setup_method(self):\n\n self.name = 'ndtesting'\n self.ref_time = pysat.instruments.pysat_testing._test_dates['']['']\n return", "title": "" }, { "docid": "40cbf579c77d25fc0bfdb693242b1beb", "score": "0.62184924", "text": "def __init__ (self):\n\t\tpass", "title": "" }, { "docid": "a5c013bdd1962d473bc29d8ef9f1594c", "score": "0.62147206", "text": "def test(self):", "title": "" }, { "docid": "6d80dfe989aa65877ae3dc3d77fb57db", "score": "0.6212095", "text": "def set(self) -> None:\n ...", "title": "" }, { "docid": "0d3a7974a6fa0f3868f469c68d2757c6", "score": "0.62030315", "text": "def run(self):\n # Subclass and overload\n assert(False)", "title": "" }, { "docid": "403c4dfdd692c82ed5514985279b1191", "score": "0.61971235", "text": "def setUp(self):\n self.context = Context()", "title": "" }, { "docid": "403c4dfdd692c82ed5514985279b1191", "score": "0.61971235", "text": "def setUp(self):\n self.context = Context()", "title": "" }, { "docid": "206755d80bf9e86ccc213a1fbc09e89b", "score": "0.6192788", "text": "def setUp(self):\n\t\tpass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": 
"0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.6190741", "text": "def init(self):\n pass", "title": "" }, { "docid": "82ed2740386a8b33a8212e5597c25781", "score": "0.6189197", "text": "def setUpClass(cls):\n cls.review = inspect.getmembers(review, inspect.isfunction)", "title": "" }, { "docid": "e1d913a5d348c714c776c3a28899f32b", "score": "0.61843926", "text": "def test_bad_init(self):", "title": "" }, { "docid": "8a12d13bb0d51fc9b1dc9016725da181", "score": "0.6181193", "text": "def setUp(self):\n self.amity = Amity()", "title": "" }, { "docid": "fc8f2d6201521d534f054d29a2847b9b", "score": "0.6176834", "text": "def __post_init__(self) -> None:", "title": "" }, { "docid": "dab9d16dbf4b0efe8d5bf45a9b1de6d4", "score": "0.6171858", "text": "def test(self):\r\n return False", "title": "" }, { "docid": "7f6ca4697e61e917d06607e64687bb0f", "score": "0.61702687", "text": "def run_test(self):\n pass", "title": "" }, { "docid": "3aea05982b9de22fb76c4c5f2fdcd5f2", "score": "0.6170054", "text": "def setup(self) -> bool:\n ...\n\n return True", "title": "" }, { "docid": "8cbc5b40487aa2d2d7445d47ccfdbd26", "score": "0.61607534", "text": "def pre_test_response(self):", "title": "" }, { "docid": "66342ab51d8d53981de648ca89724675", "score": "0.6158772", "text": "def test_something(self):\r\n return True", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "2fc2e6931a140e00b1ded6302e3cb897", "score": "0.61483204", "text": "def __init__(__self__):\n pass", "title": "" }, { "docid": "1e19df646aed902a37f593713904d3a0", "score": "0.6147413", "text": "def test_run(self):\n pass", "title": "" }, { "docid": "4bcdcb21bc840f71fda45502ffcd7f0c", "score": "0.6147379", "text": "def initialize(self):\r\n pass", "title": "" }, { "docid": "4bcdcb21bc840f71fda45502ffcd7f0c", "score": "0.6147379", "text": "def initialize(self):\r\n pass", "title": "" }, { "docid": "4bcdcb21bc840f71fda45502ffcd7f0c", "score": "0.6147379", "text": "def initialize(self):\r\n pass", "title": "" } ]
dde81fd75e8dab25708f015fee2011b9
If the same item appears in the list, sums the quantities and creates a unique Food object.
[ { "docid": "fc122672b927400040c2b8b6fa8b33a7", "score": "0.5547891", "text": "def eraseDubloons(foodList):\n resList = []\n foodNames = [f.codsougr_name for f in foodList]\n uniqueFoodNames = list(set(foodNames))\n for name in uniqueFoodNames:\n duplicates = [f for f in foodList if f.codsougr_name == name]\n qte_nette = sum([f.qte_nette for f in duplicates])\n codsougr = [f.codsougr for f in duplicates][0]\n \n newFood = Food(codsougr, name, qte_nette)\n resList.append(newFood)\n return resList", "title": "" } ]
[ { "docid": "4f021ae31218d83063fc7b7fe95882c6", "score": "0.6519353", "text": "def __add_muilt(self,food_name,quantity):\n\t\tif (not food_name in self.items):\n\t\t\tself.items[food_name] =0\n\t\tself.items[food_name] = self.items[food_name] + quantity", "title": "" }, { "docid": "232d5321f12a8823e62ae2f05ee7bab2", "score": "0.5817523", "text": "def addFood(self, num):\n self.camp.data[\"foodQuantity\"] += int(num)", "title": "" }, { "docid": "3f7d71c67883675ad8cf3a3be6f640a0", "score": "0.575201", "text": "def add(self, name, quantity):\n item = Item(name, quantity)\n if item in self.items:\n # we already have this, so add to it\n self.items[self.items.index(item)] += item\n else:\n self.items.append(item)", "title": "" }, { "docid": "5d5bf92df7dcf5cd7b9da5461a553cbf", "score": "0.5631031", "text": "def condense_grocery_list(list_of_grocery_items):\n condensed_list=[]\n condensed_dict=dict()\n for grocery_item in list_of_grocery_items:\n dict_key=grocery_item.unit+\"-\"+grocery_item.name\n if dict_key in condensed_dict:\n condensed_dict[dict_key].number = str(float(condensed_dict[dict_key].number) + float(grocery_item.number))\n else:\n condensed_dict[dict_key]=grocery_item\n\n for item in condensed_dict:\n condensed_list.append(condensed_dict[item])\n\n condensed_list.sort(key=lambda x: x.name)\n condensed_dict={}\n for item in condensed_list:\n if item.name in condensed_dict:\n condensed_dict[item.name] = condensed_dict[item.name] + \", \" + item.number.strip() + \" \" + item.unit.strip()\n else:\n condensed_dict[item.name]=item.name + \": \" + item.number.strip() + \" \" + item.unit.strip()\n condensed_list=[]\n for item in condensed_dict:\n new_item=shopping_item()\n new_item.name=item\n new_item.grocery_list_line=condensed_dict[item]\n condensed_list.append(new_item)\n\n return condensed_list", "title": "" }, { "docid": "615ee899602c1129b4b2df5f3f783058", "score": "0.5625155", "text": "def createMeal(jour, tyrep, itemsName, itemsDict, minItem, maxItem, meanQuantities):\n nItems = random.randint(minItem,maxItem)\n items = random.sample(itemsName, nItems)\n foodItems = [Food(itemsDict[x], x, meanQuantities[x]) for x in items]\n meal = Meal(foodItems, jour=jour, tyrep=tyrep)\n meal.reorder()\n return meal", "title": "" }, { "docid": "ff3c0498b926385879c47b0567a49a81", "score": "0.5587279", "text": "def consume_food(self, food):\n quantity = self.__items['food'].get(food, 0)\n if quantity > 0:\n self.__items['food'][food] = quantity - 1", "title": "" }, { "docid": "0139cce57cee1ffab79707a72f831baf", "score": "0.55362856", "text": "def add_to_inventory_v2(inventory, added_items):\n\n for item in added_items:\n if item not in inventory:\n my_inventory.setdefault(item, 1)\n else:\n my_inventory[item] += 1\n return my_inventory", "title": "" }, { "docid": "7d1cce71848b886fcbf253344f3c75a8", "score": "0.5494977", "text": "def add_to_inventory(inventory, added_items):\n for key in list(inventory):\n for item in added_items:\n if key == item:\n inventory[key] += 1\n else:\n inventory.setdefault(item, 1)\n\n return my_inventory", "title": "" }, { "docid": "21510822cd79307c80383341d84fa004", "score": "0.53571904", "text": "def add(self, item1, item2):\n for sumField in self.sumFields:\n item1[sumField] = item1[sumField] + item2[sumField]", "title": "" }, { "docid": "ae3d284c0f2807c357fd971d08a6f7f8", "score": "0.5330261", "text": "def addDish(self,dish):\r\n\t\tself.items[dish.name] = dish\r\n\t\tfor nutrient in TARGET_NUTRIENTS:\r\n\t\t\tself.totals[nutrient]+=dish.nutrition[nutrient]", 
"title": "" }, { "docid": "f1159106f3c256affb95ba2576b99a40", "score": "0.5326817", "text": "def add_inventory_item_and_quantity(self,ingredient:I_Inventory_Item,quantity:float):\n pass", "title": "" }, { "docid": "9fa86c34f680edfb331c7e63841be1d7", "score": "0.5321849", "text": "def update_world_totals(new_item):\n # TODO: Can definitely optimize this search if performance becomes an issue\n for item in world_inv:\n if new_item.equals(item):\n item.set_count(item.count + new_item.count)\n return\n\n if (new_item.damage != 0 and (new_item.full_name in degradeable_tools or new_item.full_name in degradeable_armor)):\n world_inv.append(Item(new_item.full_name, new_item.count, -1, new_item.lore))\n else:\n world_inv.append(Item(new_item.full_name, new_item.count, new_item.damage, new_item.lore))", "title": "" }, { "docid": "90ff6a56e8a808fc9686f492e012e077", "score": "0.52610236", "text": "def feeding_animal(self):\n for food in self.get_food_set():\n for animal in self.get_animal_set():\n if (abs(animal.position[0] - food.position[0]) < Constants.ANIMAL_WIDTH / 2 + Constants.FOOD_WIDTH / 2 and \\\n abs(animal.position[1] - food.position[1]) < Constants.ANIMAL_HEIGHT / 2 + Constants.FOOD_HEIGHT / 2):\n self.animal_set.remove(animal)\n self.money += 1\n food.used = True\n if food.used:\n self.food_set.remove(food)", "title": "" }, { "docid": "f300c9707bf682e3a6b1d5677eba5aa9", "score": "0.52460706", "text": "def execute_item_creation(self)->None:\n [ingedient.use_quantity(quantity) for ingedient,quantity in self.ingredient_quantity]", "title": "" }, { "docid": "70b286ea3b1c733d10122c24d15eb782", "score": "0.52455187", "text": "def digest_fat(self):\n can_move = min(self.population - self.food, self.fat_storage)\n self.fat_storage -= can_move\n self.food += can_move", "title": "" }, { "docid": "ae9736db106bb68027158dacec819627", "score": "0.5245462", "text": "def add_fruit(inventory, fruit, quantity=0):\n #print fruit, quantity\n inventory[fruit]=quantity\n #return inventory", "title": "" }, { "docid": "bf0a041ec85a5bcb7a4fa271d76f533e", "score": "0.5186193", "text": "def add_item(self, item_name, item_price):\n if 'none' in self.items_with_price:\n self.items_with_price.clear()\n self.items_with_price.update({str(item_name): float(item_price)})", "title": "" }, { "docid": "379782037ed753eb6be9b48c0ea5b3b7", "score": "0.51835597", "text": "def add_to_inventory(inventory, added_items):\n for i in added_items:\n if i in inventory.keys():\n inventory[i] += 1\n else:\n inventory[i] = 1\n return inventory", "title": "" }, { "docid": "8fd6eb79281380c997d0ddf13a3b1137", "score": "0.51719004", "text": "def updateCart(self, replaceItem):\n\t\ttopFoods = self.dashFilter(replaceItem)\n\t\trestOfCart = [item for item in self.foods if item[\"id\"] != replaceItem]\n\t\t#topCombo = self.comboFilter(topFoods, restOfCart)\n\t\ttopCombo = self.randomFilter(topFoods)\n\t\tself.foods = [{\"id\":topCombo[\"id\"],\"name\":topCombo['name'],\"groups\":item['groups'],\"combos\":topCombo['combos']} if item[\"id\"] == replaceItem else item for item in self.foods]\n\t\treturn {'id':topCombo['id'],'name':topCombo['name'].upper(),'img':topCombo['id']+'.jpg'}", "title": "" }, { "docid": "242d58e8f298df51464e9f6224d850aa", "score": "0.51659745", "text": "def addFood(surface) -> Food:\n\n newFood = Food(\n coord=[randint(0, screen_resolution[0]), randint(0, screen_resolution[1])],\n energy=2,\n surface=surface\n )\n foodHolder.append(newFood)\n\n return newFood", "title": "" }, { "docid": "3c319e9376a5acfcd6692426a6106298", 
"score": "0.51233023", "text": "def test_bulk_inventory_quantity_combined(self):\n category = ['Clothing', 'Unisex Adult Clothing', 'Pants']\n\n expected_left_bulk_header = 'Individual Quantity'\n\n new_quantities = ['200', '300', '400', '500']\n\n expected_product_offerings = [\n ['1', '500.00', '550', '200', 't', 'L', '1', 'Black', '1'],\n ['1', '500.00', '550', '300', 't', 'L', '1', 'Pinkish', '2'],\n ['1', '500.00', '550', '400', 't', 'M', '2', 'Black', '1'],\n ['1', '500.00', '550', '500', 't', 'M', '2', 'Pinkish', '2'],\n ['2', '1.00', '111', '200', 't', 'L', '1', 'Black', '1'],\n ['2', '1.00', '111', '300', 't', 'L', '1', 'Pinkish', '2'],\n ['2', '1.00', '111', '400', 't', 'M', '2', 'Black', '1'],\n ['2', '1.00', '111', '500', 't', 'M', '2', 'Pinkish', '2'],\n ['3', '10.00', '222', '200', 't', 'L', '1', 'Black', '1'],\n ['3', '10.00', '222', '300', 't', 'L', '1', 'Pinkish', '2'],\n ['3', '10.00', '222', '400', 't', 'M', '2', 'Black', '1'],\n ['3', '10.00', '222', '500', 't', 'M', '2', 'Pinkish', '2']\n ]\n\n expected_variation_properties = [\n ['1', 't', '62809790503', 'Size', '49', 'f', 't', 'f'],\n ['1', 'f', '200', 'Color (primary)', '', 'f', 't', 'f'],\n ['2', 't', '62809790503', 'Size', '49', 'f', 't', 'f'],\n ['2', 'f', '200', 'Color (primary)', '', 'f', 't', 'f'],\n ['3', 't', '62809790503', 'Size', '49', 'f', 't', 'f'],\n ['3', 'f', '200', 'Color (primary)', '', 'f', 't', 'f']\n ]\n\n expected_variation_options = [\n ['1', 't', '2119', 'L', '1'],\n ['1', 't', '2116', 'M', '2'],\n ['1', 'f', '1', 'Black', '1'],\n ['1', 'f', '', 'Pinkish', '2'],\n ['2', 't', '2119', 'L', '1'],\n ['2', 't', '2116', 'M', '2'],\n ['2', 'f', '1', 'Black', '1'],\n ['2', 'f', '', 'Pinkish', '2'],\n ['3', 't', '2119', 'L', '1'],\n ['3', 't', '2116', 'M', '2'],\n ['3', 'f', '1', 'Black', '1'],\n ['3', 'f', '', 'Pinkish', '2']\n ]\n\n bpiv = BulkPageInventoryVariations(self.driver, self.ts)\n\n # Select category in bulk edit area\n bpiv.select_category(category)\n\n bulk_row = bpiv.bulk_edit_row\n assert bpiv.property_box(bulk_row, 0).text == 'Choose Property'\n\n # Set first variation property\n bpiv.set_property(bulk_row, 0, 'Size', \"Men's\")\n # Add two options to first property, one by one\n bpiv.add_option(bulk_row, 0, 'L', displayed_name=\"L Men's\")\n bpiv.add_option(bulk_row, 0, 'M', displayed_name=\"M Men's\")\n\n # Set second variation property\n bpiv.set_property(bulk_row, 1, 'Color (primary)')\n # Add two options to first property, one by one\n bpiv.add_option(bulk_row, 1, 'Black')\n bpiv.add_custom_option(bulk_row, 1, 'Pinkish')\n\n bpiv.select_inventory_tab(bulk_row, \"Quantity\")\n\n # Switch to individual quantities, check the header is changed\n click(bpiv.bulk_header_checkbox(bulk_row, i=0))\n click(bpiv.bulk_header_checkbox(bulk_row, i=1), delay=0.5)\n assert bpiv.bulk_edit_row_header_text(bulk_row, i=0) == expected_left_bulk_header\n\n # Enter individual quantities for the property combinations\n inputs = bpiv.bulk_individual_option_inputs(bulk_row, i=0)\n for i, quantity in enumerate(new_quantities):\n send_keys(inputs[i], str(quantity) + Keys.RETURN)\n\n # Check that only titles are in listing preview as preview is disabled\n check_simplified_preview(bpiv)\n\n # Blue dot is not displayed and sync button is disabled before clicking on Apply\n assert bpiv.is_part_modified('Variations') is False, 'Blue dot should not be visible yet'\n assert bpiv.sync_updates_button().is_enabled() is False, 'Sync button should be still disabled'\n\n # Apply\n assert 
bpiv.operation_apply().is_enabled() is True\n click(bpiv.operation_apply())\n # Check that sync button is enabled and blue dot is displayed after clicking on Apply\n wait_for_web_assert(True, lambda: bpiv.sync_updates_button().is_enabled(),\n 'Sync button is not enabled')\n assert bpiv.is_part_modified('Variations') is True, 'Blue dot didn\\'t show up'\n\n # Check bulk edit area texts and that Apply button is disabled\n check_bulk_properties_disabled(bpiv)\n\n # Sync updates\n click(bpiv.sync_updates_button())\n\n # Check data in DB\n check_db_state(expected_product_offerings, expected_variation_properties, expected_variation_options)", "title": "" }, { "docid": "8ea81c57ff2db743346ab37434f24574", "score": "0.51203", "text": "def calculateAmount(self):\n return sum([item.price for item in self.items])", "title": "" }, { "docid": "e5115be39649d3f3029e91969f33dab0", "score": "0.5108897", "text": "def add_to_inventory(player_inventory, added_items):\n # For each new piece of loot...\n for new_loot_piece in added_items:\n if new_loot_piece in player_inventory:\n # if the loot already existed in inventory, increment it's count\n player_inventory[new_loot_piece] += 1\n else:\n # If the loot was not already in the inventory, create a new piece of loot\n # in the inventory and se it's default value to 1.\n player_inventory.setdefault(new_loot_piece, 1)\n\n return player_inventory", "title": "" }, { "docid": "5e4b677c435c587a9cfddc38115a3c4f", "score": "0.5098413", "text": "def test_bulk_inventory_single_quantity_individual(self, property_number):\n category = ['Home & Living', 'Furniture']\n\n expected_left_bulk_header = 'Individual Quantity'\n\n new_quantities = ['22', '33']\n\n expected_product_offerings = [\n [\n ['1', '500.00', '550', '22', 't', 'wool', '1', 'Black', '1'],\n ['1', '500.00', '550', '22', 't', 'wool', '1', 'White', '2'],\n ['1', '500.00', '550', '33', 't', 'cotton', '2', 'Black', '1'],\n ['1', '500.00', '550', '33', 't', 'cotton', '2', 'White', '2'],\n ['2', '1.00', '111', '22', 't', 'wool', '1', 'Black', '1'],\n ['2', '1.00', '111', '22', 't', 'wool', '1', 'White', '2'],\n ['2', '1.00', '111', '33', 't', 'cotton', '2', 'Black', '1'],\n ['2', '1.00', '111', '33', 't', 'cotton', '2', 'White', '2'],\n ['3', '10.00', '222', '22', 't', 'wool', '1', 'Black', '1'],\n ['3', '10.00', '222', '22', 't', 'wool', '1', 'White', '2'],\n ['3', '10.00', '222', '33', 't', 'cotton', '2', 'Black', '1'],\n ['3', '10.00', '222', '33', 't', 'cotton', '2', 'White', '2']\n ],\n [\n ['1', '500.00', '550', '22', 't', 'wool', '1', 'Black', '1'],\n ['1', '500.00', '550', '33', 't', 'wool', '1', 'White', '2'],\n ['1', '500.00', '550', '22', 't', 'cotton', '2', 'Black', '1'],\n ['1', '500.00', '550', '33', 't', 'cotton', '2', 'White', '2'],\n ['2', '1.00', '111', '22', 't', 'wool', '1', 'Black', '1'],\n ['2', '1.00', '111', '33', 't', 'wool', '1', 'White', '2'],\n ['2', '1.00', '111', '22', 't', 'cotton', '2', 'Black', '1'],\n ['2', '1.00', '111', '33', 't', 'cotton', '2', 'White', '2'],\n ['3', '10.00', '222', '22', 't', 'wool', '1', 'Black', '1'],\n ['3', '10.00', '222', '33', 't', 'wool', '1', 'White', '2'],\n ['3', '10.00', '222', '22', 't', 'cotton', '2', 'Black', '1'],\n ['3', '10.00', '222', '33', 't', 'cotton', '2', 'White', '2']\n ]\n ]\n\n expected_variation_properties = [\n [\n ['1', 't', '502', 'Fabric', '', 'f', 't', 'f'],\n ['1', 'f', '200', 'Color (primary)', '', 'f', 'f', 'f'],\n ['2', 't', '502', 'Fabric', '', 'f', 't', 'f'],\n ['2', 'f', '200', 'Color (primary)', '', 'f', 'f', 'f'],\n ['3', 
't', '502', 'Fabric', '', 'f', 't', 'f'],\n ['3', 'f', '200', 'Color (primary)', '', 'f', 'f', 'f']\n ],\n [\n ['1', 't', '502', 'Fabric', '', 'f', 'f', 'f'],\n ['1', 'f', '200', 'Color (primary)', '', 'f', 't', 'f'],\n ['2', 't', '502', 'Fabric', '', 'f', 'f', 'f'],\n ['2', 'f', '200', 'Color (primary)', '', 'f', 't', 'f'],\n ['3', 't', '502', 'Fabric', '', 'f', 'f', 'f'],\n ['3', 'f', '200', 'Color (primary)', '', 'f', 't', 'f']\n ]\n ]\n\n expected_variation_options = [\n [\n ['1', 't', '', 'wool', '1'],\n ['1', 't', '', 'cotton', '2'],\n ['1', 'f', '1', 'Black', '1'],\n ['1', 'f', '10', 'White', '2'],\n ['2', 't', '', 'wool', '1'],\n ['2', 't', '', 'cotton', '2'],\n ['2', 'f', '1', 'Black', '1'],\n ['2', 'f', '10', 'White', '2'],\n ['3', 't', '', 'wool', '1'],\n ['3', 't', '', 'cotton', '2'],\n ['3', 'f', '1', 'Black', '1'],\n ['3', 'f', '10', 'White', '2']\n ],\n [\n ['1', 't', '', 'wool', '1'],\n ['1', 't', '', 'cotton', '2'],\n ['1', 'f', '1', 'Black', '1'],\n ['1', 'f', '10', 'White', '2'],\n ['2', 't', '', 'wool', '1'],\n ['2', 't', '', 'cotton', '2'],\n ['2', 'f', '1', 'Black', '1'],\n ['2', 'f', '10', 'White', '2'],\n ['3', 't', '', 'wool', '1'],\n ['3', 't', '', 'cotton', '2'],\n ['3', 'f', '1', 'Black', '1'],\n ['3', 'f', '10', 'White', '2']\n ]\n ]\n\n\n bpiv = BulkPageInventoryVariations(self.driver, self.ts)\n\n # Select category in bulk edit area\n bpiv.select_category(category)\n\n bulk_row = bpiv.bulk_edit_row\n assert bpiv.property_box(bulk_row, 0).text == 'Choose Property'\n\n # Set first variation property\n bpiv.set_property(bulk_row, 0, 'Fabric')\n # Add two options to first property, one by one\n bpiv.add_custom_option(bulk_row, 0, 'wool')\n bpiv.add_custom_option(bulk_row, 0, 'cotton')\n\n # Set second variation property\n bpiv.set_property(bulk_row, 1, 'Color (primary)')\n # Add two options to first property, one by one\n bpiv.add_option(bulk_row, 1, 'Black')\n bpiv.add_option(bulk_row, 1, 'White')\n\n bpiv.select_inventory_tab(bulk_row, \"Quantity\")\n\n # Switch to individual quantities, check the header is changed\n click(bpiv.bulk_header_checkbox(bulk_row, i=property_number), delay=0.5)\n assert bpiv.bulk_edit_row_header_text(bulk_row, i=0) == expected_left_bulk_header\n\n # Enter individual quantities\n inputs = bpiv.bulk_individual_option_inputs(bulk_row, i=property_number)\n for i, quantity in enumerate(new_quantities):\n send_keys(inputs[i], str(quantity) + Keys.RETURN)\n\n # Check that only titles are in listing preview as preview is disabled\n check_simplified_preview(bpiv)\n\n # Blue dot is not displayed and sync button is disabled before clicking on Apply\n assert bpiv.is_part_modified('Variations') is False, 'Blue dot should not be visible yet'\n assert bpiv.sync_updates_button().is_enabled() is False, 'Sync button should be still disabled'\n\n # Apply\n assert bpiv.operation_apply().is_enabled() is True\n click(bpiv.operation_apply())\n # Check that sync button is enabled and blue dot is displayed after clicking on Apply\n wait_for_web_assert(True, lambda: bpiv.sync_updates_button().is_enabled(),\n 'Sync button is not enabled')\n assert bpiv.is_part_modified('Variations') is True, 'Blue dot didn\\'t show up'\n\n # Check bulk edit area texts and that Apply button is disabled\n check_bulk_properties_disabled(bpiv)\n\n # Sync updates\n click(bpiv.sync_updates_button())\n\n # Check data in DB\n check_db_state(expected_product_offerings[property_number],\n expected_variation_properties[property_number],\n expected_variation_options[property_number])", 
"title": "" }, { "docid": "ddde5fc2882d117d910fb6854f8c507f", "score": "0.50927", "text": "def concatenate_items(self) -> dict:\n test = []\n for text in self.receipt_text:\n if self.text_is_number(text) | self.remove_pound_symbol(text) | self.change_o_to_zero(text):\n text = text[1:]\n else:\n test.append(text)\n\n if self.text_is_number(text):\n if test[-1] in self.dict_to_return:\n self.dict_to_return[test[-1]]['quantity'] += 1\n else:\n self.dict_to_return[test[-1]] = {'name': text, 'quantity': 1}\n test = []\n\n return self.dict_to_return", "title": "" }, { "docid": "28e1c26cb444d691a6d757c89aed7d2b", "score": "0.5092597", "text": "def add_inventory_item_and_quantity(self,ingredient:I_Inventory_Item,quantity:float):\n self.ingredient_quantity.append((ingredient,quantity))", "title": "" }, { "docid": "0a68a1b31438ff6224ac04a9bb4f05d1", "score": "0.50659823", "text": "def scan_onto_shelf(self):\n if hasattr(self, \"shelf\"):\n items_added = []\n for item in self.items_stored:\n for product in self.shelf.compartments:\n if product[\"Product Number\"] == item.number:\n if product[\"Items Stored\"] is None:\n product[\"Items Stored\"] = []\n product[\"Items Stored\"].append(item)\n product[\"Quantity\"] += 1\n items_added.append(item)\n Item.sorted_items.append(item)\n Item.shipment.remove(item)\n for item in items_added:\n self.items_stored.remove(item)", "title": "" }, { "docid": "484270c6a6c051655cc77f321d243802", "score": "0.50581247", "text": "def add_to_inventory(inventory, added_items):\n if added_items in inventory:\n inventory[added_items] += 1\n else:\n inventory[element] = 1", "title": "" }, { "docid": "a4d21353320ad10efa03007bf3481510", "score": "0.50539666", "text": "def add(self, product, quantity):\n product_id = product.id\n if product_id not in self.basket:\n self.basket[product_id] = {\"price\": float(product.regular_price), \"quantity\": quantity}\n else:\n self.basket[product_id][\"quantity\"] = quantity\n self.save()", "title": "" }, { "docid": "35cf195a68cb0eebf880a0671ed3bb9d", "score": "0.50376934", "text": "def _add_dict_inventory(self, d, item_pouch): # item_pouch used to find which dictionary to look in\r\n for group in d:\r\n if group in item_pouch:\r\n if item_pouch[group] is None:\r\n item_pouch[group] = d[group]\r\n elif isinstance(item_pouch[group], list):\r\n item_pouch[group].extend(d[group])\r\n elif isinstance(item_pouch[group], dict):\r\n self._add_dict_inventory(d[group], item_pouch[group])\r\n elif isinstance(item_pouch[group], int):\r\n item_pouch[group] = item_pouch.setdefault(group, 0)+d[group]\r\n else:\r\n item_pouch[group] = d[group]", "title": "" }, { "docid": "3ca2e8c63e53f9ab69061677cf963375", "score": "0.50190413", "text": "def test_adding_item_to_order_model_is_add_one_to_quantity(self):\n cart_item_1 = CartItem.objects.create(order=self.order, item=self.item_1)\n cart_item_1.Add() # quantity = 1 ** payout = self.item_1.price * 1\n cart_item_1.Add() # quantity = 2 ** payout = self.item_1.price * 2\n\n self.assertEqual(CartItem.objects.get(order=self.order, item=self.item_1).quantity, 2)\n self.assertEqual(cart_item_1.total_price, self.item_1.price * 2)\n self.assertEqual(self.order.payout, self.item_1.price * 2)", "title": "" }, { "docid": "f019318a90a39d1d4baa58c66b55a7e8", "score": "0.50127226", "text": "def uniq_add(my_list=[]):\n\n return sum(set(my_list))", "title": "" }, { "docid": "85016ea7f6bcc15054e712a84e7aaa66", "score": "0.5005258", "text": "def create_order_item(self, food_item, quantity):\n OrderItem.objects.create(\n 
user=self.customer,\n food_item=food_item,\n quantity=quantity,\n )", "title": "" }, { "docid": "3f95d8bb1a0a822a0a89287235821bd2", "score": "0.50050193", "text": "def calculateAmount(self):\n summa = 0\n for item in self.items:\n summa += item.price\n return summa", "title": "" }, { "docid": "0a975cec92256ad6ac4119086b89bcaf", "score": "0.50043887", "text": "def add(self, product, quantity=None, update_quantity=False):\n if product.price_with_taxes > 0 and quantity != \"0\":\n self.cart[str(product.id)] = {\n 'user_id': self.request.user.id,\n 'article_id': str(product.id),\n 'name': product.name,\n 'quantity': quantity,\n 'price_type': product.price_type,\n 'price_with_taxes': str(product.price_with_taxes),\n 'article_code': product.article_code,\n }\n self.save()", "title": "" }, { "docid": "e946d19a98dd4708d5ea172fdab839a3", "score": "0.49940917", "text": "def fortune(player):\n random.seed()\n \n # (inventory item, amount received)\n treasures = [\n ('food',random.randint(100,500)),\n ('money',random.randint(200,500)),\n ('bullets',random.randint(30,100)),\n ('kits',random.randint(2,3)),\n ('parts',random.randint(2,3)),\n ]\n \n # Determine random item.\n found = random.choice(treasures)\n \n # Handle special message for food.\n if found[0] == 'food':\n current_food = player.get_from_inventory('food')\n # Handle food inventory limit.\n if current_food + found[1] > 1000:\n added_food = 1000 - current_food\n print(\"You found {} pounds of food in an abandoned wagon\".format(found[1]))\n print(\"You can only add {} pounds to your wagon\".format(added_food))\n player.update_inventory('food',1000)\n else:\n print(\"You found {} pounds of food in an abandoned wagon\".format(found[1]))\n player.add_to_inventory('food',found[1])\n \n # Handle special message for money.\n elif found[0] == 'money':\n print(\"You found ${} in an abandoned wagon\".format(\"%.2f\" %found[1]))\n player.add_to_inventory('money',found[1])\n \n # Handle all other items.\n else:\n print(\"You found {} {} in an abandoned wagon\".format(found[1],found[0]))\n player.add_to_inventory(found[0],found[1])", "title": "" }, { "docid": "1e056e6e443126ef305397e1edf9f00e", "score": "0.49848992", "text": "def calc_items_total(cls, items):\n shipping_charge = 0\n subtotal = 0\n ad_charge_amt = 0\n ad_charge_breakup = {}\n\n for item in items:\n shipping_charge += item.shipping_charge\n subtotal += item.subtotal\n ad_charge_amt += item.additional_charge\n\n for charge in item.cost_breakup['additional_charge']:\n k = slugify(charge.get('name')).replace('-', '_')\n v = charge.get('amt')\n # Initialize value\n if ad_charge_breakup.get(k, None) is None:\n ad_charge_breakup[k] = 0.0\n ad_charge_breakup[k] += v\n\n return {\n 'shipping_charge': helper.round_off(shipping_charge),\n 'subtotal': helper.round_off(subtotal),\n 'additional_charge': helper.round_off(ad_charge_amt),\n 'cost_breakup': {\n 'additional_charge': ad_charge_breakup\n }\n }", "title": "" }, { "docid": "5ba05c3eea1691cbb2cf7dd27f2811a3", "score": "0.49668226", "text": "def add(self, comida, quantity=1, update_quantity=False):\n product_id = str(comida.id)\n if product_id not in self.cart:\n self.cart[product_id] = {'quantity': 0,\n 'precio': str(comida.precio)}\n if update_quantity:\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quantity'] += quantity\n self.save()", "title": "" }, { "docid": "2a55875101d7602488fcb4f6054d62ad", "score": "0.49663317", "text": "def initializeFoods(self):\n for i in range(0, self.numFoods):\n self.addFood()", 
"title": "" }, { "docid": "9da1b12f109a9a6aef2dab62544176d0", "score": "0.49556127", "text": "def add(self, product, qty):\n product_id = str(product.id)\n\n if product_id in self.basket:\n self.basket[product_id][\"qty\"] = qty\n else:\n self.basket[product_id] = {\"price\": str(product.regular_price), \"qty\": qty}\n\n self.save()", "title": "" }, { "docid": "ffc752d2ca3326fdbd563d1bbeac4d05", "score": "0.49489328", "text": "def add(self, product, quantity=1, update_quantity=False):\n product_id = str(product.id) # converted to string because JSON use only string data\n if product_id not in self.cart:\n self.cart[product_id] = {'quantity': 0,\n 'price': str(product.price)\n }\n if update_quantity:\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quantity'] += quantity # increment the quantity\n self.save() # saving the state of the quantity after update", "title": "" }, { "docid": "3e122449c4307f51ed5cb01667c9a9d1", "score": "0.49486324", "text": "def eatFood(self):\n # if a prey is in contact with a food, then eat the food by removing the food and increment hunger.\n objIDInContactList = super().getContactObjects()\n # if there are objects in contact with this prey, then determine if they are food and if so eat them!\n foodEaten = 0\n if objIDInContactList:\n for objID in objIDInContactList:\n food = hsm.objIDToObject[objID]\n if food in hsm.foodList:\n hsm.foodList.remove(food)\n hsm.destroy(objID)\n foodEaten += 1\n # incrememnt hunger accordingly\n self.hunger += mv.PREY_INCREMENT_HUNGER \n # throw away excess, prey can only eat so much food at a time.\n if self.hunger > mv.PREY_MAX_HUNGER:\n self.hunger = mv.PREY_MAX_HUNGER\n if foodEaten > 0:\n self.foodTimeStamps.append([hsm.frameCount, foodEaten])", "title": "" }, { "docid": "892adb19f630ba769f49adfe54589f4c", "score": "0.4945769", "text": "def add_item(self, x):\n self._items_with_price.update(x)", "title": "" }, { "docid": "8b3b75d718473bad062e7134b4e43dd2", "score": "0.49438444", "text": "def add_item_to_cart(self, product_id, quantity):\n product = Product.objects.filter(product_id=product_id, store=self.customer_id.store_id, deleted_at=None).first()\n if product is None or product.inventory < quantity:\n raise ProductNotFoundException\n\n product.inventory -= quantity\n product.save()\n\n existing_cart_item = CartItem.objects.filter(cart_id=self.cart_id, product_id=product.product_id\n , removed_at=None).first()\n\n self.customer_id.last_cart_activity_at = datetime.now()\n self.customer_id.updated_at = datetime.now()\n self.customer_id.save()\n if existing_cart_item:\n existing_cart_item.quantity += quantity\n existing_cart_item.save()\n return existing_cart_item\n else:\n cart_item = CartItem(\n cart_item_id=str(uuid.uuid4().int),\n product_id=product.product_id,\n cart_id=self.cart_id,\n quantity=quantity\n ).save()\n return cart_item", "title": "" }, { "docid": "106331337e2a2c1d307ba926e7184437", "score": "0.49325597", "text": "def calculate_total_nutrition_fulldayofeating2(\n specificingredient2_dict_list,\n all_nutrients_and_default_units,\n set_to_zero_if_none,\n):\n\n # Initialize the dictionary which will store the results\n result_total_nutrition_fulldayofeating = {}\n for dict_k in all_nutrients_and_default_units:\n nutrient_name = dict_k['nutrient_name_measuredfood']\n new_dict = {nutrient_name: 0}\n result_total_nutrition_fulldayofeating.update(\n new_dict\n )\n\n # Go through all the SpecificIngredients related to the FullDayOfEating.\n # For each SpecificIngredient, calculate its 
Nutrition by\n # multiplying the calculated_amount divided by the reference amount\n # multiplied by each nutrient value in the associated RawIngredient.\n\n # Sum up the total nutrition\n for k in range(len(specificingredient2_dict_list)):\n # For each nutrient, calculate the amount of that nutrient contained\n # in the calculated_amount of the SpecificIngredient\n for dict_k in all_nutrients_and_default_units:\n nutrient_name = dict_k['nutrient_name_measuredfood']\n result_total_nutrition_fulldayofeating[nutrient_name] = \\\n result_total_nutrition_fulldayofeating[nutrient_name] \\\n + set_to_zero_if_none(specificingredient2_dict_list[k][\n 'raw_ingredient'][nutrient_name]) \\\n * float(specificingredient2_dict_list[k]['calculated_amount'])\\\n / float(specificingredient2_dict_list[k][\n 'raw_ingredient']['reference_amount'])\n\n # Round the values in the result_total_nutrition_fulldayofeating\n # Initialize the dictionary containing the rounded values.\n result_total_nutrition_fulldayofeating_rounded = {}\n for dict_k in all_nutrients_and_default_units:\n nutrient_name = dict_k['nutrient_name_measuredfood']\n new_dict = {nutrient_name: 0}\n result_total_nutrition_fulldayofeating_rounded.update(\n new_dict\n )\n\n # Round the values and save them to the respective dictionary.\n for dict_k in all_nutrients_and_default_units:\n nutrient_name = dict_k['nutrient_name_measuredfood']\n result_total_nutrition_fulldayofeating_rounded[nutrient_name] = \\\n round(result_total_nutrition_fulldayofeating[nutrient_name], 1)\n\n return result_total_nutrition_fulldayofeating,\\\n result_total_nutrition_fulldayofeating_rounded", "title": "" }, { "docid": "6e950cf94ff01f7be16cc47e54da41ba", "score": "0.4931627", "text": "def _update_total_nutrition_values(self, unique_id, date, plan, add):\n table = self.dynamodb.Table(c.TABLE_NUTRITIONAL_NEEDS_DAY)\n week = form.get_week_by_date(date)\n\n item_nutrients_for_week = table.get_item(\n TableName=c.TABLE_NUTRITIONAL_NEEDS_WEEK,\n Key={\n c.UNIQUE_IDENTIFIER: unique_id,\n c.WEEK: week\n },\n ProjectionExpression='#toplevel',\n ExpressionAttributeNames={\"#toplevel\": c.NUTRIENTS_FOR_WEEK}\n )\n\n item_nutrients_for_day = table.get_item(\n TableName=c.TABLE_NUTRITIONAL_NEEDS_DAY,\n Key={\n c.UNIQUE_IDENTIFIER: unique_id,\n c.DATE: date\n },\n ProjectionExpression='#toplevel',\n ExpressionAttributeNames={\"#toplevel\": c.NUTRIENTS_FOR_DAY}\n )\n\n for container, container_content in plan.iteritems():\n item_nutrients_for_container = table.get_item(\n TableName=c.TABLE_NUTRITIONAL_NEEDS_DAY,\n Key={c.UNIQUE_IDENTIFIER: unique_id, c.DATE: date},\n ProjectionExpression='#toplevel.#container',\n ExpressionAttributeNames={\n \"#toplevel\": c.NUTRIENTS_FOR_CONTAINER,\n \"#container\": container\n }\n )\n for meal_key, meal_content in container_content.iteritems():\n\n\n\n for n in params.nutrientList:\n item_nutrients_for_week[c.ITEM][c.NUTRIENTS_FOR_WEEK][n]['VAL'] += meal_content['nutrients'][n]['VAL'] * \\\n (1 if add else -1)\n item_nutrients_for_week[c.ITEM][c.NUTRIENTS_FOR_WEEK][n]['UNIT'] = meal_content['nutrients'][n]['UNIT']\n item_nutrients_for_day[c.ITEM][c.NUTRIENTS_FOR_DAY][n]['VAL'] += meal_content['nutrients'][n]['VAL'] * \\\n (1 if add else -1)\n item_nutrients_for_day[c.ITEM][c.NUTRIENTS_FOR_DAY][n]['UNIT'] = meal_content['nutrients'][n]['UNIT']\n item_nutrients_for_container[c.ITEM][c.NUTRIENTS_FOR_CONTAINER][container][n]['VAL'] += \\\n meal_content['nutrients'][n]['VAL'] * (1 if add else -1)\n 
item_nutrients_for_container[c.ITEM][c.NUTRIENTS_FOR_CONTAINER][container][n]['UNIT'] = \\\n meal_content['nutrients'][n]['UNIT']\n\n\n table.update_item(\n TableName=c.TABLE_NUTRITIONAL_NEEDS_DAY,\n Key={c.UNIQUE_IDENTIFIER: unique_id,\n c.DATE: date},\n UpdateExpression='SET #toplevel.#cat = :value',\n ExpressionAttributeNames={\n \"#toplevel\": c.NUTRIENTS_FOR_CONTAINER,\n \"#cat\": container\n },\n ExpressionAttributeValues={\n \":value\": item_nutrients_for_container[c.ITEM][c.NUTRIENTS_FOR_CONTAINER][container]\n }\n )\n\n table.update_item(\n TableName=c.TABLE_NUTRITIONAL_NEEDS_DAY,\n Key={c.UNIQUE_IDENTIFIER: unique_id,\n c.DATE: date},\n UpdateExpression='SET #toplevel = :value',\n ExpressionAttributeNames={\n \"#toplevel\": c.NUTRIENTS_FOR_DAY\n },\n ExpressionAttributeValues={\n \":value\": item_nutrients_for_day[c.ITEM][c.NUTRIENTS_FOR_DAY]\n }\n )\n\n table.update_item(\n TableName=c.TABLE_NUTRITIONAL_NEEDS_WEEK,\n Key={\n c.UNIQUE_IDENTIFIER: unique_id,\n c.WEEK: week\n },\n UpdateExpression='SET #toplevel = :value',\n ExpressionAttributeNames={\n \"#toplevel\": c.NUTRIENTS_FOR_WEEK\n },\n ExpressionAttributeValues={\n \":value\": item_nutrients_for_week[c.ITEM][c.NUTRIENTS_FOR_WEEK]\n }\n )", "title": "" }, { "docid": "c7dfb948b8301a0430651d0b5bb487bb", "score": "0.49241003", "text": "def player_eats_food(self, snake, food):\n head = snake[0]\n head_x, head_y = head\n for food_item in food:\n food_x, food_y = food_item\n #Return whether or not the player's received food, adds fat if they have\n if head_x == food_x and head_y == food_y:\n #We've eaten food, update our score\n self.score += int(150 + int(len(snake)/10)*5)\n\n #Add more fat to the user if they already have some or they have a decent score\n self.fat += 8 + int(self.fat/4) + int(self.score/500)\n return True\n return False", "title": "" }, { "docid": "7e5cd209b264fb2dd585ad69bbc06220", "score": "0.49199784", "text": "def createConsumptionSeq(df):\n mealList = []\n M = {} #Contient les aliments d'un repas \n nomen = df.nomen.iloc[0]\n jour = df.jour.iloc[0]\n tyrep = df.tyrep.iloc[0]\n \n for index, row in df.iterrows(): # Scan tous les aliments de consommation\n #print(row.nomen, row.jour, row.tyrep)\n if (row.nomen == nomen) & (row.jour == jour) & (row.tyrep == tyrep):\n name = str(row.nomen) + '_' + str(index)\n M[name] = Food(row.codsougr, row.codsougr_name, row.qte_nette) # Crée l'objet FoodItem\n else:\n m = list(set(M.values())) #Set of food items in a meal\n uniqueM = eraseDubloons(m)\n meal = Meal(uniqueM, jour = jour, tyrep = tyrep)\n meal.reorder()\n logging.debug('{}'.format(meal.nameTuple))\n if meal:\n mealList.append(meal)\n\n M = {}\n nomen = row.nomen\n jour = row.jour\n tyrep = row.tyrep\n \n meals = [m for m in mealList if m]\n return meals", "title": "" }, { "docid": "c830b9fef7a64c1f072cecdf121412c0", "score": "0.4918213", "text": "def create_dico(item_list):\n assert type(item_list) is list\n dico = {}\n for items in item_list:\n for item in items:\n if item not in dico:\n dico[item] = 1\n else:\n dico[item] += 1\n return dico", "title": "" }, { "docid": "c830b9fef7a64c1f072cecdf121412c0", "score": "0.4918213", "text": "def create_dico(item_list):\n assert type(item_list) is list\n dico = {}\n for items in item_list:\n for item in items:\n if item not in dico:\n dico[item] = 1\n else:\n dico[item] += 1\n return dico", "title": "" }, { "docid": "6ddaa4351c2892173251704a2ad04d77", "score": "0.49181578", "text": "def sum_list(new_list):\n return sum(new_list)\n # return None", "title": "" }, { 
"docid": "9ee3ad439b54ccc0e9043295f07ea1cd", "score": "0.49134657", "text": "def saved_meal_item_add(\n self, meal_id, food_id, food_entry_name, serving_id, num_units\n ):\n params = {\n \"method\": \"saved_meal_item.add\",\n \"format\": \"json\",\n \"saved_meal_id\": meal_id,\n \"food_id\": food_id,\n \"food_entry_name\": food_entry_name,\n \"serving_id\": serving_id,\n \"number_of_units\": num_units,\n }\n\n response = self.session.get(self.api_url, params)\n return self.valid_response(response)", "title": "" }, { "docid": "a46b885d595c7bea38ce23fc1d6daa4d", "score": "0.4910791", "text": "def calculateInventory(self):\n from collections import Counter\n orders = self.model[0]\n shipping = self.model[1]\n inventory = {key:dict() for key in shipping}\n citys = {o.dest for o in orders}\n months = {o.orderTime.month for o in orders}\n products = {o.pro for o in orders}\n #print shipping\n for pro in products:\n for city in citys:\n inventory[pro][city] = Counter()\n for o in orders:\n o.ship = self.chooseBestShipping(o,citys)\n\n inventory[o.pro][o.ship][o.orderTime.month] += o.num\n #self.view.showInventory(inventory)\n self.inventory = inventory\n self.citys = citys\n self.months = months\n self.products = products\n return inventory", "title": "" }, { "docid": "dcbca7ea38a938ec591a33b34ace0ede", "score": "0.49077475", "text": "def save(self, *args, **kwargs):\n if self.pk is None:\n item = self.item\n item.quantity += self.quantity\n item.save()\n return super(Entry, self).save(*args, **kwargs)", "title": "" }, { "docid": "cbf4880010aa81907064f104b9d60f03", "score": "0.49064472", "text": "def subtotal(self):\n sub = 0.0\n for i in self.items:\n sub += i.quantity * INVENTORY[i.name]\n return sub", "title": "" }, { "docid": "442dae3041bd85957abe5451209e318b", "score": "0.48951203", "text": "def _initilizeItemInventory(self):\n for itemName in itemPrice.keys():\n for j in range(5):\n self._itemInventory.add(itemName)", "title": "" }, { "docid": "2bf1960258a00848bdd160a6a903b421", "score": "0.48922408", "text": "def find_food(self):\n \n if self.check_knife(1):\n self.knife_dur -= 1\n self.food += 1", "title": "" }, { "docid": "4280fe8c4dcd8681a0e6d972b181946f", "score": "0.48690817", "text": "def createFoodDict(food_item_list):\n food_dict = {}\n for food_item in food_item_list:\n words = food_item.split(' ')\n first_word = words[0]\n food_dict[first_word] = food_item\n return food_dict", "title": "" }, { "docid": "31e19cfc7f9cc7ac082b4629f8df71cb", "score": "0.48465908", "text": "def scan_onto_cart(self):\n if hasattr(self, \"shelf\"):\n for item in Item.shipment:\n if len(self.items_stored) < self.max_capacity:\n for product in self.shelf.compartments:\n if item.number == product[\"Product Number\"]:\n self.items_stored.append(item)", "title": "" }, { "docid": "bb607bc5bfc19f80144df53d7174c4b2", "score": "0.4835511", "text": "def returnfrequentitemsets(itemSet, transactionList, minSupport, freqSet):\n\t\t_itemSet = set()\n\t\tlocalSet = defaultdict(int)\n\n\t\tfor item in itemSet:\n\t\t\tfor transaction in transactionList:\n\t\t\t\tif item.issubset(transaction):\n\t\t\t\t\tfreqSet[item] += 1\n\t\t\t\t\tlocalSet[item] += 1\n\n\t\tfor item, count in localSet.items():\n\t\t\t\tsupport = float(count)/len(transactionList)\n\n\t\t\t\tif support >= minSupport:\n\t\t\t\t\t\t_itemSet.add(item)\n\n\t\treturn _itemSet", "title": "" }, { "docid": "f64d95b9e2dbd365c3e3c875f63c4ec1", "score": "0.48311788", "text": "def add_item(self, item):\n self.items_with_price.update(item)", "title": "" }, { 
"docid": "b1dea7e98dd8eb0a30e38b7120376a69", "score": "0.4829422", "text": "def process_item(self, item, spider):\n # looks for a duplicate\n duplicate_check = self.db[self.collection_name].find({'name': item['name'], 'store': item['store'], 'category': item['category'], 'animal': item['animal']}).count()\n if duplicate_check == 0:\n self.db[self.collection_name].insert(dict(item))\n logging.debug('Product added to MongoDB')\n return item\n else:\n # logging.debug('Duplicate found. Skipping product')\n raise DropItem('Duplicate found. Skipping product: %s' % item['name'])\n # return item", "title": "" }, { "docid": "616c81c4094a7fd6dab55793794e5f42", "score": "0.48247102", "text": "def generate_food(self):\n num_food = ENV_N_FOOD\n\n pos = [(random() * self.W, random() * self.H) for i in range(num_food)]\n\n return [Food(x, y, self.W, self.H) for (x, y) in pos]", "title": "" }, { "docid": "0bdff644a55ea47546734621b9954d60", "score": "0.4823412", "text": "def budget(data,productsIds, quantity):\r\n \r\n data=data.fillna(0)\r\n budg=0\r\n products=productsIds\r\n quantity=quantity\r\n \r\n dataFilter = data[data['No. Item'].isin(products)]\r\n prices=dataFilter['Precio Unitario'].tolist()\r\n \r\n for i in range(len(quantity)):\r\n budg+=quantity[i]*prices[i]\r\n return budg", "title": "" }, { "docid": "b9b6f3b617d4f2917091b725f1b1b61a", "score": "0.48086756", "text": "def get_total(self):\n\n base_price = 5\n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n \n # if qty is less than ten melons and international, then +$3 to international_order\n if self.qty < 10 and self.order_type == 'international':\n total = total + 3\n\n return total", "title": "" }, { "docid": "a15150d7171093c8a2769cd73df5a8ab", "score": "0.4804091", "text": "def fruits(fruit_list, fruit):\n\n updated_fruits_list = []\n for item in fruit_list:\n updated_fruits_list.append(item)\n updated_fruits_list.append(fruit)\n\n return updated_fruits_list", "title": "" }, { "docid": "17ff7340eee41c80ad913f6610332c1b", "score": "0.47999802", "text": "def add_to_cart(self, selection_id, selection_qty):\n # read the data from data storage\n for keys, values in ShoppingCart.__ecommerce_data.items():\n # check if the product contain necessary data before proceed\n if \"products\" and \"sku\" and \"prices\" and \"stock\" in values:\n # check if it is a correct sku code from the data storage\n if selection_id in values[\"sku\"]:\n # add product information to the instance variables\n self.__seller_id.append(keys)\n product_id = values[\"sku\"].index(selection_id)\n self.__seller.append(values[\"first_name\"] + \" \" + values[\"last_name\"])\n self.__sku.append(values[\"sku\"][product_id])\n self.__price.append(values[\"prices\"][product_id])\n self.__product_name.append(values[\"products\"][product_id])\n self.__pos.append(product_id)\n self.__check_out_qty.append(selection_qty)", "title": "" }, { "docid": "c1ac6afebd277ad77e422efa246f2ad9", "score": "0.47989768", "text": "def addItem(self, itemId, itemQty):\r\n self.items[itemId] += itemQty\r\n return self", "title": "" }, { "docid": "d10ce6669d3c4fce854bab14bf47b1b7", "score": "0.47970992", "text": "def add_item(self, item):\n LOG.info('Add Product')\n LOG.info(\n \"Add the item: %s , price: %s, quantity: %s\"\n , item.get_name(), item.get_price(), item.get_quantity())\n if not item.get_name() in self.store_stock:\n self.db_manager.insert_element(item)\n else:\n current_item_quantity = \\\n 
self.store_stock[item.get_name()].get_quantity() \\\n + item.get_quantity()\n self.db_manager.update_quantity_field(quantity=current_item_quantity,\n id=self.store_stock[item.get_name()].get_id())\n LOG.info(\"Complete to add a item successful\")", "title": "" }, { "docid": "1f5be3b43a6445bef2e20fac0d8943eb", "score": "0.4792217", "text": "def combine_the_anon_cart(self, request):\n cart = request.session.get('cart')\n if cart:\n for item in cart.items():\n product = Product.objects.get(id=item[0])\n if product.user == request.user:\n messages.error(request, 'You can\\'t buy your product')\n else:\n quantity = item[1]['quantity']\n self.add(product, quantity)\n del request.session['cart']", "title": "" }, { "docid": "5f77055a0765db17ee93d22d7bb315d6", "score": "0.47913775", "text": "def total_items_sold(self, given_date):\n # ensure given_date is a date\n assert isinstance(given_date, date), \"given_date must be datetime.date()\"\n\n # get quantity from order_lines for given_date\n quantities = [\n i[0]\n for i in cur.execute(\"\"\"\n SELECT quantity\n FROM orders\n INNER JOIN order_lines ON orders.id=order_lines.order_id\n WHERE created_at LIKE ?\n \"\"\", (given_date.isoformat()+'%',))\n ]\n\n # sum quantity\n return sum(quantities)", "title": "" }, { "docid": "2a07d888277c16437ea1ed144e02e719", "score": "0.47881973", "text": "def oh_quantity_by_location(self):\n location_list = self.transactions.all().values(\"location\").distinct().order_by(\"location\")\n my_dict = dict()\n for l in location_list:\n location = Location.objects.get(pk=l[\"location\"])\n my_dict[location] = self.get_oh_quantity(location)\n return my_dict", "title": "" }, { "docid": "b7a2e2d6e3ad23e24d00b97541db3187", "score": "0.47833753", "text": "def returnItemsWithMinSupport(itemSet, transactionList, minSupport, freqSet):\n _itemSet = set()\n localSet = defaultdict(int)\n\n for item in itemSet:\n for transaction in transactionList:\n if item.issubset(transaction):\n freqSet[item] += 1\n localSet[item] += 1\n\n # print 'localSet',localSet\n for item, count in localSet.items():\n # print item,count\n support = float(count)/len(transactionList)\n\n if support >= minSupport:\n _itemSet.add(item)\n\n return _itemSet", "title": "" }, { "docid": "8e7d505ef1815ef26d24544b025773ca", "score": "0.47816607", "text": "def main():\r\n group_of_list = []\r\n group = {}\r\n count = 0\r\n dict_input_food_and_price = input()\r\n dict_input_food_and_price = json.loads(dict_input_food_and_price)\r\n while True:\r\n yourname = input()\r\n if yourname == \"End\":\r\n break\r\n group[yourname] = 0\r\n for food, price in dict_input_food_and_price.items():\r\n while True:\r\n yourname = input()\r\n if yourname == \"End\":\r\n break\r\n group_of_list.append(yourname)\r\n count += 1\r\n total = price/count\r\n for food in group_of_list:\r\n group[food] = group.get(food)+total\r\n count = 0\r\n group_of_list = []\r\n print(group)", "title": "" }, { "docid": "57fee8f9f7f940bfc0f162909b7ee2b2", "score": "0.47804844", "text": "def reducer(L):\n\tdistribution = {}\n\tfor item in L:\n\t\tfor record in item:\n\t\t\tif record in distribution:\n\t\t\t\tdistribution[record] += item[record]\n\t\t\telse:\n\t\t\t\tdistribution[record] = item[record]\n\t\titem.clear()\n\treturn distribution", "title": "" }, { "docid": "7dbbe3574385be484fb2e72fb6016bb1", "score": "0.47785863", "text": "def add_item(self, new_item):\n self.items_with_price.update(new_item)", "title": "" }, { "docid": "301be53ed8845a4768b33a75ed332e78", "score": "0.4775439", "text": "def 
getFood( self, amount ) :\n\t\treturn self.getSupply( amount )", "title": "" }, { "docid": "6e98031becdd3b82c96afc040c16348c", "score": "0.47707295", "text": "def get_total(self, **kwargs):\n return self.get_price_per_item(**kwargs) * self.get_quantity(**kwargs)", "title": "" }, { "docid": "99bf9dc41926c19f7c1798b9a962799d", "score": "0.47650927", "text": "def cardlist_sum(cardlist1, cardlist2):\n cards = deepcopy(cardlist1)\n for name, amount in cardlist2.items():\n if name not in cards:\n cards[name] = 0\n cards[name] = cards[name] + amount\n return cards", "title": "" }, { "docid": "af3e40827b1596cd982bb10623718ab9", "score": "0.47612268", "text": "def food_add_favorite(self, food_id, serving_id=None, number_of_units=None):\n\n params = {\"method\": \"food.add_favorite\", \"format\": \"json\", \"food_id\": food_id}\n\n if serving_id and number_of_units:\n params[\"serving_id\"] = serving_id\n params[\"number_of_units\"] = number_of_units\n\n response = self.session.get(self.api_url, params=params)\n return self.valid_response(response)", "title": "" }, { "docid": "46d8b637023ae9e022ee3a1e96d883cd", "score": "0.47490332", "text": "def food_insert(self, name, calories, protein, fat, carbs, fiber):\n pass", "title": "" }, { "docid": "7d8c0504c732838f7986103ecf4d9fb0", "score": "0.47435322", "text": "def add(self, product, quantity=1, update_quantity=False):\n product_vencode = str(product.vencode)\n if product_vencode not in self.cart:\n self.cart[product_vencode] = {'quantity': 0,\n 'price': str(product.price)}\n if update_quantity:\n self.cart[product_vencode]['quantity'] = quantity\n else:\n self.cart[product_vencode]['quantity'] += quantity\n self.save()", "title": "" }, { "docid": "abd12d614dd01fa7d1dc8d540d13a970", "score": "0.4740142", "text": "def add(self, item):", "title": "" }, { "docid": "96d3c09b955b186c27327828c5c875b7", "score": "0.47401217", "text": "def test_unique_quantities(self):\n collection = Collection(['air__temperature', 'water__temperature'])\n\n quantities = collection.quantities()\n\n self.assertEqual(len(quantities), 1)\n self.assertTrue('temperature' in quantities)", "title": "" }, { "docid": "3f0904ee6421b4e1fba7c11b177be256", "score": "0.47290277", "text": "def get_sum(items=[]):\n\t\tresult = 0\n\t\tif items:\n\t\t\tresult = reduce(lambda x, y: x+y, items)\n\t\treturn result", "title": "" }, { "docid": "baa64214b750820c4bb4b85c71e8520d", "score": "0.47211656", "text": "def _amount_all(self):\n for order in self:\n amount_untaxed = 0.0\n for line in order.depence:\n amount_untaxed += line.montant\n\n order.update({\n\n 'Montant_total': amount_untaxed,\n })", "title": "" }, { "docid": "105d02d685d7278913bf8246ba61d913", "score": "0.4696378", "text": "def union(a, b):\n c = a.copy()\n for item, quantity in b.items():\n if item in c:\n c[item] += quantity\n else:\n c[item] = quantity\n return c", "title": "" }, { "docid": "43a8f10d46c49f4fc02930e22a6c1f53", "score": "0.46955162", "text": "def sum_list(input_list: typing.List[float]) -> float:\n return sum(input_list)", "title": "" }, { "docid": "6b6714d61699a90caccf507a1439ba0b", "score": "0.46904996", "text": "def add_food(self, food: FoodProp) -> None:\n if not self.can_place_food(food):\n raise Exception('Cannot place a food with that color on the colorful plate')\n\n self.foods.append(food)", "title": "" }, { "docid": "68e5cbf07cac2a1423f45e8e5ac6e692", "score": "0.46781105", "text": "def __len__(self):\n return sum(item[\"qty\"] for item in self.basket.values())", "title": "" }, { "docid": 
"0b38093e769550aa59eb195f38c23eb6", "score": "0.46772686", "text": "def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print (product + \" added.\")\n else:\n print (product + \" is already in the cart.\")", "title": "" }, { "docid": "1690b8513ce9e630d2fbdaaf6b81ca8d", "score": "0.4674813", "text": "def sum_mixed_list(input_list: List[Union[int, float]]) -> float:\n return reduce(lambda x, y: x+y, input_list)", "title": "" }, { "docid": "8d97e4fa24429dc883f373bd0ca99355", "score": "0.46693996", "text": "def _product_association(self):\n # Find orders that bought the target item\n orders = self.data[self.data[self.item_col]==self.target][self.order_col]\n \n # Find all items in those orders\n items = self.data[self.data[self.order_col].isin(orders)]\n \n # Recommend items frequently bought with the target item \n self._find_recs(items.groupby(self.item_col).size())\n self._print_recs(\"popular items other customers bought\")", "title": "" }, { "docid": "e92b39dd58dbb549782499edade31dac", "score": "0.46683574", "text": "def autoPopulateInventory(self):\n if not self._permanent:\n self.clearInventory()\n\n # Randomly select the itemCount\n itemCount = int(getRandomItemFromList(self._numOfItemsCarried))\n\n if not itemCount: # 0 items carried\n return True\n\n for itemNum in range(1, itemCount + 1): # +1 due to exclusive ranges\n itemId = getRandomItemFromList(self._itemCatalog)\n if not itemId:\n continue\n if \"/\" not in itemId:\n continue\n\n dLog(\"creature.autoPopulateInventory: obj = \" + itemId)\n oType, oNum = itemId.split(\"/\")\n obj1 = ObjectFactory(oType, oNum)\n obj1.load()\n self.addToInventory(obj1)\n return True", "title": "" }, { "docid": "4025849a832f2176dc523dd3048e8ffc", "score": "0.46618393", "text": "def raw_ingredients_exact(item_set):\n remaining = item_set.copy()\n raw_ingredients = {}\n while any(remaining):\n item, quantity = list(remaining.items())[0]\n item_object = items[item]\n del remaining[item]\n if item_object.raw:\n raw_ingredients = union(raw_ingredients, {item: quantity})\n else:\n item_list = item_object.recipe.ingredients.items()\n for ingredient, ingredient_quantity in item_list:\n remaining = union(\n remaining,\n multiply(\n {ingredient: ingredient_quantity},\n quantity / item_object.recipe.quantity,\n ),\n )\n return raw_ingredients", "title": "" }, { "docid": "5a586b690c5f4ad80786ce24861767f9", "score": "0.46590447", "text": "def frequentOneItem(self):\n\n candidate = {}\n # global finalPatterns, minSup, Database\n # self.minSup = self.minSup\n for i in range(len(self.Database)):\n for j in range(len(self.Database[i])):\n if self.Database[i][j] not in candidate:\n candidate[self.Database[i][j]] = 1\n else:\n candidate[self.Database[i][j]] += 1\n self.finalPatterns = {keys: value for keys, value in candidate.items() if value >= self.minSup}", "title": "" }, { "docid": "cac81ccf764ad1c0b428a32da2a2677e", "score": "0.46445563", "text": "def add_item(request):\n\n if request.method == \"GET\":\n return HttpResponseRedirect(reverse(\"index\"))\n \n # Variable definition\n try:\n dish = Dish.objects.get(pk=(request.POST[\"dish\"]))\n except:\n return HttpResponseRedirect(reverse(\"index\"))\n\n extra = request.POST.getlist('extra')\n\n # No toppings Pizza validator\n if (dish.pk == 1 or dish.pk == 2 or dish.pk == 11 or dish.pk == 12) and len(extra) != 0:\n return HttpResponseRedirect(reverse(\"index\"))\n\n # 1 topping Pizza validator\n if (dish.pk == 3 or dish.pk == 4 or dish.pk == 13 or 
dish.pk == 14) and len(extra) != 1:\n return HttpResponseRedirect(reverse(\"index\"))\n\n # 2 toppings Pizza validator\n if (dish.pk == 5 or dish.pk == 6 or dish.pk == 15 or dish.pk == 16) and len(extra) != 2:\n return HttpResponseRedirect(reverse(\"index\"))\n\n # 3 toppings Pizza validator\n if (dish.pk == 7 or dish.pk == 8 or dish.pk == 17 or dish.pk == 18) and len(extra) != 3:\n return HttpResponseRedirect(reverse(\"index\"))\n\n # Special Pizza Pizza validator\n if (dish.pk == 9 or dish.pk == 10 or dish.pk == 19 or dish.pk == 20) and len(extra) != 5:\n return HttpResponseRedirect(reverse(\"index\"))\n \n # Item variables\n extras = []\n price = dish.price\n\n for item in extra:\n new_extra = Extra.objects.get(pk=(item))\n extras.append(new_extra)\n price += new_extra.price\n\n new_item = Item(dish=dish, price=price)\n new_item.save()\n for item in extras:\n new_item.extras.add(item)\n\n # Cart Handling\n try:\n cart = Cart.objects.get(user=request.user)\n except:\n cart = Cart(user=request.user)\n cart.save()\n cart.items.add(new_item)\n cart.save()\n\n return HttpResponseRedirect(reverse(\"index\"))", "title": "" }, { "docid": "a923dccac387cb688c107df06e72d941", "score": "0.4643559", "text": "def sum(items):\n if items is None or len(items) == 0:\n return None\n try:\n return sum(items)\n except:\n msg = \"Error while addition: %s\" %sys.exc_info()[0]\n sys.stderr.write(msg)\n raise", "title": "" }, { "docid": "e5e463dda3a19d276a1fe9f6ee356c72", "score": "0.46426684", "text": "def test_add_item(self):\n before = len(self.shopping_list_1.items)\n self.shopping_list_1.add_item(self.item_3)\n after = len(self.shopping_list_1.items)\n self.assertEqual(1, after - before,\n msg='The object should add an item to the instance')", "title": "" }, { "docid": "a0d6261296989b18c47c219f2b8c7f83", "score": "0.46423283", "text": "def get_total(self):\n\n # self.base_price = self.get_base_price()\n\n if self.species == \"Christmas\":\n self.base_price *= 1.5\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "title": "" } ]
17ff9d02a0b166bd8a9c253d3fe795a0
Update details of an Experiment. Update details of an Experiment with a specific Experiment Id.
[ { "docid": "10d33500639a40dacacd744c0f51f89c", "score": "0.64625514", "text": "async def update(\n self,\n subscription_id: str,\n resource_group_name: str,\n workspace_name: str,\n experiment_id: str,\n body: Optional[\"_models.ModifyExperiment\"] = None,\n **kwargs: Any\n ) -> \"_models.Experiment\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.Experiment\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n\n content_type = kwargs.pop('content_type', \"application/json\") # type: Optional[str]\n\n if body is not None:\n _json = self._serialize.body(body, 'ModifyExperiment')\n else:\n _json = None\n\n request = build_update_request(\n subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n workspace_name=workspace_name,\n experiment_id=experiment_id,\n content_type=content_type,\n json=_json,\n template_url=self.update.metadata['url'],\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access\n request,\n stream=False,\n **kwargs\n )\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('Experiment', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "title": "" } ]
[ { "docid": "a90932de5b0dd8cc3450221c5e50b85a", "score": "0.7416664", "text": "def update_experiment(request, pk):\n\n required_data = request.data.get(\"requiredData\")\n tests_config = request.data.get(\"testsConfig\")\n expiration = request.data.get(\"expiration\")\n allow_multiple_answers = request.data.get(\"allowMultipleAnswers\")\n disabled = request.data.get(\"disabled\")\n name = request.data.get(\"name\")\n\n try:\n print('ENTERED')\n experiment = Experiment.objects.get(pk=pk)\n except Experiment.DoesNotExist:\n return Response('Experiment with given ID not found', status=HTTP_404_NOT_FOUND)\n\n if tests_config is None or len(tests_config) == 0:\n return Response('At least one test must be provided', status=HTTP_400_BAD_REQUEST)\n\n if expiration is None:\n return Response('Valid expiration date must be provided', status=HTTP_400_BAD_REQUEST)\n\n try:\n experiment.name = name\n experiment.requiredDataConfig = required_data\n experiment.testsConfig = tests_config\n experiment.expiration = expiration\n experiment.allowMultipleAnswers = allow_multiple_answers\n experiment.disabled = disabled\n experiment.save()\n except Exception as e:\n return Response(str(e), HTTP_400_BAD_REQUEST)\n\n serialized = ExperimentSerializer(experiment).data\n\n return Response(serialized, HTTP_200_OK)", "title": "" }, { "docid": "0aace5c2053a4192cc2f99b73768ad86", "score": "0.67339313", "text": "def update_experiment_metadata(\n writer_client, experiment_id, name=None, description=None\n):\n logger.info(\"Modifying experiment %r\", experiment_id)\n request = write_service_pb2.UpdateExperimentRequest()\n request.experiment.experiment_id = experiment_id\n if name is not None:\n logger.info(\"Setting exp %r name to %r\", experiment_id, name)\n request.experiment.name = name\n request.experiment_mask.name = True\n if description is not None:\n logger.info(\n \"Setting exp %r description to %r\", experiment_id, description\n )\n request.experiment.description = description\n request.experiment_mask.description = True\n try:\n grpc_util.call_with_retries(writer_client.UpdateExperiment, request)\n except grpc.RpcError as e:\n if e.code() == grpc.StatusCode.NOT_FOUND:\n raise ExperimentNotFoundError()\n if e.code() == grpc.StatusCode.PERMISSION_DENIED:\n raise PermissionDeniedError()\n if e.code() == grpc.StatusCode.INVALID_ARGUMENT:\n raise InvalidArgumentError(e.details())\n raise", "title": "" }, { "docid": "546f21b219d724d0964dc0327c6c25f2", "score": "0.66571933", "text": "def test_submit_edit_experiment(self):\n experiment = self.create_test_experiment(name=\"old_name\")\n experiment_id = experiment.id\n num_experiments = Experiment.objects.count()\n experiment = {\n \"name\": \"new_name\", \"notes\": \"hi\", \"uniformRandom\": True,\n \"csvUpload\": False,\n \"tracks\": [{\"id\": None, \"weighting\": None, \"name\": \"A\"}]\n }\n response = self.client.post(\n reverse(\"ab_testing_tool_submit_edit_experiment\", args=(experiment_id,)),\n follow=True, content_type=\"application/json\", data=json.dumps(experiment)\n )\n self.assertOkay(response)\n self.assertEquals(num_experiments, Experiment.objects.count())\n experiment = Experiment.objects.get(id=experiment_id)\n self.assertEquals(experiment.name, \"new_name\")", "title": "" }, { "docid": "3cfc1af097dea409e1e342c357e44dae", "score": "0.61671877", "text": "def refresh(self) -> None:\n self.update_from_remote_data(self._api_client.experiment_get(self.uuid))", "title": "" }, { "docid": "9e7efd2d39f68200aa5173115ccf47fd", "score": "0.6142266", "text": "def 
test_edit_experiment_view(self):\n experiment = self.create_test_experiment()\n response = self.client.get(reverse(\"ab_testing_tool_edit_experiment\", args=(experiment.id,)))\n self.assertTemplateUsed(response, \"ab_tool/edit_experiment.html\")", "title": "" }, { "docid": "6c08855f31c71f29263100fbc5d63505", "score": "0.59430104", "text": "def update_emp(db, id, hoursTrained, trainedStatus):\r\n trainingInfo = {\"Hours Trained\" : hoursTrained,\r\n \"Training Completed\" : trainedStatus}\r\n\r\n db.collection(\"Employees\").document(id).set(trainingInfo)", "title": "" }, { "docid": "58e74d8d26fab10630d7b37236175eee", "score": "0.5902595", "text": "def test_submit_edit_started_experiment_changes_name_and_notes(self):\n experiment = self.create_test_experiment(name=\"old_name\", notes=\"old_notes\",\n tracks_finalized=True)\n experiment_id = experiment.id\n num_experiments = Experiment.objects.count()\n old_track = self.create_test_track(experiment=experiment, name=\"old_name_track\")\n experiment_json = {\n \"name\": \"new_name\", \"notes\": \"new_notes\", \"tracks\": [{\"id\": old_track.id,\n \"name\": \"new_track_name\"}],\n }\n response = self.client.post(\n reverse(\"ab_testing_tool_submit_edit_experiment\", args=(experiment_id,)),\n follow=True, content_type=\"application/json\", data=json.dumps(experiment_json)\n )\n self.assertOkay(response)\n self.assertEquals(num_experiments, Experiment.objects.count())\n experiment = Experiment.objects.get(id=experiment_id)\n self.assertEquals(experiment.name, \"new_name\")\n self.assertEquals(experiment.notes, \"new_notes\")\n self.assertEquals(experiment.tracks.all()[0].name, \"new_track_name\")", "title": "" }, { "docid": "5b3b67f1d59d87115dd9fc81a90aab86", "score": "0.58990973", "text": "def update(self, collab_id: str, project_id: str, expt_id: str, **updates):\n return self._execute_operation(\n operation=\"put\",\n url=self._generate_single_url(\n collab_id=collab_id,\n project_id=project_id, \n expt_id=expt_id\n ),\n payload=updates\n )", "title": "" }, { "docid": "4272881909e59c29f6e7e94aae595276", "score": "0.5827913", "text": "def update(id_exp=None):\n\n current_app.logger.info(\"PUT /sync route with id: %s\" % id_exp)\n\n (gfg_id, exp_name, session_id), session, resp = parse_id_exp(id_exp)\n\n # Check JSON validity\n if utils.check_valid_json(request.get_data()):\n valid_json = json.loads(request.get_data())\n # session.datastring = valid_json\n else:\n resp = {\"status\": \"bad request\"}\n current_app.logger.error(\"Invalid JSON\")\n\n current_app.logger.info(\n \"Current trial: %s, unique_id: %s, experiment name: %s, session id: %s \" % (valid_json['currenttrial'],\n valid_json['uniqueid'], valid_json['experimentname'], valid_json['sessionid']))\n\n # For each trial, pass to appropriate parser, if not in db\n for json_trial in valid_json['data']:\n if exp_name == \"category_switch\":\n experiment_class = CategorySwitch\n elif exp_name == \"keep_track\":\n experiment_class = KeepTrack\n else:\n current_app.logger.error(\"%s does not exist\" % (exp_name))\n resp = {\"status\": \"bad request\"}\n\n db_trial, new = db_utils.get_or_create(db.session,\n experiment_class, gfg_id=gfg_id, session_id=session_id,\n trial_num=json_trial['current_trial'])\n\n # If the trial is new, add data\n if new:\n db_trial.add_json_data(json_trial)\n db.session.commit()\n\n # For each event, pass to parser, if not in db\n for json_event in valid_json['eventdata']:\n db_event, new = db_utils.get_or_create(db.session, EventData,\n gfg_id=gfg_id, 
session_id=session_id, exp_name=exp_name, \n timestamp = utils.convert_timestamp(json_event['timestamp']))\n\n if new:\n db_event.add_json_data(json_event)\n db.session.commit()\n\n # For the QuestionData, pass to parser, if not in db\n for json_ques in valid_json['questiondata']:\n db_ques, new = db_utils.get_or_create(db.session, QuestionData,\n gfg_id=gfg_id, session_id=session_id, exp_name=exp_name)\n\n if new:\n db_ques.add_json_data(json_ques)\n db.session.commit()\n\n if resp is None:\n resp = {\"status\": \"user data saved\"}\n return jsonify(**resp)", "title": "" }, { "docid": "6b05deea5431803e1eac4738e4cc6a36", "score": "0.5811839", "text": "def update(self, request, pk=None):\n selected_routine = request.query_params.get('routine', None) \n\n try: \n exercise = Exercise.objects.get(pk=pk)\n\n exercise.routine = Routine.objects.get(pk=selected_routine)\n exercise.description = request.data['description']\n exercise.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n \n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "e1ea28e67ff7a830d61e1851c7da9f33", "score": "0.56713605", "text": "async def edit_training_examples(\n intent: str,\n id: str,\n request_data: TextData,\n current_user: User = Depends(auth.get_current_user),\n):\n mongo_processor.edit_training_example(\n id, request_data.data, intent.lower(), current_user.get_bot(), current_user.get_user()\n )\n return {\"message\": \"Training Example updated!\"}", "title": "" }, { "docid": "b610548abb62bf8777e59ea744a47f65", "score": "0.5661236", "text": "def put(self, exploration_id):\n exploration = exp_services.get_exploration_by_id(exploration_id)\n version = self.payload.get('version')\n _require_valid_version(version, exploration.version)\n\n commit_message = self.payload.get('commit_message')\n change_list = self.payload.get('change_list')\n\n try:\n exp_services.update_exploration(\n self.user_id, exploration_id, change_list, commit_message)\n except utils.ValidationError as e:\n raise self.InvalidInputException(e)\n\n self.values.update(self._get_exploration_data(exploration_id))\n self.render_json(self.values)", "title": "" }, { "docid": "4f444f6fc62b19e72dd10f8ea281178b", "score": "0.5650311", "text": "def update(self, entity_id, details, delete_existing):", "title": "" }, { "docid": "829b7025e64765315044ed5f1182fe55", "score": "0.5622866", "text": "def causx_edit_an_entity(id):\n project = get_project()\n updated_entry = request.get_json()[\"updated_entry\"]\n entity = causx_set_variable(project, id, updated_entry)\n response=dict(entity=entity)\n calc_params = get_calc_params(project, data_required=False)\n if calc_params:\n response[\"layers\"] = get_qnodes_layer(calc_params)\n return response, 200", "title": "" }, { "docid": "d3a677e8ac2b6e770914054e27e7250c", "score": "0.56208825", "text": "def test_submit_edit_started_experiment_changes_existing_tracks(self):\n experiment = self.create_test_experiment(name=\"old_name\", tracks_finalized=False,\n assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)\n track1 = self.create_test_track(experiment=experiment, name=\"A\")\n track2 = self.create_test_track(experiment=experiment, name=\"B\")\n self.create_test_track_weight(experiment=experiment, track=track1)\n self.create_test_track_weight(experiment=experiment, track=track2)\n track_count = experiment.tracks.count()\n experiment_json = {\n \"name\": \"new_name\", \"notes\": \"hi\", \"uniformRandom\": False,\n 
\"csvUpload\": False,\n \"tracks\": [{\"id\": track1.id, \"weighting\": 30, \"name\": \"C\"},\n {\"id\": track2.id, \"weighting\": 70, \"name\": \"D\"}]\n }\n response = self.client.post(\n reverse(\"ab_testing_tool_submit_edit_experiment\", args=(experiment.id,)),\n follow=True, content_type=\"application/json\", data=json.dumps(experiment_json)\n )\n self.assertOkay(response)\n experiment = Experiment.objects.get(id=experiment.id)\n self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)\n self.assertEquals(experiment.tracks.count(), track_count)\n track1 = experiment.tracks.get(id=track1.id)\n track2 = experiment.tracks.get(id=track2.id)\n self.assertEquals(track1.name, \"C\") #Checks name has changed\n self.assertEquals(track2.name, \"D\")\n self.assertEquals(track1.weight.weighting, 30) #Checks weighting has changed\n self.assertEquals(track2.weight.weighting, 70)", "title": "" }, { "docid": "02d99f571b3fa51ed6afda42fff2838c", "score": "0.56005096", "text": "def put(self, id):\n data = request.get_json()\n return update_a_reporter(id=id, data=data)", "title": "" }, { "docid": "3eca505c37f3bd0881bd1ed010599a0a", "score": "0.559187", "text": "def test_edit_experiment_view_started_experiment(self):\n experiment = self.create_test_experiment()\n experiment.tracks_finalized = True\n experiment.save()\n response = self.client.get(reverse(\"ab_testing_tool_edit_experiment\", args=(experiment.id,)))\n self.assertTemplateUsed(response, \"ab_tool/edit_experiment.html\")", "title": "" }, { "docid": "e11f93fd1499f93fcb17b45a21891a2d", "score": "0.55208725", "text": "def test_submit_edit_started_experiment_does_not_change_tracks(self):\n experiment = self.create_test_experiment(name=\"old_name\", tracks_finalized=True,\n assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)\n experiment_id = experiment.id\n num_experiments = Experiment.objects.count()\n no_tracks = experiment.tracks.count()\n experiment = {\n \"name\": \"new_name\", \"notes\": \"hi\", \"uniformRandom\": True,\n \"csvUpload\": False,\n \"tracks\": [{\"id\": None, \"weighting\": None, \"name\": \"A\"},\n {\"id\": None, \"weighting\": None, \"name\": \"B\"},\n {\"id\": None, \"weighting\": None, \"name\": \"C\"}]\n }\n response = self.client.post(\n reverse(\"ab_testing_tool_submit_edit_experiment\", args=(experiment_id,)),\n follow=True, content_type=\"application/json\", data=json.dumps(experiment)\n )\n self.assertOkay(response)\n self.assertEquals(num_experiments, Experiment.objects.count())\n experiment = Experiment.objects.get(id=experiment_id)\n self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)\n self.assertEquals(experiment.tracks.count(), no_tracks)", "title": "" }, { "docid": "2db940d31dbabec99f670e73eaba4b31", "score": "0.5507888", "text": "def test_submit_edit_experiment_nonexistent(self):\n experiment_id = NONEXISTENT_EXPERIMENT_ID\n experiment = {\"name\": \"new_name\", \"notes\": \"\"}\n response = self.client.post(\n reverse(\"ab_testing_tool_submit_edit_experiment\", args=(experiment_id,)),\n content_type=\"application/json\", data=json.dumps(experiment)\n )\n self.assertEquals(response.status_code, 404)", "title": "" }, { "docid": "214e821c70df6edb4046bfd747a7248d", "score": "0.5484202", "text": "def edit(project):\n if g.user:\n session['exp'] = project\n abstract = s.get_experiment_data(g.user, project, 'abstract')\n gl = s.get_experiment_data(g.user, project, 'gl')\n ec = s.get_experiment_data(g.user, project, 'ec')\n fn = 
s.get_experiment_data(g.user, project, 'fn')\n return render_template('edit.html', project=project, abstract=abstract, gl=gl, ec=ec, fn=fn)\n return redirect(url_for('login'))", "title": "" }, { "docid": "f70ff7a5bde05f7ffc13d2ef2cf7f6ab", "score": "0.5455811", "text": "def update(self, entity_id, details, delete_existing=False):\n self._validate_entity_id(entity_id)\n\n if not isinstance(details, dict):\n msg = (_(\"Provided details %s is not valid dict.\")\n % details)\n raise ValueError(msg)\n\n return self._storage.update(\n entity_id, details, delete_existing)", "title": "" }, { "docid": "95f73067644b0b8d431330b8b573696d", "score": "0.54527766", "text": "def update(self, event_id, event_time=None, **kwargs):\n pass", "title": "" }, { "docid": "95f73067644b0b8d431330b8b573696d", "score": "0.54527766", "text": "def update(self, event_id, event_time=None, **kwargs):\n pass", "title": "" }, { "docid": "95f73067644b0b8d431330b8b573696d", "score": "0.54527766", "text": "def update(self, event_id, event_time=None, **kwargs):\n pass", "title": "" }, { "docid": "5ba50b0f354cac4d2eb92581c8b79298", "score": "0.5451184", "text": "def update(self, id, data):\n return self.make_request('update', '/' + id, data)", "title": "" }, { "docid": "53e08e1e5e63528ecaa3e670768710da", "score": "0.54415154", "text": "def put(user_id,self):\n form = ResearchInfoUpdateForm(request.form)\n if form.validate():\n \n research_info = ResearchInformation.query.filter((ResearchInformation.id == form.research_id.data) & (ResearchInformation.user_id == user_id))\n if research_info is None:\n return make_response(jsonify({'error':'Please give an appropriate research ID'}),400)\n update_dict = {}\n for key, value in form.data.items():\n if value and key != 'research_id':\n update_dict[key] = value\n try:\n research_info.update(update_dict)\n db.session.commit()\n except:\n return make_response(jsonify({'error' : 'Database Connection Problem'}),500)\n return make_response(jsonify({'msg' : ' Successfully changed'}),201)\n\n else:\n return make_response(jsonify({'error':'Wrong input format'}),400)", "title": "" }, { "docid": "83b67ad4ce2e3e31eb6207122f1a66f3", "score": "0.5403792", "text": "def put(self):\n return employee_service.edit_employee(get_jwt_identity(), get_uuid(request), **api.payload), 201", "title": "" }, { "docid": "d2ac49e54bcfac440fb624d369239316", "score": "0.53886133", "text": "def _add_experiment(self, experiment_key, variation_id):\n\n experiment_id = self.config.get_experiment_id(experiment_key)\n self.params[EXPERIMENT_PARAM_FORMAT.format(experiment_prefix=Params.EXPERIMENT_PREFIX,\n experiment_id=experiment_id)] = variation_id", "title": "" }, { "docid": "0fd8bb0e660cd252ad87d2e7d500422a", "score": "0.53784376", "text": "def put(self, id):\n\t\ttoken_parser.parse_args()\n\t\trecord = Record.query.get(id)\n\t\tif not record:\n\t\t\treturn error_response('RECORD NOT FOUND', 404) \n\t\t# PERMISSION_DENIED because don't want user to know this record id exists\n\t\tif record.runner.id!=int(get_jwt_identity()) and not min_access_level(self, ROLES['admin']):\n\t\t\treturn error_response('RECORD NOT FOUND', 404) \n\n\t\tdata = request.get_json() or {}\n\t\ttry:\n\t\t\trecord.update(data)\n\t\texcept ValueError as error:\n\t\t\treturn error_response('RECORD NOT FOUND', 404)\n\t\texcept Exception as error:\n\t\t\treturn error_response('PERMISSION DENIED', 403)\n\t\tdb.session.commit()\n\t\treturn make_response(jsonify(record.to_dict()), 200)", "title": "" }, { "docid": "7a8c7feb5e4f073a780db8763a8c3a47", 
"score": "0.53570676", "text": "def test_edit_report(self):\n report = self.create_report()\n pk = report[0].pk\n my_url = report_url(pk)\n response = self.client.put(\n my_url,\n self.updated_report,\n **self.headers,\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "3c6b8e8628b2258d34781373f3756603", "score": "0.5355502", "text": "def put(self, id):\n # Get the json object from the request body\n data = request.get_json()\n # Check all fields for availability and get the new data if the field is available\n if 'email' not in data:\n email = None\n else:\n email = data[\"email\"]\n if 'first_name' not in data:\n first_name = None\n else:\n first_name = data[\"first_name\"]\n if 'last_name' not in data:\n last_name = None\n else:\n last_name = data[\"last_name\"]\n if 'availability' not in data:\n availability = None\n else:\n availability = data[\"availability\"]\n\n # Update all requested fields of the requested interviewer, if there is an interviewer with the id, else return\n # an error\n interviewer = interviewer_ctrl.update_interviewer(id, email, first_name, last_name, availability)\n if not interviewer:\n response = \"Interviewer with id %s does not exist or email %s is already taken by another interviewer\" % (\n id, email), 400\n else:\n response = interviewer, 200\n return response", "title": "" }, { "docid": "15c1f7722e5e0c8697db10072db3f404", "score": "0.53356797", "text": "def put(self, id):\n data = request.json\n args = request.args\n return update_unit(id, data, args)", "title": "" }, { "docid": "f5a0bd39cf438b04b13e0fd2dad2b75f", "score": "0.5282175", "text": "def update_project(id=None, name=None, description=None):\n pass", "title": "" }, { "docid": "7da3e669c6d3439e143564ac0ff1622b", "score": "0.5282065", "text": "def test_edit_experiment_view_last_modified_updated(self):\n experiment = self.create_test_experiment()\n experiment.name += \" (updated)\"\n response = self.client.post(reverse(\"ab_testing_tool_submit_edit_experiment\",\n args=(experiment.id,)),\n content_type=\"application/json\",\n data=experiment.to_json())\n self.assertEquals(response.content, \"success\")\n updated_experiment = Experiment.objects.get(id=experiment.id)\n self.assertLess(experiment.updated_on, updated_experiment.updated_on,\n response)", "title": "" }, { "docid": "a63400c45bfcc9bce89953c8ae6b3701", "score": "0.5281295", "text": "def put(self, id):\n data = request.get_json()\n if 'email' not in data:\n email = None\n else:\n email = data[\"email\"]\n if 'first_name' not in data:\n first_name = None\n else:\n first_name = data[\"first_name\"]\n if 'last_name' not in data:\n last_name = None\n else:\n last_name = data[\"last_name\"]\n if 'availability' not in data:\n availability = None\n else:\n availability = data[\"availability\"]\n interviewee = interviewee_ctrl.update_interviewee(id, email, first_name, last_name, availability)\n if not interviewee:\n response = \"Interviewee with id %s does not exist or email %s is already taken by another interviewee\" % (\n id, email), 400\n else:\n response = interviewee, 200\n return response", "title": "" }, { "docid": "f85bb5f0f4fbddd046d215c53c2b171b", "score": "0.52504665", "text": "def put(self,request,employee_id):\n try:\n get_data = Employee.objects.get(pk=employee_id)\n employee_data = EmployeeSerializer(get_data,data=request.data)\n if not(employee_data.is_valid()):\n return ApiResponse().error(\"Error while update employee details\",400)\n employee_data.save()\n return 
ApiResponse().success(employee_data.data,200)\n except Exception as err:\n print(err)\n return ApiResponse().success(\"Error\",500)", "title": "" }, { "docid": "ecba54809858f5347c6dc258ddbd143b", "score": "0.5228171", "text": "def update_testimonial():\n data = request.get_json()\n description = data.get('description')\n title = data.get('title')\n image = data.get('image')\n rank = data.get('rank')\n testimonial_id = data.get('id')\n\n if (not title) or (not description) or (not rank) or (not testimonial_id):\n return jsonify(result={'failed_msg': \"Unanle to save testimonial with missing fields\"})\n\n return jsonify(result=save_testimonial(title, description, rank, image, _id=testimonial_id))", "title": "" }, { "docid": "997ffa7f45afa19ce3c55b4b84f758f6", "score": "0.5227724", "text": "async def update(\n self,\n id: int,\n data: EnvironmentUpdateSchema\n ) -> Record:\n\n async with self.database.transaction():\n query = (\n environments_table.update()\n .where(environments_table.c.id == id)\n .values(\n name=data.name,\n description=data.description,\n updated_at=datetime.now()\n )\n .returning(\n environments_table.c.id,\n environments_table.c.name,\n environments_table.c.code,\n environments_table.c.description,\n environments_table.c.created_at,\n environments_table.c.updated_at,\n environments_table.c.deleted_at,\n environments_table.c.is_deleted\n )\n )\n\n return await self.database.fetch_one(query)", "title": "" }, { "docid": "1b7edbfabd11852be3e16d3e6569fbe4", "score": "0.52266294", "text": "def update(self, model, id, update_with):\n raise NotImplementedError('You have not implemented an update func in your client class')", "title": "" }, { "docid": "15f18479a5de36268b4534b705d906ee", "score": "0.5196483", "text": "def test_put_experiment_valid_permission(self):\n url = '/' + version + '/collection/unittestcol/experiment/unittestexp/'\n data = {'description': 'A new experiment for unit tests. 
Updated'}\n\n response = self.client.put(url, data=data)\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "cf3a3156e9bbff73bb66d428cba3cc4a", "score": "0.5194518", "text": "def test_submit_edit_experiment_changes_assignment_method_to_weighted(self):\n experiment = self.create_test_experiment(name=\"old_name\")\n experiment_id = experiment.id\n num_experiments = Experiment.objects.count()\n no_track_weights = experiment.track_probabilites.count()\n experiment = {\n \"name\": \"new_name\", \"notes\": \"hi\", \"uniformRandom\": False,\n \"csvUpload\": False,\n \"tracks\": [{\"id\": None, \"weighting\": 20, \"name\": \"A\"},\n {\"id\": None, \"weighting\": 80, \"name\": \"B\"}]\n }\n response = self.client.post(\n reverse(\"ab_testing_tool_submit_edit_experiment\", args=(experiment_id,)),\n follow=True, content_type=\"application/json\", data=json.dumps(experiment)\n )\n self.assertOkay(response)\n self.assertEquals(num_experiments, Experiment.objects.count())\n experiment = Experiment.objects.get(id=experiment_id)\n self.assertEquals(experiment.assignment_method, Experiment.WEIGHTED_PROBABILITY_RANDOM)\n self.assertEquals(experiment.track_probabilites.count(), no_track_weights + 2)", "title": "" }, { "docid": "3ca66dab826730162433142988260ed8", "score": "0.5188264", "text": "def test_submit_edit_experiment_changes_assignment_method_to_uniform(self):\n experiment = self.create_test_experiment(\n name=\"old_name\", assignment_method=Experiment.WEIGHTED_PROBABILITY_RANDOM)\n experiment_id = experiment.id\n num_experiments = Experiment.objects.count()\n no_tracks = experiment.tracks.count()\n experiment = {\n \"name\": \"new_name\", \"notes\": \"hi\", \"uniformRandom\": True,\n \"csvUpload\": False,\n \"tracks\": [{\"id\": None, \"weighting\": None, \"name\": \"A\"},\n {\"id\": None, \"weighting\": None, \"name\": \"B\"},\n {\"id\": None, \"weighting\": None, \"name\": \"C\"}]\n }\n response = self.client.post(\n reverse(\"ab_testing_tool_submit_edit_experiment\", args=(experiment_id,)),\n follow=True, content_type=\"application/json\", data=json.dumps(experiment)\n )\n self.assertOkay(response)\n self.assertEquals(num_experiments, Experiment.objects.count())\n experiment = Experiment.objects.get(id=experiment_id)\n self.assertEquals(experiment.assignment_method, Experiment.UNIFORM_RANDOM)\n self.assertEquals(experiment.tracks.count(), no_tracks + 3)", "title": "" }, { "docid": "7565e22ff55c1a4dbfaef25b7830ba89", "score": "0.5184008", "text": "def put(self, id):\n report = self.mydb.get_by_id(id)\n \n if report:\n data = request.get_json()\n report.createdBy = data['username']\n report.type = data['flag']\n report.location = data['location']\n report.status = data['status']\n report.image = data['image']\n report.video = data['video']\n report.comment = ['comment']\n return make_response(jsonify({\n \"message\":\"Report edited successfully\",\n \"data\": report.serialize()\n }), 201)\n else:\n return make_response(jsonify({\n \"message\": \"ID invalid, no report found\"\n }),400)", "title": "" }, { "docid": "160cb1102a31e22433b0a95d1b19c287", "score": "0.5183066", "text": "def upload_experiment(self, experiment: Experiment) -> None:\n data = {\n 'device_name': experiment.backend_name,\n 'type': experiment.type,\n 'extra': experiment.extra,\n }\n if experiment.start_datetime:\n data['start_time'] = local_to_utc_str(experiment.start_datetime)\n if experiment.tags:\n data['tags'] = experiment.tags\n if experiment.uuid:\n data['uuid'] = experiment.uuid\n response_data = 
self._api_client.experiment_upload(data)\n experiment.update_from_remote_data(response_data)", "title": "" }, { "docid": "a0650d7cb60a6083e8c9359cba962c35", "score": "0.517982", "text": "def update(self, typ, id, **kwargs):\n return self._load(self._request(self._url(typ, id), 'PUT', kwargs))", "title": "" }, { "docid": "8b5e1688a20dad535a152c61484ada45", "score": "0.517151", "text": "def update(self, step_name, worker_id):\n record = ReportClient().get_record(step_name, worker_id)\n logging.debug(\"Get Record=%s\", str(record))\n self.search_alg.update(record.serialize())\n ParameterSharing().remove()\n logging.info(f\"Update Success. step_name={step_name}, worker_id={worker_id}\")\n logging.info(\"Best values: %s\", ReportServer().print_best(step_name=General.step_name))", "title": "" }, { "docid": "4cc0ccaf21cc95495b47366271ce2579", "score": "0.51659554", "text": "def post_experiment(self):\n if not self.has_prepared_out:\n self.prepare_experiment()\n\n for exp, prod, desc in self.experiment.overview:\n print exp, prod, desc\n dt = json.dumps(\\\n {'name': exp,\n 'desc': desc})\n resp = requests.post(\n self.server_url + 'experiments/',\n data=dt,\n headers={'Content-type':'application/json'})\n print resp", "title": "" }, { "docid": "3a1a25ea3994e5d6663400ef4dc22a54", "score": "0.5152818", "text": "def edit_quiz(request_ctx, course_id, id, quiz_notify_of_update, **request_kwargs):\n\n path = '/v1/courses/{course_id}/quizzes/{id}'\n payload = {\n 'quiz[notify_of_update]' : quiz_notify_of_update,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)\n response = client.put(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "title": "" }, { "docid": "76aca4be1ce01e6f0e21a2963567cc01", "score": "0.5147998", "text": "def test_edit_experiment_view_nonexistent(self):\n e_id = NONEXISTENT_EXPERIMENT_ID\n response = self.client.get(reverse(\"ab_testing_tool_edit_experiment\", args=(e_id,)))\n self.assertTemplateNotUsed(response, \"ab_tool/edit_experiment.html\")\n self.assertEquals(response.status_code, 404)", "title": "" }, { "docid": "ddb24b14a3442c8472d5c6d09956b062", "score": "0.51298386", "text": "def update_project(self, id: str, name: str = None, description: str = None) -> Dict:\n pass", "title": "" }, { "docid": "9609d770f7c1fe4197f40d9c80ed4b91", "score": "0.5129037", "text": "def update_environment(context: click.Context, application_id: str, environment_id: str, name: str, url: str) -> None:\n api = context.obj\n application = api.application_by_id(application_id)\n environment = api.environment_by_id(application, environment_id)\n updated_environment = api.update_environment(application, environment, name, url)\n table = [[\"ID\", \"Name\", \"URL\"], [updated_environment.id, updated_environment.name, updated_environment.url]]\n click.echo(tabulate.tabulate(table, headers=\"firstrow\"))", "title": "" }, { "docid": "da739f61d8dd8506648f0f235b47402d", "score": "0.51289636", "text": "def edit_experience(request, id):\r\n experience = Experience.objects.get(pk=id)\r\n context = {\r\n 'experience': experience\r\n }\r\n if request.method == 'POST':\r\n experience.user_id = request.POST['user_id' or None]\r\n experience.company_name = request.POST['company_name' or None]\r\n experience.job_title = request.POST['job_title' or None]\r\n experience.job_description = request.POST['job_description' or None]\r\n experience.year_from = request.POST['year_from' or None]\r\n experience.year_to = request.POST['year_to' or None]\r\n\r\n 
experience.save()\r\n messages.success(request, 'Experience Updated Successfully')\r\n return redirect('dashboard', request.user.pk)\r\n else:\r\n return render(request, 'accounts/edit_experience.html', context)", "title": "" }, { "docid": "1d62bd10f0ddf13b60680d431d1214af", "score": "0.5126918", "text": "def update_teacher(email=None, zipcode=None, bio=None, \n mobile_number=None, days_of_week=None,teaching_experience_in_hours=None,\n pay_rate_per_hour=None, pod_id=None, img_url=None, covid_risk_profile_id=None):\n\n teacher = db.session.query(Teacher).filter(Teacher.email==email).one()\n teacher.zipcode=zipcode \n teacher.bio=bio \n teacher.mobile_number=mobile_number\n teacher.days_of_week=days_of_week\n teacher.teaching_experience_in_hours=teaching_experience_in_hours\n teacher.pay_rate_per_hour=pay_rate_per_hour\n teacher.pod_id=pod_id\n teacher.img_url=img_url\n teacher.covid_risk_profile_id=covid_risk_profile_id\n\n db.session.commit()\n\n return teacher", "title": "" }, { "docid": "608457875b3bc5272855818c41796e9c", "score": "0.5124847", "text": "def edit(obj, index, description, duration, code, task_id, project_id):\n with ots_filestore(obj) as timesheet_storage:\n timesheet_edited = timesheet_storage.edit_timesheet(\n index,\n description=description,\n duration=duration,\n task_code=code,\n task_id=task_id,\n project_id=project_id,\n )\n if timesheet_edited:\n click.echo('Timesheet updated.')\n else:\n click.echo('Nothing to update.')", "title": "" }, { "docid": "3cbff9690ae2f28c9373856792e9b190", "score": "0.50883085", "text": "def update(self, data, id):\n with db.session.begin_nested():\n self.query.filter_by(id=id).update(data)", "title": "" }, { "docid": "f165c0fcebdc404642a446e6277bbe24", "score": "0.5067855", "text": "def test_submit_edit_experiment_wrong_course(self):\n experiment = self.create_test_experiment(name=\"old_name\",\n course_id=TEST_OTHER_COURSE_ID)\n data = {\"name\": \"new_name\", \"notes\": \"\"}\n response = self.client.post(\n reverse(\"ab_testing_tool_submit_edit_experiment\", args=(experiment.id,)),\n content_type=\"application/json\", data=json.dumps(data)\n )\n self.assertError(response, UNAUTHORIZED_ACCESS)", "title": "" }, { "docid": "a61125735c80419b89b3c2ef8fdb6cc7", "score": "0.50544035", "text": "def update_suite(suiteId):\n\t res = requests.post('https://api.ghostinspector.com/v1/suites/'+suiteId+'/?apiKey='+apiKey)\n\t return res", "title": "" }, { "docid": "67143faae1b601cdc1166defe338b29d", "score": "0.505348", "text": "def step_impl(context, post_id):\n context.payload = constants.POST_INFORMATION_TO_UPDATE\n context.response = request.put(context.url + \"/%s\" % post_id, context.payload)", "title": "" }, { "docid": "c923e8f9fc820942ebc8185747323eb5", "score": "0.5051238", "text": "def get_experiment(experiment_id):\n req = requests.get(\"{}experiments/{}/?format=json\".format(__base_url__, experiment_id))\n metadata = req.json()\n if metadata['status'] != 'error':\n save_metadata(metadata)\n else:\n print 'Error fetching metadata for {}'.format(experiment_id)", "title": "" }, { "docid": "0c7f7bf6fb01612d985fa2b143d8134b", "score": "0.5051149", "text": "def wxt_update_person(person_id, data, myheaders):\n update_person_url = 'https://api.ciscospark.com/v1/people/{0}'\n response = requests.put(update_person_url.format(person_id),\n data=json.dumps(data),\n headers=myheaders)\n return response", "title": "" }, { "docid": "4a558e28f4fcb1556894539bd864dab7", "score": "0.5049372", "text": "def experiment_detail(request, exp_name):\n exp = 
get_object_or_404(Experiment, name=exp_name)\n reports = ExperimentReport.objects.filter(experiment=exp)\n\n return render_to_response(\n \"splango/experiment_detail.html\",\n {\"title\": exp.name, \"exp\": exp, \"reports\": reports},\n RequestContext(request))", "title": "" }, { "docid": "0758bd2dc137efe10c77c05015e4b113", "score": "0.50357556", "text": "def update_movie_details(db: Session, sl_id: int, details: schema.UpdateMovie):\n db.query(model.Movies).filter(model.Movies.id == sl_id).update(vars(details))\n db.commit()\n return db.query(model.Movies).filter(model.Movies.id == sl_id).first()", "title": "" }, { "docid": "e912a37963fc2345ab045aa5222960ba", "score": "0.5034493", "text": "def update_by_id(self, id=None, **kwargs):\n if not id:\n raise PardotAPIArgumentError('id is required to update a prospect.')\n response = self._post(path='/do/update/id/{id}'.format(id=id), params=kwargs)\n return response", "title": "" }, { "docid": "bec9d8e407070c9a77a984f35bc91736", "score": "0.5032699", "text": "def update_test_run(self, run_update_model, project, run_id):\n route_values = {}\n if project is not None:\n route_values['project'] = self._serialize.url('project', project, 'str')\n if run_id is not None:\n route_values['runId'] = self._serialize.url('run_id', run_id, 'int')\n content = self._serialize.body(run_update_model, 'RunUpdateModel')\n response = self._send(http_method='PATCH',\n location_id='cadb3810-d47d-4a3c-a234-fe5f3be50138',\n version='5.0',\n route_values=route_values,\n content=content)\n return self._deserialize('TestRun', response)", "title": "" }, { "docid": "41e64bd368f6696cc8782c9dac2067f6", "score": "0.50115055", "text": "def update(self, request, pk=None):\n player = Player.objects.get(user=request.auth.user)\n suspect = Suspect.objects.get(pk=pk)\n suspect.player = player\n suspect.name = request.data[\"name\"]\n suspect.description = request.data[\"description\"]\n guilty = Guilty.objects.get(pk=request.data[\"guiltyId\"])\n suspect.guilty = guilty\n movie = Movie.objects.get(pk=request.data[\"movieId\"])\n suspect.suspect_image_url = request.data[\"suspectImageUrl\"]\n suspect.movie = movie\n \n suspect.save()\n\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "489dce730c6e0515e0c8902d35851c78", "score": "0.49720645", "text": "def update_test(testId):\n\t res = requests.post('https://api.ghostinspector.com/v1/tests/'+testId+'/?apiKey='+apiKey)\n\t return res", "title": "" }, { "docid": "6b1c0e0826bdbdb476676fb3b9967ce7", "score": "0.49566084", "text": "def test_put_experiment_no_permissions(self):\n url = '/' + version + '/collection/col1/experiment/exp1/'\n data = {'description': 'A new experiment for unit tests. 
Updated'}\n\n response = self.client.put(url, data=data)\n self.assertEqual(response.status_code, 403)", "title": "" }, { "docid": "60ee48539f966381bc6e042c165a54a8", "score": "0.49527475", "text": "def put(self, id):\n return DAO.update(id, api.payload)", "title": "" }, { "docid": "3c8a67ef451a5b9d3cfbff45c9f08289", "score": "0.49498895", "text": "def delete_experiment(writer_client, experiment_id):\n logger.info(\"Deleting experiment %r\", experiment_id)\n request = write_service_pb2.DeleteExperimentRequest()\n request.experiment_id = experiment_id\n try:\n grpc_util.call_with_retries(writer_client.DeleteExperiment, request)\n except grpc.RpcError as e:\n if e.code() == grpc.StatusCode.NOT_FOUND:\n raise ExperimentNotFoundError()\n if e.code() == grpc.StatusCode.PERMISSION_DENIED:\n raise PermissionDeniedError()\n raise", "title": "" }, { "docid": "60ebeb2bb4b736d0d454c31d04209770", "score": "0.49268797", "text": "def test_edit_experiment_view_with_tracks_weights(self):\n experiment = self.create_test_experiment()\n experiment.assignment_method = Experiment.WEIGHTED_PROBABILITY_RANDOM\n track1 = self.create_test_track(name=\"track1\", experiment=experiment)\n track2 = self.create_test_track(name=\"track2\", experiment=experiment)\n self.create_test_track_weight(experiment=experiment, track=track1)\n self.create_test_track_weight(experiment=experiment, track=track2)\n response = self.client.get(reverse(\"ab_testing_tool_edit_experiment\", args=(experiment.id,)))\n self.assertTemplateUsed(response, \"ab_tool/edit_experiment.html\")", "title": "" }, { "docid": "1a83bb2f35b27541623f09db41fce462", "score": "0.49165675", "text": "def update_results(student_id, experiment_id, student_response):\n\n replies = get_experiment_replies(experiment_id)\n replies_dict = json.loads(replies)\n\n replies_dict['replies'].append({\"id\": int(student_id), \"response\": student_response})\n\n new_replies = json.dumps(replies_dict)\n\n con = sql.connect(DB_PATH)\n cur = con.cursor()\n\n cur.execute(\"UPDATE experiments SET replies = ? WHERE id = ?\", (new_replies, experiment_id))\n\n cur.execute(\"DELETE FROM experiments_students \"\n \"WHERE student_id = ? 
AND experiment_id = ?\", (student_id, experiment_id))\n\n con.commit()\n\n con.close()", "title": "" }, { "docid": "d8b1a5247263b3eaa3ed81d6c8455d38", "score": "0.49110433", "text": "def update(self, kwargs):\n if self._status in [DEFAULT, DETACHED]:\n if any([k not in self.param_keys() and k in self._specials for k in kwargs]):\n raise ValueError('Key not recognised!')\n else:\n self.__dict__.update(kwargs)\n else:\n raise ValueError('Parameters of a created experiment cannot be updated.')", "title": "" }, { "docid": "b058a3ccc0c492a3a2054e916ba606a3", "score": "0.4910217", "text": "def update(self, model, rec_id, data):\n\n url = self.base_url + model + '/' + str(rec_id)\n data = format_write_data(data)\n return self.session.put(url, data=data)", "title": "" }, { "docid": "9c3a3f85820ec6698cda066db2a4dae5", "score": "0.4900522", "text": "def detail_put(self, request, id, *args, **kwargs):\n raise NotAllowed()", "title": "" }, { "docid": "84ef2def248ce0213ba3a195c608662a", "score": "0.48950928", "text": "def edit_education(request, id):\r\n education = Education.objects.get(pk=id)\r\n context = {\r\n 'education': education\r\n }\r\n if request.method == 'POST':\r\n education.user_id = request.POST['user_id' or None]\r\n education.school = request.POST['school' or None]\r\n education.field_of_study = request.POST['field_of_study' or None]\r\n education.qualification = request.POST['qualification' or None]\r\n education.year = request.POST['year' or None]\r\n\r\n education.save()\r\n messages.success(request, 'Education Updated Successfully, you may add another !')\r\n return redirect('dashboard', request.user.pk)\r\n else:\r\n return render(request, 'accounts/edit_education.html', context)\r\n\r\n # DELETE EDUCATION\r", "title": "" }, { "docid": "1f9b95546a734ec60c96a828783d5784", "score": "0.4894346", "text": "def test_experiment_specification(self, client, ephemeral_storage):\n\n storage = ephemeral_storage.storage\n\n _add_experiment(storage, name=\"a\", version=1, _id=1)\n _add_trial(storage, experiment=1, id_override=\"ae8\", status=\"completed\")\n\n response = client.simulate_get(\"/experiments/status/a\")\n assert response.status == \"200 OK\"\n assert response.json == {\n \"trials_completed\": 1,\n \"best_trials_id\": \"7bbfdb8c684aa5f4be324a09d7da94af\",\n \"best_evaluation\": 0.05,\n \"start_time\": \"0001-01-01 00:00:00\",\n \"finish_time\": \"0001-01-02 00:00:00\",\n \"max_trials\": 10,\n \"nb_trials\": 1,\n \"progress\": 0.1,\n \"trial_status_count\": {\"completed\": 1},\n \"elapsed_time\": \"23:59:50\",\n \"sum_of_trials_time\": \"23:59:50\",\n \"eta\": \"8 days, 23:58:30\",\n \"eta_milliseconds\": 777510000.0,\n }", "title": "" }, { "docid": "527afa47e52ca91f15e0bf2975749899", "score": "0.48926717", "text": "def edit(id):\n\n pet = Pet.query.get_or_404(id)\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.data[\"photo_url\"]\n pet.notes = form.data[\"notes\"]\n pet.available = form.data['available']\n db.session.commit()\n flash(f\"{pet.name} updated.\", 'success')\n return redirect(url_for('index'))\n\n else:\n return render_template(\"edit_pet_form.html\", form=form, pet=pet)", "title": "" }, { "docid": "f73c9c211624982ebce62505144b3717", "score": "0.48906887", "text": "def update(self):\n response = self.api(self.id).get()\n self._update_fields(response)", "title": "" }, { "docid": "3aac6ddcfb59a9b9059b7ceec9897878", "score": "0.48723197", "text": "def put(self, id):\n data = request.json\n if data[\"entityID\"] != id:\n return 
{\"message\": \"entityID property in the incoming json object and id parameter in the URL path are\"\n \"not matched\"}, 400\n evn = EventService.get_by_id(id)\n if not evn:\n return {\"message\": \"The event does not exist\"}, 404\n else:\n return EventService.update(data, id)", "title": "" }, { "docid": "4ee013b5fc5b6473163ae2bdd9f275cd", "score": "0.4865083", "text": "def test_experiment_specification(self, client, ephemeral_storage):\n\n storage = ephemeral_storage.storage\n\n _add_experiment(storage, name=\"a\", version=1, _id=1)\n _add_trial(storage, experiment=1, id_override=\"ae8\", status=\"completed\")\n\n response = client.simulate_get(\"/experiments/a\")\n\n assert response.status == \"200 OK\"\n\n assert response.json[\"name\"] == \"a\"\n assert response.json[\"version\"] == 1\n assert response.json[\"status\"] == \"not done\"\n assert response.json[\"trialsCompleted\"] == 1\n assert response.json[\"startTime\"] == \"0001-01-01 00:00:00\" # TODO\n assert response.json[\"endTime\"] == \"0001-01-02 00:00:00\" # TODO\n assert len(response.json[\"user\"])\n assert response.json[\"orionVersion\"] == \"x.y.z\"\n\n _assert_config(response.json[\"config\"])\n _assert_best_trial(response.json[\"bestTrial\"])", "title": "" }, { "docid": "d7d6f7bd49709c1e26e4af92b13dda56", "score": "0.48624727", "text": "def api_employee_edit(request):\r\n\r\n if request.method.lower() == 'post':\r\n\r\n data = request.POST.dict()\r\n employee_id = int(request.POST['id'])\r\n\r\n try:\r\n employee = Employee.objects.get(id=employee_id)\r\n\r\n form_employee = EmployeeEditForm(data=data, instance=employee)\r\n\r\n if form_employee.is_valid():\r\n form_employee.save()\r\n return ApiResponse.http(status=ApiResponse.ST_SUCCESS, message='Employee successfully updated!', employee_id=employee_id)\r\n else:\r\n errors = dict(form_employee.errors)\r\n return ApiResponse.http(status=ApiResponse.ST_FAILED, message='Validation errors.', errors=errors)\r\n except Employee.DoesNotExist:\r\n return ApiResponse.http(status=ApiResponse.ST_FAILED, message='Invalid employee.')\r\n else:\r\n return ApiResponse.http(status=ApiResponse.ST_FORBIDDEN, message='Use Post!')", "title": "" }, { "docid": "9bcea40c6b9b0fdaa251a9fb9aff1687", "score": "0.4860615", "text": "def delete_experiment(request, pk):\n\n try:\n experiment = Experiment.objects.get(pk=pk)\n except Experiment.DoesNotExist:\n return Response('Experiment with given ID not found', status=HTTP_404_NOT_FOUND)\n\n if experiment.deleted is True:\n return Response('Experiment with given ID is already deleted', status=HTTP_400_BAD_REQUEST)\n\n experiment.deleted = True;\n experiment.save()\n\n return Response(HTTP_200_OK)", "title": "" }, { "docid": "c3d2f2168376364b60de974564c9a455", "score": "0.48573977", "text": "def patch(self, incident_id):\n data = request.get_json()\n\n # script to allow only location request.body\n key_list = [*data]\n if (len(key_list) == 1 and key_list[0] == \"location\"):\n\n new_incident_instance = IncidentsModel()\n res = new_incident_instance.edit_incident(incident_id, data)\n\n if res:\n return {\n \"status\": 200,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"incident record has been updated\"\n }]\n }, 200\n else:\n return {\n \"status\": 404,\n \"error\": \"Not found for id {}\".format(incident_id)\n }, 404\n else:\n return {\n \"status\": 403,\n \"error\": \"Bad Request only location is allowed\"\n }, 403", "title": "" }, { "docid": "b76af9e574ad062068ef873994d3d48a", "score": "0.48331708", "text": "def 
update_teste_cmd(teste_id, **teste_properties):\n return UpdateTesteCommand(teste_id, **teste_properties)", "title": "" }, { "docid": "b77d5e6d015b83efb6dd8d19931b53fe", "score": "0.48324943", "text": "def get_experiment(request, pk):\n\n try:\n experiment = Experiment.objects.get(pk=pk)\n except Experiment.DoesNotExist:\n return Response('Experiment with given ID not found', status=HTTP_404_NOT_FOUND)\n\n if experiment.deleted is True:\n return Response('Experiment with given ID not found', status=HTTP_404_NOT_FOUND)\n\n if experiment.disabled is True:\n return Response('This experiment is currently unavailable', status=HTTP_423_LOCKED)\n\n if experiment.expiration < timezone.now():\n return Response('Experiment with given ID already expired', status=HTTP_400_BAD_REQUEST)\n\n serialized = ExperimentSerializer(experiment).data\n\n return Response(serialized, status=HTTP_200_OK)", "title": "" }, { "docid": "834ae376dc46243ee0e99aa11693c662", "score": "0.48319104", "text": "def test_update_resolution(self):\n self.client = APIClient()\n login = self.client.login(username=self.username, password=self.password)\n self.assertEqual(login, True)\n url = reverse('resolution-detail', args=[str(self.upd_pk)])\n\n data = {\n 'description': 'Updated description',\n }\n response = self.client.put(url, data=data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK, 'Resolution update Failed')\n self.assertDictContainsSubset(data, response.data, \"Modified Resolution didn't contain the updated description\")\n\n #lets doublecheck we get updated data back\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, 'Resolution failed to be Retrieved')\n self.assertDictContainsSubset(data, response.data, \"Resolution didn't contain the updated description\")", "title": "" }, { "docid": "4601be0b78971f6a8aa160d438fd449c", "score": "0.48254716", "text": "def test_modify_report(self):\n print \"(\"+self.test_modify_report.__name__+\")\", self.test_modify_report.__doc__\n resp = self.client.put(self.url,\n data=json.dumps(self.report_1_req),\n headers={\"Content-Type\": JSON})\n self.assertEquals(resp.status_code, 204)\n #Check that the report has been modified\n resp2 = self.client.get(self.url)\n self.assertEquals(resp2.status_code, 200)\n data = json.loads(resp2.data)\n #Check that the report has been modified with the new data\n self.assertEquals(data[\"details\"], self.report_1_req[\"details\"])", "title": "" }, { "docid": "747712a9fe8b9ca2a54838b34b33ff28", "score": "0.48183075", "text": "def edit(id):\n sensor = Sensor.query.get(id)\n form = SensorForm(request.form, obj=sensor)\n if request.method == 'POST':\n form.populate_obj(sensor)\n if form.validate():\n db.session.commit()\n flash('You have successfully modified the sensor.', 'success')\n return redirect(url_for('sensors.edit', id=sensor.id))\n return render_template('sensors/edit.html', form=form, sensor=sensor)", "title": "" }, { "docid": "a19c62d0f4ff880e9eb69dbb30f04d74", "score": "0.48122746", "text": "def update(self, obj): \n if isinstance(obj, Entity):\n url_filter = \"v1/ai/skillEntity/name/{}\"\n else:\n url_filter = \"v1/ai/skill/name/{}\"\n url_filter = url_filter.format(obj.name)\n obj._update_json_data()\n response = self.carol.call_api(url_filter, method = 'PUT', data=obj._json_data)\n return response", "title": "" }, { "docid": "6175d3e3097744ca2912d63fc9461e1a", "score": "0.48087907", "text": "def put(self, id):\n pass", "title": "" }, { "docid": 
"07dd4375dada7f132f3e9aad7897d557", "score": "0.48071754", "text": "def update(ctx, name, image, ingredients, id):\n repo = getDrinkRepo()\n\n updateArgs = {}\n if image is not None:\n updateArgs[\"image\"] = image\n\n if ingredients is not None:\n updateArgs[\"ingredients\"] = ingredients\n\n if name is not None and name != \"\":\n updateArgs[\"name\"] = name\n\n success = False if len(updateArgs) <= 0 else repo.updateDrink(id, **updateArgs)\n\n if success:\n ctx.invoke(describe, drink=id)\n else:\n click.echo(\"Unable to update drink. Make sure the ID is right.\")", "title": "" }, { "docid": "568612e39e13d479cbcd2c0e0e449a7d", "score": "0.4804053", "text": "def update_research_plan(self, id, title, description, assigned_to_id, objective, evaluation_process):\n if (id is None or id == '' or id == 0) or \\\n (title is None or title == '') or \\\n (assigned_to_id is None or assigned_to_id == '' or assigned_to_id == 0):\n raise Exception('id and title, assigned_to_id are required.')\n\n op = Operation(silverpond_schema.Mutation)\n parent = op.update_research_plan(id=id, title=title, description=description, assigned_to_id=assigned_to_id,\n objective=objective, evaluation_process=evaluation_process)\n\n # Field Set\n parent.errors()\n researchplan = parent.research_plan()\n fieldset_research_plan(researchplan)\n fieldset_experiment(researchplan.experiments())\n fieldset_research_plan_metrics(researchplan.research_plan_metrics())\n\n ql_endpoint = HTTPEndpoint(self.GRAPHQL_API_ENDPOINT, self.headers)\n data = ql_endpoint(op)\n obj = op + data\n if len(obj.update_research_plan.errors) > 0:\n raise Exception(obj.update_research_plan.errors[0])\n return obj.update_research_plan.research_plan", "title": "" }, { "docid": "9b0ef8b4263c6629412f32c2845466ca", "score": "0.47978824", "text": "def edit_recipe(id):\n recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(id)})\n\n if session[\"user\"] != recipe['created_by']:\n return render_template(\"403.html\")\n\n if request.method == \"POST\":\n edit = {\n \"created_by\": session[\"user\"],\n \"image_url\": request.form.get(\"image_url\"),\n \"name\": request.form.get(\"name\"),\n \"desc\": request.form.get(\"desc\"),\n \"instructions\": request.form.getlist(\"step\"),\n \"ingredients\": request.form.getlist(\"ingredient\"),\n \"amount\": request.form.getlist(\"amount\"),\n \"measure\": request.form.getlist(\"measurement\"),\n }\n mongo.db.recipes.update({\"_id\": ObjectId(id)}, edit)\n flash(\"Changes Successfully Saved\")\n\n return render_template(\"edit_recipe.html\", recipe=recipe)", "title": "" }, { "docid": "f62bedbce7972cef7d5388864810ea47", "score": "0.47965991", "text": "def update(self, request, pk=None):\n raise MethodNotAllowed(method=\"PUT\")", "title": "" }, { "docid": "f62bedbce7972cef7d5388864810ea47", "score": "0.47965991", "text": "def update(self, request, pk=None):\n raise MethodNotAllowed(method=\"PUT\")", "title": "" }, { "docid": "db18595a922a3022c241d41ec0dd8953", "score": "0.47930816", "text": "def put(self, id):\n\n data = request.json\n update_service(id, data)\n return None, 204", "title": "" }, { "docid": "0e53e28a07124d9371460d81cf2a1079", "score": "0.4790036", "text": "def get_experiment_id(self):\n return self.experiment_id", "title": "" }, { "docid": "53e553e91fdc6114550e464616fdb09f", "score": "0.47860163", "text": "def experiment_id(self) -> str:\n return self._experiment_id", "title": "" }, { "docid": "8f101fff17f778b28c18fdd5714ef7e6", "score": "0.47858354", "text": "def _update_details(self, details, obj):\n 
pass", "title": "" }, { "docid": "8a81408420c0f78ee0daaee351debc12", "score": "0.4765387", "text": "def put(self, id):\n data = request.json\n Team.query.filter(Team.id == id).update({'name': data.get('name')})\n team = Team.query.filter(Team.id == id).one()\n enrollments = data.get('enrollments')\n team = add_enrollments(enrollments, team)\n\n db.session.commit()\n return team, 204", "title": "" } ]
97251e022090c1c096acfb522379fe37
Check to see if the tolerance has been met. Returns False if tolerance is met.
[ { "docid": "18dae62af5d8fcb325b8c556954aefee", "score": "0.0", "text": "def check_tol(g, tol, log_scale_param):\n s = np.exp(log_scale_param)\n for tolerance, gradient in zip(tol, g):\n if abs(gradient) > tolerance / s:\n return True\n return False", "title": "" } ]
[ { "docid": "ce055bab6287f8377db1128076fa364f", "score": "0.8071129", "text": "def _tolerance_check(self, tolerance, value):\n if tolerance != None and np.linalg.norm(value) < tolerance:\n print(\"Variable update tolerance was reached. Terminating Search.\")\n return True\n return False", "title": "" }, { "docid": "352bced7ca72b726560ef38f52549699", "score": "0.7769892", "text": "def __bool__(self):\n return bool(abs(self) > tolerance)", "title": "" }, { "docid": "a1971746879f68f3648bf7f1b42f064d", "score": "0.76606834", "text": "def reached_tolerance(self):\n if sum(self.dp) < self.tolerance:\n rospy.logwarn('[%s] Tolerance of %f achieved kp=%f, ki=%f, kd=%f with best error: %f',\n self.name, self.tolerance, self.p[0], self.p[1], self.p[2], self.best_error)\n return True\n return False", "title": "" }, { "docid": "f482c72478d2317b312759080e7312ba", "score": "0.7599516", "text": "def passed(self):\n return self.value_diff <= self.tolerance", "title": "" }, { "docid": "5e0066709419ead4f4919127aa2b7f3d", "score": "0.7546813", "text": "def tolerance(computedValue,targetValue,tolerance):\n\n if abs(computedValue-targetValue)<tolerance:\n return True\n return False", "title": "" }, { "docid": "f7a93d71559ef30aa4a60c670707cb2c", "score": "0.7538815", "text": "def within_tolerance(self, value: float, reference: float) -> bool:", "title": "" }, { "docid": "0b83b4cce34827842e7ef27ba067fd22", "score": "0.69806385", "text": "def tolerance(self):\n return self._tolerance", "title": "" }, { "docid": "0b83b4cce34827842e7ef27ba067fd22", "score": "0.69806385", "text": "def tolerance(self):\n return self._tolerance", "title": "" }, { "docid": "0b83b4cce34827842e7ef27ba067fd22", "score": "0.69806385", "text": "def tolerance(self):\n return self._tolerance", "title": "" }, { "docid": "0b83b4cce34827842e7ef27ba067fd22", "score": "0.69806385", "text": "def tolerance(self):\n return self._tolerance", "title": "" }, { "docid": "0b83b4cce34827842e7ef27ba067fd22", "score": "0.69806385", "text": "def tolerance(self):\n return self._tolerance", "title": "" }, { "docid": "fdc569d7b54c21a1edbc947696b74106", "score": "0.6949747", "text": "def CheckTolerance(self, value):\n tolerance = self._unitop.GetTolerance()\n error = self.CalculateError(value)\n if error > tolerance:\n self._unitop.PushConsistencyError(self, value)", "title": "" }, { "docid": "5bb76f3f9c3cbbc265d428d33cda9b7b", "score": "0.67973673", "text": "def is_satisfied(self, tol=1.0e-6):\n return abs(self()) < tol", "title": "" }, { "docid": "01eed1f9e6d4c494b70631c89a5020eb", "score": "0.67568636", "text": "def check_tolerance(self, last_obj, new_obj=0.0):\n if last_obj is not None:\n delta_obj = abs(float(new_obj) - float(last_obj))\n self.mention('delta obj ratio: %.2e' % (delta_obj / self.TOLERANCE))\n return delta_obj < self.TOLERANCE\n return False", "title": "" }, { "docid": "1f484cc19e63f22b759133eb18b11f88", "score": "0.6700461", "text": "def inTolerance(self):\n ...", "title": "" }, { "docid": "78cb818baf81b4d40eeb15a3d8f3427e", "score": "0.6625738", "text": "def within(a, b, tolerance=1e-5):\n if a==0 and b==0:\n return True\n elif a!=0 and b==0:\n #Avoid divide by zero\n return False\n else:\n return abs((a/b)-1) < tolerance", "title": "" }, { "docid": "df68bc8c4fad2af46f1f98168c37eb99", "score": "0.6526349", "text": "def check_difference(shift1, shift2, tolerance=0.05):\r\n mean_diff = (shift1[1] - shift2[1]) ** 2\r\n sigma_diff = (shift1[2] + shift2[2]) ** 2\r\n res = mean_diff > sigma_diff\r\n if abs(shift1[1] - shift2[1]) < tolerance:\r\n 
res = False\r\n return res", "title": "" }, { "docid": "052e05a351af76d3d07aabb0d11920b3", "score": "0.6519439", "text": "def check_difference(shift1, shift2, tolerance=0.05):\n mean_diff = (shift1[1] - shift2[1]) ** 2\n sigma_diff = (shift1[2] + shift2[2]) ** 2\n res = mean_diff > sigma_diff\n if abs(shift1[1] - shift2[1]) < tolerance:\n res = False\n return res", "title": "" }, { "docid": "54b7b3c683e3287d731d3fc658b0b49e", "score": "0.65151095", "text": "def approx_eq(x, y, tolerance=1e-5):\n return abs(x - y) < tolerance", "title": "" }, { "docid": "b21b7db657d408a524edb16b0a76a8c5", "score": "0.64969355", "text": "def relaxed_allclose(x, y, atol=0.01, pct_wrong_allowable=0.1):\n res = torch.isclose(x, y, atol=atol)\n n_wrong = res.numel() - res.sum()\n n_wrong_allowable = pct_wrong_allowable * res.numel()\n return n_wrong <= n_wrong_allowable", "title": "" }, { "docid": "c98b6d633c65381e53c549fda9118dc5", "score": "0.64799714", "text": "def match(self, other, tolerance=3, amtdev=0.00):\n if (tolerance == ()):\n return (abs(abs(self.amount) - abs(other.amount)) <= amtdev)\n else:\n return (abs(abs(self.amount) - abs(other.amount)) <= amtdev) and \\\n (abs(self.date-other.date) <= datetime.timedelta(days=tolerance))", "title": "" }, { "docid": "66b0b45511093d926ef07d9c350ee676", "score": "0.64740145", "text": "def check_convergence(self):\n return False", "title": "" }, { "docid": "f179475d3fa9daa37883a197df0fde02", "score": "0.647181", "text": "def _check_convergence(self, tol):\n if hasattr(self, 'coeffs_prev'):\n if torch.abs(self.coeffs - self.coeffs_prev).max() < tol:\n return True\n self.coeffs_prev = self.coeffs.detach().clone()\n return False", "title": "" }, { "docid": "cafa9423f1cf921e7a02d68925ddd74a", "score": "0.6433718", "text": "def check_convergence(new_measure, old_measure, direction, threshold):\n\n sign = 1.0 if direction == 'higher' else -1.0\n\n if sign * (new_measure - old_measure) / old_measure < threshold:\n return True\n else:\n return False", "title": "" }, { "docid": "c897e612f977361f4bbbb71eeb1adec3", "score": "0.6422954", "text": "def converged(self):\n\n top_lnls = [abs(x['lnL']) for x in self.estimates()][0:2]\n\n # dif = (top_lnls[0] - top_lnls[1]) / (0.5 * (top_lnls[0] + top_lnls[1]))\n dif = top_lnls[0] - top_lnls[1]\n\n if abs(dif) < 0.1:\n return True\n\n else:\n return False", "title": "" }, { "docid": "fae09a0e92a3b5f68c494935dbbc3b46", "score": "0.6420245", "text": "def tolerance(*args, **kwargs):\n pass", "title": "" }, { "docid": "dfa65081f2e310884cc7d5b8c35c0003", "score": "0.63993156", "text": "def _check_tolerance(self, content, name, value, mos_file):\n\n if (name + \"=\" == \"tolerance=\" and float(value) > 1e-6):\n self._wrong_parameter(mos_file, name, value)", "title": "" }, { "docid": "97ba0fccb0fd64a1b60f0c57d8f1ae49", "score": "0.6392067", "text": "def is_not_close_to(self, other, tolerance):\n self._validate_close_to_args(self.val, other, tolerance)\n\n if self.val >= (other-tolerance) and self.val <= (other+tolerance):\n if type(self.val) is datetime.datetime:\n tolerance_seconds = tolerance.days * 86400 + tolerance.seconds + tolerance.microseconds / 1000000\n h, rem = divmod(tolerance_seconds, 3600)\n m, s = divmod(rem, 60)\n self._err('Expected <%s> to not be close to <%s> within tolerance <%d:%02d:%02d>, but was.' % (self.val.strftime('%Y-%m-%d %H:%M:%S'), other.strftime('%Y-%m-%d %H:%M:%S'), h, m, s))\n else:\n self._err('Expected <%s> to not be close to <%s> within tolerance <%s>, but was.' 
% (self.val, other, tolerance))\n return self", "title": "" }, { "docid": "598e173b3b57da6b873a58422c1b89d2", "score": "0.6380607", "text": "def check_below_threshold(distance, epsilon):\n return distance < epsilon", "title": "" }, { "docid": "ad4e1852afe57b0efaf7eb41c4d73868", "score": "0.63688385", "text": "def test_equal_time(t1, t2, tol=1E-3):\n if abs(t1) <= tol:\n res = abs(t1 - t2) <= tol\n else:\n res = abs(t1 - t2) <= tol * abs(t1)\n return res", "title": "" }, { "docid": "55450f03a699b012ba500303f637fc20", "score": "0.6359971", "text": "def almost_less(x, y, rtol=1e-14, atol=1e-14):\n return x - y <= atol + np.abs(y) * rtol", "title": "" }, { "docid": "f0ba06fef13eccb4c5e30f8990ea616b", "score": "0.6354376", "text": "def get_tolerance():\n return parameters.TOLERANCE", "title": "" }, { "docid": "ea156158ab9f5d2aaf9fc4bdbf001e45", "score": "0.6347623", "text": "def tolerance(self, tol):\n self._tolerance = tol", "title": "" }, { "docid": "ea156158ab9f5d2aaf9fc4bdbf001e45", "score": "0.6347623", "text": "def tolerance(self, tol):\n self._tolerance = tol", "title": "" }, { "docid": "25409b090e1ef08c7cd44cce91d0d556", "score": "0.6339426", "text": "def require_accurate_evaluation(self, noisy_val):\n # Check if the point improves over existing points, \n # or if it could be optimal according to tolerances. \n # In this case, perform a double evaluation.\n best_possible = self.fmin\n if ((noisy_val <= best_possible -\n self.l_settings.eps_impr*max(1.0, abs(best_possible))) or\n (noisy_val <= self.l_settings.target_objval +\n self.l_settings.eps_opt*abs(self.l_settings.target_objval))):\n return True\n else:\n return False", "title": "" }, { "docid": "c7e85303ea78fbed7177461921ba142e", "score": "0.63144785", "text": "def assertNowish(self, timestamp, tolerance=1):\n delta = datetime.now() - timestamp \n self.assertTrue(delta.seconds <= tolerance)", "title": "" }, { "docid": "66fe57c29b7f2dc32be82220469ad628", "score": "0.63110167", "text": "def is_almost_equal(num_1, num_2):\n threshold = 0.03\n if abs(num_1 - num_2) < threshold:\n return True\n else:\n return False", "title": "" }, { "docid": "e96566cf853f4af55a1845bc40f16ae7", "score": "0.6305576", "text": "def close_to(self, other, tol=1e-3):\n for diff in self.iter_differences(other):\n if abs(diff) > tol:\n return False\n return True", "title": "" }, { "docid": "97ec08d14fe9c90904c986522080af42", "score": "0.62932694", "text": "def validate(self, maze, tolerance):\n pos = maze.turtlebot_pos\n\n # all negative positions are invalid\n if any(np.asarray(pos) < 0):\n return False\n elif self.state_pixel is None:\n return True\n # if the position does not change dramatically, then it is valid\n elif np.sum(abs(self.state_pixel - pos)) < tolerance:\n return True\n else:\n return False", "title": "" }, { "docid": "c729962a8c959955b7ce8295b5bcdf55", "score": "0.6289345", "text": "def tolerance(self):\n return self.settings.hu_tolerance", "title": "" }, { "docid": "c729962a8c959955b7ce8295b5bcdf55", "score": "0.6289345", "text": "def tolerance(self):\n return self.settings.hu_tolerance", "title": "" }, { "docid": "59c0b8ed0b1ce02b75778415e3dc7bb9", "score": "0.628875", "text": "def tol(self):\n\n return self._tol", "title": "" }, { "docid": "4e00eb39f18c5d9c9b7cfb437e6f9f4d", "score": "0.62810886", "text": "def is_goal(self, tolerance):\n \n # if self.goal_pixel is not None:\n # print(\"Distance to goal: \", np.linalg.norm(self.goal_pixel - self.state_pixel))\n return (self.goal_pixel is not None) and np.linalg.norm(self.goal_pixel - 
self.state_pixel) < tolerance", "title": "" }, { "docid": "e0b5989ad0bf9e75eb987728af518a83", "score": "0.6276838", "text": "def make_check_values(self, value_calculated:np.array, value_returned:np.array) -> np.ndarray:\n return all(abs(x-y) <= self.eps for x, y in zip(value_calculated, value_returned))", "title": "" }, { "docid": "4e4c8aba4006b0dfda076ed8c32293b3", "score": "0.62742877", "text": "def converged(self, rtol, atol):\n return self.err < rtol * abs(self.mean) + atol", "title": "" }, { "docid": "81a8fd864474efc6c9860a1953ce10be", "score": "0.6266887", "text": "def checkAnswer(comment,value,expected,tol=1e-7,updateResults=True):\n if abs(value - expected) > tol:\n print(\"checking answer\",comment,value,\"!=\",expected)\n if updateResults:\n results[\"fail\"] += 1\n return False\n else:\n if updateResults:\n results[\"pass\"] += 1\n return True", "title": "" }, { "docid": "7c499c9f239a76df6adfde8157564e6c", "score": "0.6262354", "text": "def close_to_exceeding(self) -> bool:\n if self.max_cuts is not None and self.num_cuts >= self.max_cuts:\n return True\n\n thresh = self.longest_seen\n\n if self.max_duration is not None:\n return self.current + thresh >= self.max_duration - 1e-3 # float precision\n return False", "title": "" }, { "docid": "ca048b2f7eed20824b9ec4952b296ff1", "score": "0.626226", "text": "def testEqualityInvalid(self):\n self.assertNotEqual(mox.IsAlmost(1.899), 1.9)", "title": "" }, { "docid": "3aa76e22b72faa026c4886b1c1042820", "score": "0.62560344", "text": "def approx_less_eq(number1, number2):\n if number1 < number2 or abs(number1-number2) < 0.0001:\n return True\n else:\n return False", "title": "" }, { "docid": "b20ed3b6e514e81eed5fc1f427ae2ed8", "score": "0.62403435", "text": "def is_fulfilled(self, tolerance=1e-2) -> bool:\r\n self.satisfied = True\r\n ianionsite = 0\r\n for isite, site in enumerate(self.lse.structure):\r\n if self.lse.valences[isite] < 0:\r\n if (abs(self.anions_bond_strengths[ianionsite]['bond_strengths_sum'] - 2.0)) > tolerance:\r\n self.satisfied = False\r\n\r\n ianionsite = ianionsite + 1\r\n\r\n return self.satisfied", "title": "" }, { "docid": "7ba8649b0078c87521af46929105746a", "score": "0.6238806", "text": "def should_stop(cls, interval_delta, middle_point, epsilon_rel, epsilon_abs):\n return abs(interval_delta) <= epsilon_rel * abs(middle_point) + epsilon_abs", "title": "" }, { "docid": "16a938f0aa9ae53b0660ef67c3505ce2", "score": "0.6237505", "text": "def checkAnswer(comment,value,expected,tol=1e-10,updateResults=True):\n if abs(value - expected) > tol:\n print(\"checking answer\",comment,value,\"!=\",expected)\n if updateResults:\n results[\"fail\"] += 1\n return False\n else:\n if updateResults:\n results[\"pass\"] += 1\n return True", "title": "" }, { "docid": "16a938f0aa9ae53b0660ef67c3505ce2", "score": "0.6237505", "text": "def checkAnswer(comment,value,expected,tol=1e-10,updateResults=True):\n if abs(value - expected) > tol:\n print(\"checking answer\",comment,value,\"!=\",expected)\n if updateResults:\n results[\"fail\"] += 1\n return False\n else:\n if updateResults:\n results[\"pass\"] += 1\n return True", "title": "" }, { "docid": "bf8e86ebc3665ab1a22e210725724cc2", "score": "0.6235275", "text": "def first_failure(self) -> bool:\n if self.error_count == (self._tolerance + 1):\n return True\n return False", "title": "" }, { "docid": "862ceb422d3e20a1aa4297ca1f61debc", "score": "0.6231477", "text": "def overTolerance(self):\n ...", "title": "" }, { "docid": "761e8922afafb75e52b936db13da0302", "score": "0.62229717", 
"text": "def _check_convergence(self, ll_old = None, ll_new = None):\n\t\tif np.isnan(ll_old) or np.isnan(ll_new):\n\t\t\tif np.sum(np.fabs(self.score)) < self.tol:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\telse:\n\t\t\tif np.abs(ll_old - ll_new) < self.tol:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0", "title": "" }, { "docid": "42818455aef80a63675e73aa5aa1ce48", "score": "0.62129897", "text": "def passed(self):\n return self.nominal_length_mm - self.tolerance < self.length_mm < self.nominal_length_mm + self.tolerance", "title": "" }, { "docid": "ff6845c40999f73e9c0068282d2d8d90", "score": "0.6206111", "text": "def satisfied(self):\n if self.current_t is None:\n return False\n return self.current_t >= self.target_t", "title": "" }, { "docid": "9e43890b5778c1c8e01bd2caef100059", "score": "0.62052995", "text": "def check_within_epsilon(true_values, estimated_values, epsilon):\n for i in range(len(true_values)):\n if not is_within_epsilon(true_values[i], estimated_values[i], epsilon):\n return False\n return True", "title": "" }, { "docid": "6003c157d1137bfc396a53043442dd09", "score": "0.6186162", "text": "def _check_termination(self):\n convergence_radius = 0.1\n\n abs_dist = self._get_dist_to_goal()\n if (abs_dist < convergence_radius):\n logging.debug(\"done - success!\")\n return True\n if (self.num_steps > self.MAX_STEPS):\n logging.debug(\"done - max steps reached\")\n logging.debug(\"final delta to goal \\t{}\".format(abs_dist))\n return True\n else:\n return False", "title": "" }, { "docid": "c3968786a87d2361ac9a3411b5ce4ad9", "score": "0.6183106", "text": "def is_convergence(self, eps=1e-6):\n return 1.0 - self.convergence() < eps", "title": "" }, { "docid": "81a478e3f5c74597d60defb839359e63", "score": "0.6168033", "text": "def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n\treturn abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)", "title": "" }, { "docid": "6297d904a6c5ecb1de360644656c890f", "score": "0.61669457", "text": "def check(self, point):\n value = self.calculate_value(point)\n\n if self._equation:\n if math.fabs(value) > EPSILON:\n return False\n return True\n else:\n if value >= 0:\n return True\n return False", "title": "" }, { "docid": "3c466c5bf1a782b9ebeae9e830d5d875", "score": "0.61604327", "text": "def result_is_close_enough(self, result):\n return (np.linalg.norm(result) / np.linalg.norm(self.b)) < self.precision", "title": "" }, { "docid": "4648393a7a9f827347aea27d99f85684", "score": "0.6160404", "text": "def converged(self):\n # This insures that the cost has been computed at least twice without checking iterations\n try:\n is_converged = abs(self.cost[-2] - self.cost[-1]) <= self.tol\n except IndexError:\n is_converged = False\n return is_converged", "title": "" }, { "docid": "3ce06aa43bb5ad73239467f4103b9ed0", "score": "0.6154088", "text": "def withinEpsilon(x, y, epsilon):\r\n return abs(x - y) <= epsilon", "title": "" }, { "docid": "b2fc040f3290c78528ec01bdab21f953", "score": "0.6151508", "text": "def check_answer(self, x, ftol):\n if (self.lb is not None and np.any(x < self.lb) or\n self.ub is not None and np.any(x > self.ub)):\n return False\n\n f = np.sum(self.fun(x) ** 2)\n return f < (1 + ftol) * self.fopt", "title": "" }, { "docid": "3751932a2738cc6efda700136490a635", "score": "0.6150774", "text": "def withinToleranceOfGoal(self, possible_node, goal_point, tolerance_m):\n dx, dy = goal_point.x - possible_node.point.x, goal_point.y - possible_node.point.y\n dist = math.sqrt(dx * dx + dy * dy)\n if dist <= tolerance_m:\n return 
True\n return False", "title": "" }, { "docid": "7877ad34d7fdb9f8246a47f4d0f4651c", "score": "0.6150306", "text": "def pass_tol(self):\n pass_tol = self.stat < self.tol\n return pass_tol", "title": "" }, { "docid": "9b5665e2b8bf3a5e9a7c07c7467ccc21", "score": "0.6146118", "text": "def on_curve(self, curve, tol=1e-6):\n return self.distance_to_point(curve.closest_point(self)) < tol", "title": "" }, { "docid": "d7d2de72638ff94b890520db4a3ee527", "score": "0.614382", "text": "def is_close_to(self, other, tolerance):\n self._validate_close_to_args(self.val, other, tolerance)\n\n if self.val < (other-tolerance) or self.val > (other+tolerance):\n if type(self.val) is datetime.datetime:\n tolerance_seconds = tolerance.days * 86400 + tolerance.seconds + tolerance.microseconds / 1000000\n h, rem = divmod(tolerance_seconds, 3600)\n m, s = divmod(rem, 60)\n self._err('Expected <%s> to be close to <%s> within tolerance <%d:%02d:%02d>, but was not.' % (self.val.strftime('%Y-%m-%d %H:%M:%S'), other.strftime('%Y-%m-%d %H:%M:%S'), h, m, s))\n else:\n self._err('Expected <%s> to be close to <%s> within tolerance <%s>, but was not.' % (self.val, other, tolerance))\n return self", "title": "" }, { "docid": "1d3859dbee4ccea0f8a16f0893fceca7", "score": "0.61364233", "text": "def tolerance(focuser):\n return 0", "title": "" }, { "docid": "ea3c1cea00539b49b223f0de89c48038", "score": "0.6133284", "text": "def win(self):\n return abs(self) < 0.4", "title": "" }, { "docid": "e0075ced987e952c0fe93c9be7232991", "score": "0.6107463", "text": "def is_satisfied(\n self,\n x: \"CartesianCoordinates\",\n tol: float = 1e-4,\n ) -> bool:\n return abs(self.delta(x)) < tol", "title": "" }, { "docid": "b7d54b5fe2b7b828ebba13bce6f9f414", "score": "0.61056906", "text": "def approx_equal(x, y, atol=.01):\n return abs(x - y) <= 1e-2 * abs(x) or abs(x - y) <= 1e-2 * abs(y) or abs(x - y) <= atol", "title": "" }, { "docid": "b7d54b5fe2b7b828ebba13bce6f9f414", "score": "0.61056906", "text": "def approx_equal(x, y, atol=.01):\n return abs(x - y) <= 1e-2 * abs(x) or abs(x - y) <= 1e-2 * abs(y) or abs(x - y) <= atol", "title": "" }, { "docid": "3a1f4bdc249cb1e97c3ff44c4588e500", "score": "0.6102711", "text": "def check_convergence(\n prev_loss,\n cur_loss,\n cur_step,\n iter_below_tol,\n abs_loss_chg_tol=0.001,\n min_num_iter=100,\n max_iter_below_tol=50\n):\n has_converged = False\n\n # Check if we have reached the desired loss tolerance.\n loss_diff = abs(prev_loss - cur_loss)\n if loss_diff < abs_loss_chg_tol:\n iter_below_tol += 1\n else:\n iter_below_tol = 0\n\n if iter_below_tol >= max_iter_below_tol:\n has_converged = True\n\n if cur_step < min_num_iter:\n has_converged = False\n\n return has_converged, iter_below_tol", "title": "" }, { "docid": "eff80409fd268c1870d26d9ad330a405", "score": "0.6100981", "text": "def same(x,y):\n print x,y\n if abs(x-y) < 0.000001:\n return True\n else:\n return False", "title": "" }, { "docid": "666dc76b0995415179aa619056de6e3d", "score": "0.6099752", "text": "def test_rmsd_convergence(self):\n\n # http://en.wikipedia.org/wiki/\n # Non-linear_least_squares#Convergence_criteria\n try:\n r1 = self.history[\"rmsd\"][-1]\n r2 = self.history[\"rmsd\"][-2]\n except IndexError:\n return False\n\n tests = [\n abs((r2[0] - r1[0]) / r2[0]) < self._rmsd_tolerance if r2[0] > 0 else True\n ]\n\n return all(tests)", "title": "" }, { "docid": "f56d59064290fa3d889a3e8c5b3948dc", "score": "0.6090564", "text": "def withinEpsilon(x, y, epsilon):\n\t\t return abs(x - y) <= epsilon", "title": "" }, { "docid": 
"3b136615e4b36f758caaa630be12e493", "score": "0.60889125", "text": "def is_time_around(expected_time, loop=None, delta=.01):\n now = loop.time() if loop else time.time()\n return (expected_time - delta) <= now <= (expected_time + delta)", "title": "" }, { "docid": "53bc44057f3e8cfc7bf7c71e9074e729", "score": "0.608647", "text": "def almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:\n return abs(value_1 - value_2) <= delta", "title": "" }, { "docid": "5a4e870bbde087be74dc9e5b654a6cda", "score": "0.6082421", "text": "def check_below_threshold(distance, epsilon):\n\n accepted = False\n for i in range(len(epsilon)):\n if epsilon[i] >= distance[i] >= 0:\n accepted = True\n else:\n accepted = False\n break\n return accepted", "title": "" }, { "docid": "fcd33f8d6dc55e034f7e449659c687d8", "score": "0.60819095", "text": "def isViolated(self):\n upVar = self._findValue(\"upVar\")\n lowVar = self._findValue(\"lowVar\")\n freeVar = self._findValue(\"freeVar\")\n result = abs(upVar + lowVar) >= const.EPS\n if result:\n log.debug(\n \"isViolated %s, upVar %s, lowVar %s, freeVar %s result %s\"\n % (self.name, upVar, lowVar, freeVar, result)\n )\n log.debug(f\"isViolated value lhs {self.findLHSValue()} constant {self.RHS}\")\n return result", "title": "" }, { "docid": "3e153b1e868caa0e2263a65e3d9fd63d", "score": "0.60777473", "text": "def check_pass_or_fail(self,\n epsilon_failure_tolerance =np.nan, epsilon_failure_tolerance_default =None,\n non_finite_data_tolerance =np.nan, non_finite_data_tolerance_default =None,\n total_data_failure_tolerance=np.nan, total_data_failure_tolerance_default=None,\n min_acceptable_r_squared =np.nan, min_acceptable_r_squared_default =None\n ) :\n\n passValues = [ ]\n \n # test the epsilon value tolerance\n \n # get the tolerance for failures compared to epsilon\n epsilonTolerance = epsilon_failure_tolerance if epsilon_failure_tolerance is not np.nan else epsilon_failure_tolerance_default\n\n # did we fail based on the epsilon?\n failed_fraction = self.comparison.diff_outside_epsilon_fraction\n passed_epsilon = None if (epsilonTolerance is None) else (failed_fraction <= epsilonTolerance)\n passValues.append(passed_epsilon)\n\n # test the nonfinite tolerance\n \n # get the tolerance for failures in amount of nonfinite data (in spatially valid areas)\n nonfiniteTolerance = non_finite_data_tolerance if non_finite_data_tolerance is not np.nan else non_finite_data_tolerance_default\n \n # did we fail based on nonfinite data\n non_finite_diff_fraction = self.finiteData.finite_in_only_one_fraction\n passed_nonfinite = None if (nonfiniteTolerance is None) else (non_finite_diff_fraction <= nonfiniteTolerance)\n passValues.append(passed_nonfinite)\n\n # test if the total failed percentage is acceptable\n \n # get the total percentage of failed data that is acceptable\n totalFailTolerance = total_data_failure_tolerance if total_data_failure_tolerance is not np.nan else total_data_failure_tolerance_default\n \n # did we fail based on all data failures?\n passed_all_percentage = None if (totalFailTolerance is None) else ((non_finite_diff_fraction + failed_fraction) <= totalFailTolerance)\n passValues.append(passed_all_percentage)\n\n # test the r-squared correlation coefficent\n \n # get the minimum acceptable r-squared correlation coefficient\n min_r_squared = min_acceptable_r_squared if (min_acceptable_r_squared is not np.nan) else min_acceptable_r_squared_default\n\n # did we fail based on the r-squared correlation coefficient?\n r_squared_value = 
None if (min_r_squared is None) else self.comparison.r_squared_correlation\n passed_r_squared = None if (min_r_squared is None) else (r_squared_value >= min_r_squared)\n passValues.append(passed_r_squared)\n\n # figure out the overall pass/fail result\n didPass = None\n for passValue in passValues :\n # if passValue isn't none, we need to update didPass\n if passValue is not None :\n if didPass is not None :\n didPass = passValue and didPass\n else :\n didPass = passValue\n \n return didPass, failed_fraction, non_finite_diff_fraction, r_squared_value", "title": "" }, { "docid": "dd3456078c758c8906845f50e0b61fcb", "score": "0.60761505", "text": "def check_distractor_distance_threshold(self, goal, gripper):\n self.current_norm_distance = self.calc_distance(goal, gripper)\n threshold = 0.1\n return self.current_norm_distance < threshold", "title": "" }, { "docid": "3355029114166bf81231fd0d2d801c5d", "score": "0.6064035", "text": "def isclose(a, b, rel_tol=1e-04, abs_tol=0.0):\n return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)", "title": "" }, { "docid": "32e334610a88894d3aaa5bac1639704b", "score": "0.6063143", "text": "def testEqualityValid(self):\n self.assertEqual(mox.IsAlmost(1.8999999999), 1.9)", "title": "" }, { "docid": "92458285d0dafe3180d047c506589c65", "score": "0.6051033", "text": "def assert_approximately_equal(computed_potential, expected_potential, tolerance=ENERGY_TOLERANCE):\n\n # Compute error.\n error = (computed_potential - expected_potential)\n\n # Raise an exception if the error is larger than the tolerance.\n if abs(error) > tolerance:\n raise Exception(\"Computed potential %s, expected %s. Error %s is larger than acceptable tolerance of %s.\" % (computed_potential, expected_potential, error, tolerance))\n \n return", "title": "" }, { "docid": "a1b3eccca005b97c8d678bf85206ff99", "score": "0.6048701", "text": "def approx_equal(x,y):\n if abs(x-y) < 0.00001:\n return True\n else:\n return False", "title": "" }, { "docid": "fcbdd67840da82380b3179d13588b9e7", "score": "0.60423213", "text": "def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)", "title": "" }, { "docid": "fcbdd67840da82380b3179d13588b9e7", "score": "0.60423213", "text": "def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)", "title": "" }, { "docid": "bb6d4c5d6a88c6cdeaabba4604521bb3", "score": "0.6019731", "text": "def is_almost_equal(x, y, epsilon=1 * 10 ** (-8)):\r\n return abs(x - y) <= epsilon", "title": "" }, { "docid": "c6f0f831144f4f4bc4e2b444dda49c07", "score": "0.6016614", "text": "def terminate(fitness, tolerance):\n # for i in fitness:\n # if abs((2**(1.0 / 2)) - i) < tolerance:\n # return True\n return False", "title": "" }, { "docid": "4736fe1b1e1ae33fa99ac5411b20e523", "score": "0.6011596", "text": "def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)", "title": "" }, { "docid": "45f49e14c0c463fd5b66be61780bae72", "score": "0.60109454", "text": "def isViolated(self):\n if abs(value(self.denominator)) >= const.EPS:\n if self.lowTarget is not None:\n if self.lowTarget > self.findLHSValue():\n return True\n if self.upTarget is not None:\n if self.findLHSValue() > self.upTarget:\n return True\n else:\n # if the denominator is zero the constraint is satisfied\n return False", "title": "" }, { "docid": "2f14dead98b6ba6800efd45cdbe674d5", "score": "0.6010444", "text": "def float_cmp(x, y, rtol=1e-14, 
atol=1e-14):\n return np.abs(x - y) <= atol + np.abs(y) * rtol", "title": "" }, { "docid": "636a908b2b82f32903e61aae0d60938c", "score": "0.5998174", "text": "def approx_equal(a, b, tol=1e-9):\n return fabs(a-b) <= max(fabs(a), fabs(b)) * tol", "title": "" }, { "docid": "ced2eec7247f91ef01d988715d9d6ab9", "score": "0.5985774", "text": "def is_close_enough(x, y):\n\n return abs(x - y) < 0.0001", "title": "" }, { "docid": "4d7d4ae19a201d3f531de04477e5da05", "score": "0.5979146", "text": "def _check_epsilon(epsilon):\n if not (isinstance(epsilon, float) and 1e-5 <= epsilon <= 0.3):\n print(\"> ERROR: epsilon must be a float between 0.3 and 10^-5\")\n return False\n\n return True", "title": "" }, { "docid": "ffcf0711a0d3f286558a560e3298c417", "score": "0.59740263", "text": "def getTolerance(self):\n ...", "title": "" } ]
3267fcb6ce40a8d0595e6979dd8f0ae1
Return filenames that have a specified set of extensions.
[ { "docid": "841a54f260628de898d042118a6d2804", "score": "0.671288", "text": "def filenameMatchesAListOfExtensions(filename, extensionList=None):\n if extensionList is not None:\n for currExt in extensionList:\n if filename.lower().endswith(currExt.lower()):\n return True\n return False", "title": "" } ]
[ { "docid": "980d3d0380131c6070595da9574d0e44", "score": "0.7548063", "text": "def get_files(dirname, extensions=['.png', '.tif', '.jpg']):\n dir_path = Path(dirname)\n\n files = dir_path.glob('**/*')\n\n files = [path.resolve() for path in files]\n\n match = [f for f in files if f.suffix in extensions]\n return match", "title": "" }, { "docid": "756bf2887c3883a51543e44a722ba14b", "score": "0.74919146", "text": "def ext_filter(directory, exts, ABS_PATHS = True):\n if isinstance(exts,list) or isinstance(exts,tuple):\n exts = list(exts)\n if ABS_PATHS:\n return [opj(directory,file) for file in os.listdir(directory) if \n os.path.splitext(file)[1] in exts]\n else:\n return [file for file in os.listdir(directory) if \n os.path.splitext(file)[1] in exts]", "title": "" }, { "docid": "59861aab08ffbe5d5f4534e9f1ede444", "score": "0.73087174", "text": "def filter_by_extension(ext):\n def filt(filename):\n return filename.endswith(ext)\n return filt", "title": "" }, { "docid": "1418fdb60218040b473b134833d3322a", "score": "0.7305053", "text": "def get_files_with_extension(dir, names, ext):\n list = []\n for name in names:\n e = os.path.splitext(name)[1]\n if e == ext:\n list.append(name)\n list.sort()\n return [os.path.join(dir, n) for n in list]", "title": "" }, { "docid": "b4aa8be8863df9601ac1abf7504c66b8", "score": "0.726645", "text": "def _get_allowed_extensions():\n return [\n ext.lower()\n for ext, file_type in Image.EXTENSION.items()\n if file_type.upper() in MIME_TYPE_TO_PIL_IDENTIFIER.values()\n ]", "title": "" }, { "docid": "aaae5bf503325a9a14f507df67b18904", "score": "0.7261796", "text": "def filter_by_suffix(files):\n if includesuffix:\n for insuffix in includesuffix:\n extname = '*.' + insuffix\n if fnmatch.fnmatch(files, extname):\n return True\n\n if excludesuffix:\n for exsuffix in excludesuffix:\n extname = '*.' 
+ exsuffix\n if fnmatch.fnmatch(files, extname):\n return False\n\n if includedefault:\n return True\n else:\n return False", "title": "" }, { "docid": "f6ad56e049784f05cd7a715e0f302633", "score": "0.7199362", "text": "def filter_files(directory, extensions):\n\n return map(lambda x: os.path.join(directory, x),\n filter(lambda x: re.search('\\.({0})$'.format(\n '|'.join(extensions)), x), sorted(os.listdir(directory))))", "title": "" }, { "docid": "069c2ef98f7e6ab26e42a762fee47e2c", "score": "0.71652", "text": "def allowed_file_exts(files: List, extensions: List[str]) -> bool:\n for f in files:\n ext = get_file_extension(f.filename)\n if ext not in extensions:\n return False\n return True", "title": "" }, { "docid": "1cf1ff4052794423d8f2d4f6e16fb27f", "score": "0.71020085", "text": "def file_extensions_get(fname_list):\n return [os.path.splitext(fname)[-1] for fname in fname_list]", "title": "" }, { "docid": "df91497740c894b23886e74235c15f45", "score": "0.69904", "text": "def list_files_with_extensions(directory, ext_list):\n filepath_list = sum(\n [\n glob(\"{0}/*{1}\".format(directory, ext))\n for ext in ext_list\n ],\n [],\n )\n filenames = [os.path.basename(filepath) for filepath in filepath_list]\n return filenames", "title": "" }, { "docid": "8c7239a42c8c11c31617909d28b183ed", "score": "0.6975149", "text": "def get_file_list(path, ext):\n return [f for f in os.listdir(path) if \n os.path.isfile(os.path.join(path, f)) and f.endswith(ext)]", "title": "" }, { "docid": "65c3dee1bd54f7da56b4079011cad679", "score": "0.69076073", "text": "def getFileNames():\n included_extenstions = ['json' ]\n file_names = [fn for fn in os.listdir(os.getcwd()) \n if any([fn.endswith(ext) \n for ext in included_extenstions])] \n return file_names", "title": "" }, { "docid": "276a32c3bedac93c824d12a57fa12d6c", "score": "0.6899496", "text": "def _generate_filename_list(self):\n fq_filename, extension = os.path.splitext(self._fq_infilename)\n\n la_ext_list = [\n 'la{0}'.format(char) for char in string.lowercase]\n da_ext_list = [\n 'da{0}'.format(char) for char in string.lowercase]\n\n extension_list = la_ext_list + da_ext_list\n\n filename_list = [fq_filename + '.{0}'.format(ext)\n for ext in extension_list\n if os.path.exists(fq_filename + '.{0}'.format(ext))]\n\n return filename_list", "title": "" }, { "docid": "f8ec45f3da344c3e14e5a94841f8ca67", "score": "0.68790245", "text": "def _file_has_extensions(self, filestr, extensions):\r\n for extension in extensions:\r\n if filestr.find(extension) > -1:\r\n return True\r\n return False", "title": "" }, { "docid": "1dd879bf8ef32a15b5c6accde3a3c28d", "score": "0.6872849", "text": "def get_exts(filepath, all_exts=True):\n \n ext = 42 # initialize and set to non-0 value\n out = [] # the output array of extensions\n \n while ext:\n filepath, ext = path.splitext(filepath)\n\n if ext:\n clean_ext = ext[1:] # strip the \".\" off the front of ext\n out = [clean_ext] + out # append to output array\n if not all_exts: break # if all_exts is False then end the\n # loop after grabbing the first ext\n return out", "title": "" }, { "docid": "b01cc22ff929e71d1968875d4d7497db", "score": "0.6780839", "text": "def has_file_allowed_extension(filename, extensions):\r\n return filename.lower().endswith(extensions)", "title": "" }, { "docid": "799cf33845c24e316c0f6012022c9d39", "score": "0.6763899", "text": "def get_files(dir_path, extension):\n files = []\n for file in os.listdir(dir_path):\n if file.endswith(f\".{extension}\"):\n files.append(file)\n return files", "title": "" }, { 
"docid": "dc9a2178e41285df4b777ad0ee69655b", "score": "0.6762181", "text": "def test_find_files_returns_only_files_with_correct_extensions(tmpdir):\n folder = tmpdir.mkdir('sub')\n txt = folder.join('file1.txt').write('foo') #noqa\n py = folder.join('file2.py').write('foo') # noqa\n\n exts = ('.txt')\n files = find_files(folder, exts=exts)\n\n # only keep the extensions - makes the assert simpler.\n files = [f[-4:] for f in files]\n\n assert '.txt' in files", "title": "" }, { "docid": "b0dee152af03710c76a66f7b2baa3081", "score": "0.67420954", "text": "def has_file_allowed_extension(filename, extensions):\n return filename.lower().endswith(extensions)", "title": "" }, { "docid": "b0dee152af03710c76a66f7b2baa3081", "score": "0.67420954", "text": "def has_file_allowed_extension(filename, extensions):\n return filename.lower().endswith(extensions)", "title": "" }, { "docid": "b0dee152af03710c76a66f7b2baa3081", "score": "0.67420954", "text": "def has_file_allowed_extension(filename, extensions):\n return filename.lower().endswith(extensions)", "title": "" }, { "docid": "b0dee152af03710c76a66f7b2baa3081", "score": "0.67420954", "text": "def has_file_allowed_extension(filename, extensions):\n return filename.lower().endswith(extensions)", "title": "" }, { "docid": "2c50aae55f171a1e973c614e2ca4de44", "score": "0.67118245", "text": "def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:\r\n return filename.lower().endswith(extensions)", "title": "" }, { "docid": "5421761bd07d486d4c06a376bbeae8b2", "score": "0.67003095", "text": "def list_files_with_extension(directory, extension):\n tmp = os.listdir(directory)\n logging.debug(tmp)\n extens = [f for f in tmp if os.path.splitext(f)[1] == \".\"+extension]\n return [os.path.join(directory, f) for f in extens]", "title": "" }, { "docid": "f5eab32a1febb229cc347dceb867e23d", "score": "0.667502", "text": "def getFiles(self):\n fileList = [os.path.normcase(f) for f in os.listdir(self.directory)]\n fileList = [os.path.join(self.directory, f) for f in fileList if os.path.splitext(f)[1] in self.extList ]\n return [self.getAssertion(f) for f in fileList]", "title": "" }, { "docid": "19726e185e58dfb46ca6cecbfa3fab44", "score": "0.66600364", "text": "def list():\r\n return [stem for stem, extension in [\r\n os.path.splitext(filename) for filename in os.listdir(_dirname)\r\n ] if extension == '.dat' and stem != 'root']", "title": "" }, { "docid": "81ebd23d3bf8322b15b144d2fafb50b7", "score": "0.6659508", "text": "def list_of_files_in_dir_for_ext(dir, ext='.xtc') :\n if dir is None : return []\n if not os.path.exists(dir) : return [] \n return sorted([f for f in os.listdir(dir) if os.path.splitext(f)[1] == ext])", "title": "" }, { "docid": "645da5d74ffa9a97b65fa0142e44c844", "score": "0.6638439", "text": "def file_list(directory, extension='jpg'):\n return [os.path.join(directory,f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory,f)) and f.endswith(\".{}\".format(extension))]", "title": "" }, { "docid": "7916ced72f4dfb18ecf328bee7073f74", "score": "0.6617648", "text": "def remove_unwanted_files(files: List[str], extensions: List[str]=['.DS_Store', '.gitignore']):\n for source_file in files:\n for ex in extensions:\n if ex in source_file:\n files.remove(source_file)\n break\n return files", "title": "" }, { "docid": "4318629eeb540df122d4835547b3180e", "score": "0.6605068", "text": "def has_file_allowed_extension(filename, extensions):\r\n filename_lower = filename.lower()\r\n return any(filename_lower.endswith(ext) 
for ext in extensions)", "title": "" }, { "docid": "acef0f93bcfae5beca5c606154193b84", "score": "0.66009", "text": "def filter_srt(files):\n\n out = []\n for filename in files:\n splitted = os.path.splitext(filename)\n srt_identifier = \".srt\"\n if splitted[1] == srt_identifier: \n out.append(filename)\n\n return out", "title": "" }, { "docid": "b0f7c84fe8b6dfd6c72f8c698d1a9204", "score": "0.6594698", "text": "def search_file(path, ext):\n to_search = []\n for local_files in os.listdir(path):\n if local_files.endswith(ext):\n to_search.append(local_files)\n return to_search", "title": "" }, { "docid": "8bd9f2583a013c4869bd150f748a48cf", "score": "0.6586023", "text": "def file_extensions_valid(ext_list):\n ext_list_valid = ['.csv','.txt','.xls','.xlsx']\n return ext_list[0] in ext_list_valid", "title": "" }, { "docid": "7658e9c1622a6e670e73581653f33cb5", "score": "0.657985", "text": "def has_file_allowed_extension(filename: PATH_TYPE, extensions: Tuple[str, ...]) -> bool:\n return str(filename).lower().endswith(extensions)", "title": "" }, { "docid": "e5d429d8ce86144952e4aeb00b3126b7", "score": "0.65746367", "text": "def get_file_names(path: str, extension: Union[List, str]) -> List:\n if isinstance(extension, List):\n \n images: list = []\n for ext in extension:\n request = os.path.join(path, f'*.{ext}')\n images += glob(request) # to avoid merging opperation after\n\n return images\n\n elif isinstance(extension, str):\n\n request = os.path.join(path, f'*.{extension}')\n return glob(request) #list\n\n else:\n\n raise TypeError('Only list or str are expected')", "title": "" }, { "docid": "300624ac6baeeb201c602b5784d31828", "score": "0.6562819", "text": "def FileNameList(basepath='.', name='*.*', extension=True, excludename=''):\n files = fnmatch.filter(os.listdir(basepath), name)\n\n if not extension:\n files = [file.split('.')[0] for file in files]\n\n files = ExcludeFromList(files, excludename)\n\n return sorted(files)", "title": "" }, { "docid": "8d86f8d2d9d70ddaf014e1bd0ced7a58", "score": "0.65608174", "text": "def has_file_allowed_extension(filename, extensions):\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in extensions)", "title": "" }, { "docid": "8d86f8d2d9d70ddaf014e1bd0ced7a58", "score": "0.65608174", "text": "def has_file_allowed_extension(filename, extensions):\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in extensions)", "title": "" }, { "docid": "6ba5686840208032086ef18a90ec8b73", "score": "0.6553543", "text": "def get_filenames_names_only(*filenames: str) -> List[str]:\n return [os.path.splitext(file)[0] for file in filenames]", "title": "" }, { "docid": "0e5acc64ff532175cfdabccea7afe32a", "score": "0.6542695", "text": "def _find_files(self, chapter, extension):\n find_pattern = os.path.join(build_config.IMAGES_DIRECTORY,\n chapter, '*%s' % extension)\n found_files = Glob(find_pattern, strings=True)\n # Strip off the extensions.\n length_extension = len(extension)\n found_files = [x[x.index(os.sep) + 1:-length_extension]\n for x in found_files]\n return found_files", "title": "" }, { "docid": "91091c1aac2c6f91219737ba2f01e1a4", "score": "0.65398926", "text": "def list_files(path, extension=\".cpp\", exclude=\"S.cpp\"):\n return [\"%s/%s\" % (path, f) for f in listdir(path) if f.endswith(extension) and (not f.endswith(exclude))]", "title": "" }, { "docid": "7b35943bad645aa56528ba618476ed27", "score": "0.6533559", "text": "def _get_files(paths, types):\n files = set()\n for path in paths:\n for 
dirpath, dirnames, filenames in os.walk(path):\n for filename in filenames:\n try:\n # try-except because [1] and [1:] can fail\n file_ext = os.path.splitext(filename)[1].decode('utf8')[1:]\n if file_ext.lower() in types:\n full_path = os.path.join(dirpath, filename)\n files.add(full_path)\n except:\n pass\n return files", "title": "" }, { "docid": "5077294894066df8af31e946ce04b105", "score": "0.6518748", "text": "def get_dir_filenames(dir_path, extension='.txt'):\n return [ i for i in listdir(dir_path) if i.find(extension) > 0 ]", "title": "" }, { "docid": "ff5fc702b37c7878ca58605e8048dea1", "score": "0.65176266", "text": "def allowed_file(filename):\n return '.' in filename and \\\n filename.split('.')[-1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "c2ede6286900cf09fee7270c3fa7bda3", "score": "0.64979035", "text": "def check_extensions(x, elist):\n if not elist: # if no qualifier then we are good\n return True\n\n extension = os.path.splitext(x)[1].lower()\n\n for y in elist: # for each extension we just have to match one\n if '.'+ y == extension:\n return True\n return False", "title": "" }, { "docid": "6abc209052dcc275c09192e3081b8755", "score": "0.6495629", "text": "def get_filenames(self, prefix, ext='gz'):\n if self.langs:\n return [f'{prefix}.{lang}.{ext}' for lang in self.langs]\n num = len(self.files)\n return [f'{prefix}.{i}.{ext}' for i in range(1, num + 1)]", "title": "" }, { "docid": "651ee5bc19ff5d41c7290c1fb1ddd1c7", "score": "0.64897215", "text": "def list_of_pathes_in_dir_for_ext(dir, ext='.xtc') :\n return [os.path.join(dir,f) for f in list_of_files_in_dir_for_ext(dir, ext)]", "title": "" }, { "docid": "f3ba78dd868419a763fabf267dbd907d", "score": "0.6488085", "text": "def get_files(fpath,extensions,recursive=False):\n p = Path(fpath)\n if recursive:\n filelist = [str(x) for x in p.rglob('*') if x.suffix in extensions]\n else:\n filelist = [str(x) for x in p.glob('*') if x.suffix in extensions]\n assert len(filelist) > 0, f\"No files matching extensions {extensions} found in {fpath}\"\n return(filelist)", "title": "" }, { "docid": "1bf7d95d9cbbf7cbbb7487588a5548c1", "score": "0.6485401", "text": "def filter_lst_files(files):\r\n return [lst_file for lst_file in files if lst_file.endswith(\".lst\")]", "title": "" }, { "docid": "d75973d2539760dbb06cee8f1505f558", "score": "0.6484452", "text": "def listFiles(dir, ext, ignoreExt=None):\n matches = []\n for root, dirs, files in os.walk(dir):\n for f in files:\n if f.endswith(ext):\n if not ignoreExt or (ignoreExt and not f.endswith(ignoreExt)):\n matches.append(os.path.join(root, f))\n return matches", "title": "" }, { "docid": "0bd1deb96d18b65d580a0899b7268dec", "score": "0.64773446", "text": "def mfilter(f):\n return os.path.isfile(f) and f.endswith(ext)", "title": "" }, { "docid": "afbcff433711999ef61b24b30b431e91", "score": "0.64706737", "text": "def _list_dir(ext):\n return [os.path.join(upload_dir, name) for name in os.listdir(upload_dir) if\n os.path.isfile(os.path.join(upload_dir, name)) and name.endswith(ext)]", "title": "" }, { "docid": "75cee534d7e9514a87d45c139de47294", "score": "0.6459723", "text": "def get_file_extension_list(self, tpc_type=None):\n 
 file_names = []\n file_extensions = []\n for data_file_name in self.get_data_file_list(tpc_type=tpc_type):\n datum = data_file_name.split(\".\")\n if datum[0] is None or datum[1] is None or datum[0] == \"\" or datum[1] == \"\":\n raise ValueError(\"An attempt was made to parse non eligible files!\")\n file_names.append(datum[0])\n file_extensions.append(datum[1])\n if len(file_names) != len(file_extensions):\n raise ValueError(\"File name list does not match list of file extensions!\")\n return file_names, file_extensions", "title": "" }, { "docid": "954e5050c09733909745fff458aaed44", "score": "0.6458051", "text": "def lookupExtension(filename):", "title": "" }, { "docid": "d3194b2c6b22e636b9ed62b4210f7497", "score": "0.6446683", "text": "def _get_fortran_files(srcfiles, extensions=False):\n files_out = []\n for srcfile in srcfiles:\n ext = os.path.splitext(srcfile)[1]\n if ext.lower() in [\".f\", \".for\", \".f90\", \".fpp\"]:\n if extensions:\n # save unique extension\n if ext not in files_out:\n files_out.append(ext)\n else:\n files_out.append(srcfile)\n if len(files_out) < 1:\n files_out = None\n return files_out", "title": "" }, { "docid": "f4291c27f6503ca61ac99590c229b061", "score": "0.64121467", "text": "def ext_ambiguous_file_list(fn, ext, do_sub_dirs = None) :\n\n ext = ext or \"\"\n if ext and (ext[0] != '.') :\n ext = '.' + ext\n\n if os.path.isdir(fn) :\n fn = os.path.join(fn, '*' + ext)\n else :\n fns = ambiguous_file_list(fn, do_sub_dirs = do_sub_dirs)\n if not len(fns) : # if there are no ambiguous files to find,\n fn += '*' + ext # then add the extension in case it's not there.\n pass\n fns = ambiguous_file_list(fn, do_sub_dirs = do_sub_dirs) # find the files\n\n return(fns)", "title": "" }, { "docid": "42aa6b0a35ee1c729ca08ab4246b9dec", "score": "0.6408415", "text": "def list_all_files(dir_name, exts=[\"jpg\", \"bmp\", \"png\"]):\n result = []\n for dir_, subdirs, file_names in os.walk(dir_name):\n for file_name in file_names:\n if any(file_name.endswith(ext) for ext in exts):\n result.append(os.path.join(dir_, file_name)) \n return result", "title": "" }, { "docid": "c86969a31ae3ab540e6cf0fb67d500bc", "score": "0.6398939", "text": "def allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "4693c015a2f86460acdecbf08217851b", "score": "0.63641214", "text": "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "e45a64d233e1122b068e3d50407b9620", "score": "0.63629353", "text": "def allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1] \\\n in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "f107566b36eb4d52c54343511a9eb42f", "score": "0.63594925", "text": "def test_filter_to_preferred_ext(image_files):\n tiff_files = list(Path(image_files).glob(\"*.tif\"))\n assert len(tiff_files) == 2000\n jpeg_files = list(Path(image_files).glob(\"*.jpg\"))\n assert len(jpeg_files) == 1000\n image_files = list(Path(image_files).iterdir())\n return_files = list(core.filter_to_preferred_ext(image_files, [\".jpg\"]))\n assert len(return_files) == 2000", "title": "" }, { "docid": "d5f2936129dd4078f5c2d4712324b785", "score": "0.6341543", "text": "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "530c663dd2481cfb9df2f98b2e798f24", "score": "0.633947", "text": "def has_file_allowed_extension(self, filename, extensions):\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in extensions)", "title": "" }, { "docid": "9b9cc413fd83ab2c29e4e9db0769890a", "score": "0.633573", "text": "def _kwik_filenames(filename):\n basename, ext = op.splitext(filename)\n return {ext: '{basename}.{ext}'.format(basename=basename, ext=ext)\n for ext in _KWIK_EXTENSIONS}", "title": "" }, { "docid": "e3472c2429f376eda94d88e246754f0d", "score": "0.63155866", "text": "def allowed_file_extension(filename):\n\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in app.config['UPLOAD_EXTENSIONS']", "title": "" }, { "docid": "d3ec7c26f73886c8002746eda9cbb2fd", "score": "0.63105655", "text": "def allowedFile(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "1ec4943a2176c4c5d643e1c0a952924b", "score": "0.63059044", "text": "def get_files(path, extensions='*'):\n if path[-1] != '/' and path[-1] != '\\\\':\n path = path + '/'\n\n if isinstance(extensions, str) or isinstance(extensions, unicode):\n extensions = [extensions]\n\n all_files = [os.path.join(root, filename)\n for root, dirnames, filenames in os.walk(path)\n for filename in filenames]\n\n filtered_files = [\n filename for filename in all_files\n if get_extension(filename) in extensions or '*' in extensions\n ]\n\n return filtered_files", "title": "" }, { "docid": "2a4add0b8a0cf1a8a2dcb24bf22e8f9a", "score": "0.6276475", "text": "def GetImageExtWildcard():", "title": "" }, { "docid": "b93b2a34639ddd4c3bf1281667a59826", "score": "0.62707496", "text": "def file_extensions_all_equal(ext_list):\n return len(set(ext_list))==1", "title": "" }, { "docid": "e55ab259acc0438e8e1a7c98a5e1a18c", "score": "0.62676054", "text": "def find_by_ext(folder, extension):\n found_files = [path.join(dirpath, f)\n for dirpath, dirnames, files in walk(folder)\n for f in files if f.endswith('.' 
+ extension)]\n return found_files", "title": "" }, { "docid": "6a65a86abf2079431459ba72d2a3cf19", "score": "0.6267129", "text": "def is_valid_extension(filename: str, extensions: Tuple[str, ...]) -> bool:\n\n return any(filename.lower().endswith(ext) for ext in extensions)", "title": "" }, { "docid": "1afcce354f31a092af483f465d296d42", "score": "0.62572294", "text": "def split_matching_extensions_and_other(self, srcs, exts):\n\n matches = []\n leftovers = []\n\n for src in (srcs or []):\n base, ext = os.path.splitext(src)\n if ext in exts:\n matches.append(src)\n else:\n leftovers.append(src)\n\n return (matches, leftovers)", "title": "" }, { "docid": "8fa9688926f891714b18fb38bf371e1c", "score": "0.6253049", "text": "def list_files_with_ext(*paths, ext, maxdepth=-1):\n needed_files = []\n for path in paths:\n if ext in path:\n needed_files.append(path)\n continue\n if not os.path.isdir(path):\n print_red(\n qte(path) + \" is not a directory nor does it contain \" +\n qte(ext) + \". Skipping.\"\n )\n continue\n path = os.path.realpath(path)\n start_level = path.count(os.sep)\n # pylint: disable=unused-variable\n # Need dirs to iterate\n for root, dirs, files in os.walk(path):\n depth = root.count(os.sep) - start_level\n if depth > maxdepth and (maxdepth != -1):\n break\n for filename in files:\n if ext in filename:\n needed_files.append(os.path.join(root, filename))\n return needed_files", "title": "" }, { "docid": "b6340c4e87b1f4f711576f1699c15b5c", "score": "0.62498426", "text": "def get_filtered_files(root_folder, extensions, keywords, strict=False):\n result = []\n unfiltered_files = UtilityBox.get_all_files(root_folder, *extensions)\n for file_path in unfiltered_files:\n if (not strict and any(kw.lower() in file_path.lower() for kw in keywords))\\\n or (strict and all(kw.lower() in file_path.lower() for kw in keywords)):\n result.append(file_path)\n return result", "title": "" }, { "docid": "9073547ed295dc0f959d4a012820c136", "score": "0.6249226", "text": "def get_paths_by_ext(directory, exts=['JPEG']):\n if not directory.endswith('/'):\n directory += '/'\n\n img_paths = []\n for ext in exts:\n img_paths = img_paths + glob(os.path.join(directory) + '**/*.' 
+ ext, recursive=True)\n return sorted(img_paths)", "title": "" }, { "docid": "e54f73d0236ebe7aad1f4c86f4fca73f", "score": "0.624537", "text": "def get_file_info(d, extensions):\n for key in extensions:\n for f in os.listdir(d):\n if f.endswith(key):\n pass", "title": "" }, { "docid": "ff03cf7eb8834954b337babf077d85a7", "score": "0.622903", "text": "def get_related_files(filename):\n related_files = []\n if filename.endswith(\".img\") or filename.endswith(\".hdr\"):\n path, name, ext = split_filename(filename)\n for ext in ['.hdr', '.img', '.mat']:\n related_files.append(os.path.join(path, name + ext))\n elif filename.endswith(\".BRIK\") or filename.endswith(\".HEAD\"):\n path, name, ext = split_filename(filename)\n for ext in ['.BRIK', '.HEAD']:\n related_files.append(os.path.join(path, name + ext))\n if not len(related_files):\n related_files = [filename]\n return related_files", "title": "" }, { "docid": "73a2dceed81f4f2da1fc9fb3e5c52545", "score": "0.62266505", "text": "def grab_names():\n\n output = []\n original = os.listdir()\n \n for filename in original:\n if \".\" in filename:\n continue\n else:\n output.append(filename)\n\n return output", "title": "" }, { "docid": "d767db9cf7334383a8d7456127fc741b", "score": "0.6209032", "text": "def get_all_files(dir, ext):\n extension_suffix = f\".{ext}\"\n files = []\n for file in os.listdir(dir):\n if file.endswith(extension_suffix):\n files.append(os.path.join(dir, file))\n return files", "title": "" }, { "docid": "e47ab99363540050623a47b24d81fc9a", "score": "0.62019986", "text": "def load_images_without_extention(path):\n images = []\n valid_images = [\".jpg\", \".png\", \".jpeg\"]\n for f in os.listdir(path):\n file, ext = os.path.splitext(f)\n if ext.lower() not in valid_images:\n continue\n images.append(file)\n return images", "title": "" }, { "docid": "7eb177ef54ee82e41b2fb88a46dfe5bf", "score": "0.6201061", "text": "def read_directory(extensions):\n for file in os.listdir('.'):\n filenames = file.split('.')\n if os.path.isdir(file):\n continue\n if filenames[1] not in extensions:\n extensions.append(filenames[1])\n print(extensions)", "title": "" }, { "docid": "8755751c0c0eede4b772f14be8436f93", "score": "0.6200504", "text": "def get_filenames(format: str = 'json') -> List[str]:\n filenames = []\n for type in get_filetypes(format):\n filenames.extend(glob.glob(f'*.{type}'))\n return filenames", "title": "" }, { "docid": "2cd5d5d20c08c7a82fb7c096ac33b951", "score": "0.6196302", "text": "def get_files(directory):\n files = []\n for item in os.listdir(directory):\n item = os.path.join(directory, item)\n if not os.path.isfile(item):\n continue\n parts = os.path.splitext(item)\n if parts and len(parts) > 1 and parts[1] == '.py'\\\n and parts[0].lower().find(\"test\") == -1:\n files.append(item)\n return files", "title": "" }, { "docid": "fceba3ab8b472f6765e4facaea1c5bee", "score": "0.61914957", "text": "def _collect_files(self, path, extensions):\n path_list = []\n for dirpath, _, filenames in os.walk(path):\n path_list.extend(\n [\n os.path.join(dirpath, filename)\n for filename in filenames\n if os.path.splitext(filename)[1].lower() in extensions\n ]\n )\n return path_list", "title": "" }, { "docid": "feecc2c670c46c78eacd50b912e03f26", "score": "0.61866087", "text": "def filter_files(pattern, file_names):\n p = re.compile(pattern)\n return [p.match(f) for f in file_names if p.match(f) is not None]", "title": "" }, { "docid": "f73372929bb991939b97752d0da98eda", "score": "0.6159966", "text": "def get_files(dir, suffix):\n files = []\n 
for file in os.listdir(dir):\n if file.endswith(suffix):\n files.append(file)\n return files", "title": "" }, { "docid": "ddf7989ea21b31799113ffb7259206cc", "score": "0.6147999", "text": "def stringsWithImageFileExtensions(listOfStrings):\r\n\r\n #todo: string comparison should ignore case\r\n result = []\r\n for s in listOfStrings:\r\n if (s.find('.tif') != -1) or\\\r\n (s.find('.bmp') != -1) or\\\r\n (s.find('.pgm') != -1) or\\\r\n (s.find('.gif') != -1) or\\\r\n (s.find('.png') != -1):\r\n result.append(s)\r\n return result", "title": "" }, { "docid": "9ea904a64b978053bd910323a78d3c9e", "score": "0.6144758", "text": "def _get_c_files(srcfiles, extensions=False):\n files_out = []\n for srcfile in srcfiles:\n ext = os.path.splitext(srcfile)[1]\n if ext.lower() in [\".c\", \".cpp\"]:\n if extensions:\n if ext not in files_out:\n files_out.append(ext)\n else:\n files_out.append(srcfile)\n if len(files_out) < 1:\n files_out = None\n return files_out", "title": "" }, { "docid": "00a19e8793e3621beb428a0710c1a5af", "score": "0.6140286", "text": "def get_file_list(path, extensions):\n if not os.path.isdir(path):\n log_error_exit('Directory invalid.')\n else:\n files_list = []\n # The FITS files are named in numerically ascending order with an optional\n for file in os.listdir(path):\n if file.split('.')[-1] in extensions:\n files_list.append(os.path.join(path, file))\n if len(files_list) == 0:\n log_error_exit('No files with given extensions found !')\n return sorted(files_list)", "title": "" }, { "docid": "c7430a703d144531116f0ef4a00b716f", "score": "0.6116649", "text": "def get_files_with_suffix(dir_name, suffixes):\n import os\n found_filenames = []\n for localpath, directories, filenames in os.walk(dir_name):\n for filename in filenames:\n for suffix in suffixes:\n if filename.endswith(suffix):\n found_filenames.append(os.path.join(localpath, filename))\n found_filenames.sort()\n return found_filenames", "title": "" }, { "docid": "8227919c4a92055b6e0f0aae097b9067", "score": "0.6115884", "text": "def _splitext(fname):\n dir, filename = os.path.split(fname)\n for special_ext in ['.nii.gz', '.tar.gz']:\n if filename.endswith(special_ext):\n stem, ext = filename[:-len(special_ext)], special_ext\n return os.path.join(dir, stem), ext\n # If no special case, behaves like the regular splitext\n stem, ext = os.path.splitext(filename)\n return os.path.join(dir, stem), ext", "title": "" }, { "docid": "c98eb8adabba7af1cf0a28c40a232498", "score": "0.6114838", "text": "def listfiles(mypath, mytype):\n result = []\n # Match Extension.\n for foundfile in Path(mypath).glob(\"**/*.*\"):\n filetype = str(foundfile).split(\".\")[-1]\n if filetype in mytype:\n # Force string type was Posix(path)\n result.append(str(foundfile))\n return result", "title": "" }, { "docid": "73d0dc0b8cca82a9ec101f45f6470ac3", "score": "0.61128", "text": "def remove_all_file_extensions(filepath: Path, extensions: list = None):\n\n def _remove_next_suffix(f):\n if extensions is not None and len(extensions) > 0:\n return f.suffix in extensions\n return len(f.suffix) > 0\n\n while _remove_next_suffix(filepath):\n filepath = filepath.with_suffix(\"\")\n\n return filepath", "title": "" }, { "docid": "81288495451141c1b16e070d8a7535db", "score": "0.61059415", "text": "def list_all_extension_files(directory_path, 
extension='.txt'):\n files_paths = []\n for r, d, f in os.walk(directory_path):\n f = [os.path.join(r,file) for file in f if file.find(extension) != -1]\n files_paths.append(f)\n\n files_paths = list(np.hstack(files_paths))\n files_paths = [tuple((in_file, in_file.replace(\"text_volumes\",\"speeches\").replace(\".txt\",\".csv\"))) for in_file in files_paths]\n\n files_paths = [tuple((in_file,out_file)) for in_file, out_file in files_paths if not os.path.exists(out_file)]\n return files_paths", "title": "" } ]
dfc4c0b6369e884b99877afc55bfd359
Build a Cypher query based on given parameters.
[ { "docid": "d75bc1828ce83ede1c9f954653f24192", "score": "0.5487266", "text": "def _build_dependencies_query(\n cls, team_id, topic, label, node, filter_on_config=False, impacted=False\n ):\n where = \"\"\n order = \"(n)<-[r]-(m)\" if impacted else \"(n)-[r]->(m)\"\n query = (\n \"MATCH(n:{topic}_{label}{{name: '{name}'}}) \"\n \"OPTIONAL MATCH {order} {where}\"\n \"RETURN n,r,m ORDER BY m.name LIMIT 10\"\n )\n\n # Filter the dependencies using the labels declared in the configuration\n if filter_on_config:\n label_config = ConfigController.get_label_config(team_id, label)\n regex = r\"^.*(\\[[A-Za-z]+(, [A-Za-z]+)*?\\])$\"\n\n match = re.search(regex, label_config[\"qos\"])\n if match:\n deps = match.group(1)[1:-1].split(\", \")\n where += \"WHERE '{}_{}' IN LABELS(m) \".format(topic, deps[0])\n\n for dep in deps[1:]:\n where += \"OR '{}_{}' IN LABELS(m) \".format(topic, dep)\n\n return query.format(\n where=where, order=order, topic=topic, label=label, name=node\n )", "title": "" } ]
[ { "docid": "72ebf75d0424beb0d9398dd9dcc5f821", "score": "0.7439297", "text": "def build_query(self, params):\n pass", "title": "" }, { "docid": "a95e410d68282d18b99170eb9c871fae", "score": "0.70802385", "text": "def _build_query(self, params, boolean='AND'):\n query = []\n for p in params:\n if isinstance(p, str):\n # direct query string\n query.append(p)\n\n elif isinstance(p, tuple) and len(p) == 3:\n # <field> <op> <value> triplet\n (field, op, value) = p\n op_ = '-' if op == '!=' else ''\n\n if isinstance(value, list):\n value_ = '(\"' + '\",\"'.join(value) + '\")'\n elif isinstance(value, tuple):\n value_ = '[%s TO %s]' % value\n else:\n quote = '\"' if not '*' in str(value) else ''\n value_ = quote + str(value) + quote\n\n query.append('%s%s:%s' % (op_, field, value_))\n\n elif isinstance(p, tuple) and len(p) == 2:\n # group/subquery with boolean operator\n (op, subquery) = p\n query.append('(' + self._build_query(subquery, op) + ')')\n\n return (' '+boolean+' ').join(query)", "title": "" }, { "docid": "2c96f74b26acea483825aadcbb969353", "score": "0.6558855", "text": "def build_query(self, params):\n phrase = params[\"phrase\"]\n slop = params[\"slop\"]\n\n query = {}\n query[\"query\"] = {\"match\":{}}\n\n infl_form = {}\n infl_form[\"type\"] = \"phrase\"\n infl_form[\"query\"] = phrase\n infl_form[\"slop\"] = slop\n query[\"query\"][\"match\"][\"inflected_form\"] = infl_form\n\n query = json.dumps(query)\n return query", "title": "" }, { "docid": "2a8397a2a8e9edc0608094386c96404c", "score": "0.653392", "text": "def _build_query_string(cls, parameters: dict) -> str:\n\t\tquery_string = '?'\n\n\t\t# Builds a list of tuples for easy iteration like:\n\t\t# [('channel', 'channel-1'), ('channel', 'channel-2'), ('assetId', 'asset-1'), ...]\n\t\tparameter_tuples = [(parameter_name, parameter_value) \n\t\t\tfor parameter_name, parameter_values in parameters.items()\n\t\t\t\tfor parameter_value in parameter_values]\n\n\t\tfor index, parameter_tuple in enumerate(parameter_tuples):\n\t\t\tname, value = parameter_tuple\n\t\t\tvalue = quote(value.encode()) if type(value) == str else value\n\n\t\t\tif name == 'orderBy':\n\t\t\t\tvalue = cls._convert_order_by(value)\n\n\t\t\tquery_string += f'{name}={value}' if index == 0 else f'&{name}={value}'\n\n\t\treturn query_string", "title": "" }, { "docid": "0a18169ab89094b2a6bab17605534b4f", "score": "0.6477496", "text": "def _build_query(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "76313a0e15cad414416780ca9e335fb0", "score": "0.6379244", "text": "def _build_search(query, params):\n #PLACEHOLDER -- only searches the objects table and does the simplest\n #search possible for now\n query_terms = params.copy() if( params and params != {} ) else _translate_query(query)\n arguments = _args()\n keywords = \"\"\n query_string = \"\"\n\n if 'query_string' in query_terms:\n query_string = query_terms['query_string']\n arguments.update({'query_string':query_string})\n del query_terms['query_string']\n if 'keywords' in query_terms:\n arguments.update({'keywords':query_terms['keywords']})\n del query_terms['keywords']\n # TODO: need to do all the query string processing here \n # but, for now just a simple search assumption will work by default\n\n if query_string != \"\":\n keywords += query_string\n \n return \"ca_objects?q=\"+keywords, arguments", "title": "" }, { "docid": "c157292ed4cc40e351d63d9d0af82576", "score": "0.6359702", "text": "def generate_query(self):\n return ''.join(['SELECT ',\n ', '.join(self.columns),\n ' 
',\n 'FROM molecule ',\n ' '.join(self.joins),\n ' ',\n 'WHERE ',\n '(', ') AND ('.join(self.wheres), ')'])", "title": "" }, { "docid": "cba63cfec2c1ae532de6e05ad75c5ffe", "score": "0.6332386", "text": "def _set_sql_query(self, query_params):\n query = f'SELECT * FROM {CATS_TABLE}'\n # attributes in query string\n if query_params.get('attribute'):\n query += ' ORDER BY '\n # may be 1>= attrs\n for attr in query_params['attribute']:\n query += f'{attr}, '\n query = query.rstrip(', ')\n\n # order in query string\n if query_params.get('order'):\n order = query_params['order'][0].upper()\n query += f' {order}'\n\n # offset in query string\n if query_params.get('offset'):\n offset = query_params['offset'][0]\n query += f' OFFSET {offset}'\n\n # limit in query string\n if query_params.get('limit'):\n limit = query_params['limit'][0]\n query += f' LIMIT {limit}'\n\n return query", "title": "" }, { "docid": "6aafa03146dc86896e8b4203a577f391", "score": "0.6295957", "text": "def make_query(self, **kw):\n query = kw.pop(\"query\", {})\n\n query.update(self.get_request_query())\n query.update(self.get_custom_query())\n query.update(self.get_keyword_query(**kw))\n\n sort_on, sort_order = self.get_sort_spec()\n if sort_on and \"sort_on\" not in query:\n query.update({\"sort_on\": sort_on})\n if sort_order and \"sort_order\" not in query:\n query.update({\"sort_order\": sort_order})\n\n logger.info(\"make_query:: query={} | catalog={}\".format(\n query, self.catalog))\n\n return query", "title": "" }, { "docid": "1216cd9552ca67c038d89c1a33795cb6", "score": "0.6221052", "text": "def build_query(self):\n\n # initalize config\n self.config.update_config()\n\n # get the base query from the config\n query = self.config.get_base_query()\n return query", "title": "" }, { "docid": "d11b2046780debe8f975dcb0277e234c", "score": "0.62166256", "text": "def buildQuery(self):\n if not self.Valid:\n print(\"Invalid usage: cannot build.\")\n return False\n if not self.needsBuilt:\n return self.queryString\n\n filterDict = dict()\n theFilters = dict()\n if len(self._andFilters) > 0:\n theFilters[\"and\"] = self._andFilters\n if len(self._orFilters) > 0:\n theFilters[\"or\"] = self._orFilters\n filterDict[\"filter\"] = theFilters\n queryString = json.dumps(filterDict)\n\n self.queryString = queryString\n return queryString", "title": "" }, { "docid": "bb15855e6fd47d6e9d3a76d89a8b5ace", "score": "0.6071394", "text": "def _build_query(symbol: str, start_date: str, end_date: str, order_by: str):\n query = \"select * from stock_price where symbol = '{symbol}'\".format(symbol=symbol)\n if start_date:\n query += \" and time >= '{}'\".format(start_date)\n if end_date:\n query += \" and time <= '{}'\".format(end_date)\n\n if order_by == 'ASC':\n # Default\n pass\n elif order_by == 'DESC':\n query += ' order by time desc'\n\n return query", "title": "" }, { "docid": "d46b3852c7f93eb9f1037b46961e8edb", "score": "0.60509795", "text": "def query_generator(self, kwargs):\n\n query_addition = \"\"\n\n if \"quarter\" in kwargs:\n query_addition += f\" & qtr == {kwargs['quarter']}\"\n if \"down\" in kwargs:\n query_addition += f\" & down == {kwargs['down']}\"\n if \"half\" in kwargs:\n query_addition += f\" & game_half == 'Half{kwargs['half']}'\"\n if \"close\" in kwargs:\n if kwargs['close'] is True:\n query_addition += f\" & score_differential >= -12 & score_differential <= 12\"\n else:\n query_addition += f\" & (score_differential < -12 | score_differential > 12)\"\n if \"rz\" in kwargs:\n if kwargs['rz'] is True:\n query_addition += 
f\" & yardline_100 <= 25\"\n else:\n query_addition += f\" & yardline_100 > 25\"\n if \"where\" in kwargs:\n if kwargs['where'] is 'home':\n query_addition += f\" & home_team == '{self.team}'\"\n else:\n query_addition += f\" & away_team == '{self.team}'\"\n if \"play\" in kwargs:\n if kwargs['play'] is 'run':\n query_addition += f\" & rush_attempt == 1\"\n else:\n query_addition += f\" & pass_attempt == 1\"\n if \"since\" in kwargs:\n if kwargs['since'] is 'sep':\n query_addition += f\" & (game_date.str.slice(5, 7) == '09' | game_date.str.slice(5, 7) == '10' | game_date.str.slice(5, 7) == '11' | game_date.str.slice(5, 7) == '12')\"\n elif kwargs['since'] is 'oct':\n query_addition += f\" & (game_date.str.slice(5, 7) == '10' | game_date.str.slice(5, 7) == '11' | game_date.str.slice(5, 7) == '12')\"\n elif kwargs['since'] is 'nov':\n query_addition += f\" & (game_date.str.slice(5, 7) == '11' | game_date.str.slice(5, 7) == '12')\"\n else:\n query_addition += f\" & game_date.str.slice(5, 7) == '12'\"\n\n return query_addition", "title": "" }, { "docid": "c570052933c609ad72ebabe6e69a8bfd", "score": "0.598974", "text": "def data_query(self, parents=None):\n filter_q = self.filter_query(parents=parents)\n q = db.session.query()\n stmt, q = self._add_statement(q)\n\n filter_sq = filter_q.subquery()\n q = q.filter(stmt.subject == filter_sq.c.subject)\n q = q.filter(stmt._attribute.in_(self.project()))\n\n q = q.add_column(stmt.subject.label('id'))\n q = q.add_column(stmt._attribute.label('attribute'))\n q = q.add_column(stmt._value.label('value'))\n\n if parents is not None and self.node.attribute:\n parent_stmt, q = self._add_statement(q)\n q = q.filter(stmt.subject == parent_stmt._value)\n q = q.filter(parent_stmt._attribute == self.node.attribute.name)\n q = q.add_column(parent_stmt.subject.label('parent_id'))\n\n q = q.order_by(filter_sq.c.subject.desc())\n q = q.order_by(stmt.created_at.asc())\n return q", "title": "" }, { "docid": "dd1bf23e219921f97adc765b4e477518", "score": "0.5976437", "text": "def make_query(columns, filter):\n if columns is not None:\n # the user only wants to report a subset of the columns\n query = \"SELECT \" + columns + \" FROM variants\"\n# elif cancers != 'none':\n# query = \"SELECT \" + columns + \",civic_gene_abbreviations,cgi_gene_abbreviations FROM variants\"\n else:\n # report the kitchen sink\n query = \"SELECT * FROM variants\"\n if filter is not None:\n # add any non-genotype column limits to the where clause\n query += \" WHERE \" + filter\n return query", "title": "" }, { "docid": "3662e9f0259cba0673eaafff569ccaaa", "score": "0.59676003", "text": "def make_query(request, **kw):\n\n # build the catalog query\n query = {\n \"sort_limit\": get_sort_limit(request),\n \"sort_on\": get_sort_on(request),\n \"sort_order\": get_sort_order(request),\n \"SearchableText\": get_query(request),\n }\n\n # inject keyword args\n query.update(kw)\n\n # inject the creator if given\n if get_creator(request):\n query[\"Creator\"] = get_creator(request)\n\n logger.info(\"Catalog Query --> %r\", query)\n return query", "title": "" }, { "docid": "a0196bcfee0829e1a676506cb8f483df", "score": "0.59151345", "text": "def query_from_params(self, params):\n q = {}\n start, rows = 0, 10\n if \"start\" in params:\n start = int(params[\"start\"])\n if \"rows\" in params:\n rows = int(params[\"rows\"])\n\n if \"lat\" in params and \"lon\" in params:\n lat = float(params[\"lat\"])\n lon = float(params[\"lon\"])\n q[\"loc\"] = {\n \"$near\": [lat, lon]\n }\n for atr in [\"city\", \"state\", 
\"country\"]:\n if atr in params:\n q[atr] = params[atr]\n return self.query(start=start, rows=rows, **q)", "title": "" }, { "docid": "324d683b973caa2910387cddae4780e5", "score": "0.5909418", "text": "def _create_query_params(self, kwargs):\n if None == kwargs or 0 == len(kwargs):\n return (\"\", [])\n\n if not isinstance(kwargs, basestring):\n pairs = kwargs.items()\n stmt = \" WHERE \" + \\\n \" AND \".join([ x[0] + (None is x[1] and \" IS NULL\" or \" = ?\")\n for x in pairs ])\n\n args = [x[1] for x in filter(lambda x: None is not x[1], pairs)]\n else:\n stmt = \" WHERE \" + kwargs\n args = []\n return (stmt, args)", "title": "" }, { "docid": "49409b5c49174457e063fc4a1f2caf74", "score": "0.5909326", "text": "def build_query(cols=None, tables=None, group_by=None, order_by=None, **kwargs):\n assert len(tables)\n\n # Create SELECT line\n cols_str = \", \".join(cols) if cols else \"*\"\n query = f'SELECT {cols_str} FROM '\n\n # Create FROM line\n # Automatically add bracelets around strings with space inside them. Useful for nested queries.\n query += ' NATURAL JOIN '.join(f'({table})' if ' ' in table else table for table in tables)\n\n # Apply all filters in the kwargs dict\n constraints = []\n for col_name, value in kwargs.items():\n # Empty filters don't need to be applied\n if value not in (None, ''):\n # Use \"LIKE\" for strings, and \"==\" otherwise\n if isinstance(value, str):\n constraints.append(f'{col_name} LIKE \"{value}\"')\n else:\n constraints.append(f'{col_name} == {value}')\n\n # Create WHERE line if there are constraints\n if constraints:\n query += ' WHERE ' + ' AND '.join(constraints)\n\n # Create GROUP BY line if needed\n if group_by:\n query += ' GROUP BY ' + group_by\n\n # Create ORDER BY line if needed\n if order_by:\n query += ' ORDER BY ' + order_by\n\n return query", "title": "" }, { "docid": "1fb4cc87b5249fb99610ec14d3f62cf8", "score": "0.58879614", "text": "def GenerateQuery(unused_ref, args, request):\n customer_id = ConvertOrgIdToObfuscatedCustomerId(args.organization)\n labels = FilterLabels(args.labels)\n labels_str = ','.join(labels)\n request.query = 'parent==\\\"customerId/{0}\\\" && \\\"{1}\\\" in labels'.format(\n customer_id, labels_str)\n\n return request", "title": "" }, { "docid": "ff8ba2a0ee0c22e8f3b0dc0581901893", "score": "0.5856376", "text": "def make_query(self, ns):\n if issubclass(self.model_class, db.Model):\n query = db.Query(self.model_class, namespace=ns)\n for f in self.filters:\n query.filter(\"%s %s\" % (f[0], f[1]), f[2])\n else:\n query = self.model_class.query(namespace=ns)\n for f in self.filters:\n query = query.filter(ndb.FilterNode(*f))\n return query", "title": "" }, { "docid": "a5aeac92141db6643f5dae341690653e", "score": "0.5830022", "text": "def build_query(\n self,\n regions=None,\n genes=None,\n effect_types=None,\n family_ids=None,\n person_ids=None,\n inheritance=None,\n roles=None,\n sexes=None,\n variant_type=None,\n real_attr_filter=None,\n ultra_rare=None,\n frequency_filter=None,\n return_reference=None,\n return_unknown=None,\n limit=None,\n pedigree_fields=None):\n # pylint: disable=too-many-arguments\n self.query_builder.reset_product()\n\n self.query_builder.build_select()\n\n self.query_builder.build_from()\n\n self.query_builder.build_join()\n\n self.query_builder.build_where(\n regions=regions,\n genes=genes,\n effect_types=effect_types,\n family_ids=family_ids,\n person_ids=person_ids,\n inheritance=inheritance,\n roles=roles,\n sexes=sexes,\n variant_type=variant_type,\n real_attr_filter=real_attr_filter,\n 
ultra_rare=ultra_rare,\n frequency_filter=frequency_filter,\n return_reference=return_reference,\n return_unknown=return_unknown,\n pedigree_fields=pedigree_fields,\n )\n\n self.query_builder.build_group_by()\n self.query_builder.build_limit(limit)", "title": "" }, { "docid": "9bf3e320668377af9f499baa5b818919", "score": "0.5816653", "text": "def make_query(config):\n\n query = {}\n\n items = ['object', 'radius', 'max_out', 'source', 'output', 'mime', 'sort']\n\n for item in items:\n query[item] = str(config[item])\n\n return query", "title": "" }, { "docid": "2159f3098a64c4db73cdb716ccac6fd2", "score": "0.57821774", "text": "def make_where_statement(self, table_name, params):\n if not params:\n return (\" 1 \", ())\n column_names = [row[1] for row in self.get_rows(\"PRAGMA table_info(%s)\" % table_name)]\n for column in params.keys():\n if column not in column_names:\n raise KeyError(\"Error: Database column %s not in our database\" % column)\n # Construct our SQL statement\n where_statement = ' AND '.join(['\"%s\" = ?' % column for (column, value) in sorted(params.items())])\n where_values = tuple([value for (column, value) in sorted(params.items())])\n return (where_statement, where_values)", "title": "" }, { "docid": "dd14224ced22e5c08b3414302d16b36f", "score": "0.5775194", "text": "def _build_query(env, start, end, certname=None):\n query = ExtractOperator()\n query.add_field(FunctionOperator('count'))\n query.add_field('status')\n subquery = AndOperator()\n subquery.add(GreaterEqualOperator('producer_timestamp', start))\n subquery.add(LessOperator('producer_timestamp', end))\n if certname is not None:\n subquery.add(EqualsOperator('certname', certname))\n if env != '*':\n subquery.add(EqualsOperator('environment', env))\n query.add_query(subquery)\n query.add_group_by(\"status\")\n return query", "title": "" }, { "docid": "86f87622ad6a714500a7b6ccb0df8944", "score": "0.5773749", "text": "def query(self):\n if 'order' in self.config['database']:\n order = 'ORDER BY {order}'.format(**self.config['database'])\n else:\n order = ''\n\n if isinstance(self.cols, tuple):\n cols = self.cols\n else:\n cols = (self.cols,)\n\n where, params = parse_queries(self.selection, self.mapping)\n if where:\n where = 'WHERE {conditions}'.format(conditions=' AND '.join(where))\n else:\n where = ''\n limit = (self.slice[0].stop or sys.maxint) - (self.slice[0].start or 0)\n\n sql = \"\"\"\n SELECT {cols} FROM {table}\n {where}\n {order}\n LIMIT {limit}\n OFFSET {offset}\n \"\"\".format(\n cols=', '.join(self.config[key]['col'] for key in cols),\n table=self.config['database']['table'],\n where=where, order=order, limit=limit,\n offset=self.slice[0].start or 0)\n\n return sql, params", "title": "" }, { "docid": "a12abdcbed66b2452ab47d4061b8ce7d", "score": "0.5761608", "text": "def sql_with_params(self):\r\n return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()", "title": "" }, { "docid": "c45b3d40d8a26bb7193088e942d5a655", "score": "0.5736782", "text": "def build_query(self, query_dict):\n\n # Loop over keys in request dictionary, 
match them up to database fields\n # Build query accounting for the exceptions\n qry_sci_name = ''\n qry_common_name = ''\n qry_group = ''\n qry_native = ''\n qry_pollut_tol = ''\n qry_rarity = ''\n qry_range = ''\n qry_max_age = ''\n qry_mean_weight = ''\n qry_mean_length = ''\n qry_max_length = ''\n qry_habitat = ''\n qry_ubiquity = ''\n qry_extent = ''\n qry_tolerance = ''\n qry_robustness = ''\n\n #Used in a couple of places to remove a trailing 'or '\n trailing_or = ' or'\n trailing_and = ' and'\n\n #This will be used later for habitat related fields that contain 1 for true. E.g. caves=1\n set_keys = set(query_dict.keys())\n\n for req_key, req_val in query_dict.items():\n # Is request param a valid query param\n if req_key in self.attrib:\n\n if (req_key.lower() == 'scientific_name'):\n words = req_val.split('_')\n if len(words) == 1:\n qry_sci_name = (\"( genus LIKE INITCAP('%%{0}%%') or species LIKE LOWER('%%{0}%%'))\")\n qry_sci_name = str.format(qry_sci_name, words[0])\n elif len(words) == 2:\n qry_sci_name = (\" ((genus LIKE INITCAP('%%{0}%%') and species LIKE LOWER('%%{1}%%')) or (species LIKE LOWER('%%{0}%%') and genus LIKE INITCAP('%%{1}%%')))\")\n qry_sci_name = str.format(qry_sci_name, words[0], words[1])\n\n if req_key.lower() == 'common_name':\n words = req_val.split('_')\n if len(words) == 1:\n qry_common_name = (\" commonname LIKE INITCAP('%%{0}%%')\")\n qry_common_name = str.format(qry_common_name, words[0])\n elif len(words) == 2:\n qry_common_name = (\" (commonname LIKE INITCAP('%%{0}%%') and commonname LIKE INITCAP('%%{1}%%'))\")\n qry_common_name = str.format(qry_common_name, words[0], words[1])\n elif len(words) == 3:\n qry_common_name = (\" (commonname LIKE INITCAP('%%{0}%%') and commonname LIKE INITCAP('%%{1}%%') and commonname LIKE INITCAP('%%{2}%%'))\")\n qry_common_name = str.format(qry_common_name, words[0], words[1], words[2])\n\n\n # Can be multiple groups\n #e.g. 
grp='Black Bass' or grp='Mullet'\n if (req_key.lower() == 'group'):\n words = req_val.lower()\n words = words.replace('_', ' ')\n words = words.split(',')\n for idx, grp in enumerate(words):\n if idx != 0:\n qry_group += \" or \"\n qry_group += str.format(\" lower(grp) = '{0}'\", grp.lower())\n continue\n\n if (req_key.lower() == 'native'):\n qry_native = str.format(\" native='{0}'\",req_val)\n continue\n\n # pollution tolerance can be: \"I\", \"T\", \"M\", or \"U\"\n # if (req_key.lower() == 'pollut_tol'):\n # if (req_key.lower() == 'tolerance'):\n # words = req_val.lower()\n # words = words.split(',')\n # for idx, pollut in enumerate(words):\n # if idx != 0:\n # qry_pollut_tol += \" or \"\n # qry_pollut_tol += str.format(\" lower(pollut_tol) = '{0}'\", pollut)\n # continue\n\n if (req_key.lower() == 'rarity'):\n range = req_val.split('_')\n if len(range) == 2:\n qry_rarity = str.format(\" rarity>={0} and rarity<={1}\", range[0], range[1])\n continue\n\n if (req_key.lower() == 'max_age'):\n range = req_val.split('_')\n if len(range) == 2:\n qry_max_age = str.format(\" max_age>={0} and max_age<={1}\", range[0], range[1])\n continue\n\n if (req_key.lower() == 'mean_weight'):\n range = req_val.split('_')\n if len(range) == 2:\n qry_mean_weight = str.format(\" mean_weight>={0} and mean_weight<={1}\", range[0], range[1])\n continue\n\n if (req_key.lower() == 'mean_length'):\n range = req_val.split('_')\n if len(range) == 2:\n qry_mean_length = str.format(\" mean_length>={0} and mean_length<={1}\", range[0], range[1])\n continue\n\n if (req_key.lower() == 'max_length'):\n range = req_val.split('_')\n if len(range) == 2:\n qry_max_length = str.format(\" max_length>={0} and max_length<={1}\", range[0], range[1])\n continue\n\n ubiquity_range = [0, 75]\n if (req_key.lower() == 'ubiquity'):\n range = req_val.split('_')\n if len(range) == 2 and (int(range[0]) != ubiquity_range[0] or int(range[1]) != ubiquity_range[1]):\n qry_ubiquity = str.format(\" ubiquity>={0} and ubiquity<={1}\", range[0], range[1])\n continue\n\n extent_range = [0, 77]\n if (req_key.lower() == 'extent'):\n range = req_val.split('_')\n if len(range) == 2 and (int(range[0]) != extent_range[0] or int(range[1]) != extent_range[1]):\n qry_extent = str.format(\" extent>={0} and extent<={1}\", range[0], range[1])\n continue\n\n tolerance_range = [0, 94]\n if (req_key.lower() == 'tolerance'):\n range = req_val.split('_')\n if len(range) == 2 and (int(range[0]) != tolerance_range[0] or int(range[1]) != tolerance_range[1]):\n qry_tolerance = str.format(\" tolerance>={0} and tolerance<={1}\", range[0], range[1])\n continue\n\n robustness_range = [0, 47]\n if (req_key.lower() == 'robustness'):\n range = req_val.split('_')\n if len(range) == 2 and (int(range[0]) != robustness_range[0] or int(range[1]) != robustness_range[1]):\n qry_robustness = str.format(\" robustness>={0} and robustness<={1}\", range[0], range[1])\n continue\n\n if qry_habitat.endswith(trailing_and):\n qry_habitat = qry_habitat[:-len(trailing_and)]\n\n # The rest of the query parameters are are used if value == 1\n # e.g. 
caves=1, springs=1, headwaters=1\n for cat in self.categories:\n intersect = set_keys & self.categories[cat]\n if len(intersect) > 0:\n print(cat)\n print(intersect)\n for param in intersect:\n val = query_dict[param]\n if (val == '1'):\n self.query_categories[cat].append(param)\n\n qry_habitat = ''\n query_cat = ''\n for category in self.query_categories:\n query_cat = ''\n for val in self.query_categories[category]:\n query_cat += str.format(\" {0}='1' or\", val)\n\n if query_cat.endswith(trailing_or):\n query_cat = query_cat[:-len(trailing_or)]\n\n if len(query_cat) > 0:\n qry_habitat += ' (' + query_cat + ') and'\n print(qry_habitat)\n\n if qry_habitat.endswith(trailing_and):\n qry_habitat = qry_habitat[:-len(trailing_and)]\n\n query = \"select * from fishproperties where\"\n first_condition = True\n\n if qry_sci_name != '':\n query += qry_sci_name\n first_condition = False\n\n if qry_common_name != '':\n if not first_condition:\n query += ' and'\n query += qry_common_name\n first_condition = False\n\n if qry_group != '':\n if not first_condition:\n query += ' and'\n query += qry_group\n first_condition = False\n\n if qry_native != '':\n if not first_condition:\n query += ' and'\n query += qry_native\n first_condition = False\n\n if qry_pollut_tol != '':\n if not first_condition:\n query += ' and'\n query += qry_pollut_tol\n first_condition = False\n\n if qry_rarity != '':\n if not first_condition:\n query += ' and'\n query += qry_rarity\n first_condition = False\n\n if qry_range != '':\n if not first_condition:\n query += ' and'\n query += qry_range\n first_condition = False\n\n if qry_max_age != '':\n if not first_condition:\n query += ' and'\n query += qry_max_age\n first_condition = False\n\n if qry_mean_weight != '':\n if not first_condition:\n query += ' and'\n query += qry_mean_weight\n first_condition = False\n\n if qry_mean_length != '':\n if not first_condition:\n query += ' and'\n query += qry_mean_length\n first_condition = False\n\n if qry_max_length != '':\n if not first_condition:\n query += ' and'\n query += qry_max_length\n first_condition = False\n\n if qry_ubiquity != '':\n if not first_condition:\n query += ' and'\n query += qry_ubiquity\n first_condition = False\n\n if qry_extent != '':\n if not first_condition:\n query += ' and'\n query += qry_extent\n first_condition = False\n\n if qry_tolerance != '':\n if not first_condition:\n query += ' and'\n query += qry_tolerance\n first_condition = False\n\n if qry_robustness != '':\n if not first_condition:\n query += ' and'\n query += qry_robustness\n first_condition = False\n\n if qry_habitat != '':\n if not first_condition:\n query += ' and'\n query += qry_habitat\n first_condition = False\n\n print(query)\n\n return query", "title": "" }, { "docid": "c45b3d40d8a26bb7193088e942d5a655", "score": "0.5736782", "text": "def _construct_query(self, query_chain):\n # here we take the query_chain and convert to a real sql sentence\n res_dict = query_chain.pop(0)\n\n for q in query_chain:\n if q[\"action\"] == \"_db__where\":\n if res_dict[\"action\"] == \"_db__select_all\":\n res_dict.update({\"action\": \"_db__select\"})\n\n condition = res_dict.get(\"condition\", \"\")\n if condition:\n condition = \" AND \".join([condition, q[\"condition\"]])\n else:\n condition = q[\"condition\"]\n\n res_dict.update({\"condition\": condition})\n elif q[\"action\"] == \"_db__select_related\":\n for model_join in q[\"fields\"]:\n join_const = getattr(self, q[\"action\"]).format(**model_join)\n res_dict[\"join\"] += join_const\n\n select = 
res_dict[\"select\"][:]\n\n if select == \"COUNT(*)\":\n pass\n elif select == \"*\":\n select = select.replace(\n \"*\",\n \"{left_table}.*, {f_formatter}\".format(\n left_table=model_join[\"left_table\"], f_formatter=model_join[\"fields_formatter\"]\n ),\n )\n res_dict[\"select\"] = select\n else:\n res_dict[\"select\"] += \", \" + model_join[\"fields_formatter\"]\n\n # if we are not counting, then we can assign ordering\n operations = [\"COUNT\", \"MAX\", \"MIN\", \"SUM\", \"AVG\", \"STDDEV\"]\n\n # we must get error, if we set only one field and update instance\n if res_dict[\"action\"] == \"_db__update\" and len(res_dict[\"field_values\"]) == 1:\n res_dict.update({\"action\": \"_db__update_single_field\"})\n\n if res_dict.get(\"select\", \"\").split(\"(\")[0] not in operations:\n res_dict[\"ordering\"] = self._ordering_syntax(res_dict.get(\"ordering\", []))\n else:\n res_dict[\"ordering\"] = \"\"\n query = getattr(self, res_dict[\"action\"]).format(**res_dict)\n query = self._query_clean(query)\n\n logger.debug(\"QUERY: {}, VALUES: {}\".format(query, res_dict.get(\"field_values\")))\n return query, res_dict.get(\"field_values\")", "title": "" }, { "docid": "0c13ae92a74d4f3943d1cd2abdd2d58f", "score": "0.572707", "text": "def _build_query(env, start, end, certname=None):\n query = ExtractOperator()\n query.add_field(FunctionOperator('count'))\n query.add_field('status')\n subquery = AndOperator()\n subquery.add(GreaterEqualOperator('producer_timestamp', start))\n subquery.add(LessOperator('producer_timestamp', end))\n if certname is not None:\n subquery.add(EqualsOperator('certname', certname))\n if env != '*':\n subquery.add(EqualsOperator('environment', env))\n query.add_query(subquery)\n query.add_group_by(\"status\")\n return query", "title": "" }, { "docid": "6a1dd0e6509804a1d36f57e0e6c5750a", "score": "0.5720286", "text": "def query() -> str:\n return \"\"\"\n MATCH (doc:Publication)<-[:located_in]-(subject)-[association:chemical_to_disease_or_phenotypic_feature_association]->(obj)-[:located_in]->(sec_doc:Publication)\n WHERE association.relation <> 'null' and obj.name <> 'null' and subject.name <> 'null' and doc.pmid in {pmids} and association.relation <> 'null' and type(association) <> 'could_refer_to' and type(association) <> 'located_in' and sec_doc.pmid = doc.pmid\n RETURN DISTINCT subject.name as subject, subject.synonym as subj_syn, association.relation as predicate, obj.name as obj, obj.synonym as disease_synonym, type(association) as rel_type, doc.pmid as pmid, doc.abstract as abstract\n \"\"\"", "title": "" }, { "docid": "6002db82a2bae451e4ed335385a5f2a2", "score": "0.57197946", "text": "def build_where_clause(args: dict, table_args_dict: dict) -> str:\n where_clause: str = ''\n for key in args.keys():\n if key in table_args_dict.keys():\n if key == 'query':\n # if query arg is supplied than we just need to parse it and only it\n return args[key].strip()\n else:\n values_list: list = argToList(args[key])\n for raw_value in values_list:\n for field in table_args_dict[key]:\n value = raw_value\n if key == 'url':\n value = f'*{raw_value}*'\n if not where_clause:\n # the beginning of the where part should start without OR\n where_clause += f\"{field}'{value}'\"\n else:\n where_clause += f\" OR {field}'{value}'\"\n return where_clause", "title": "" }, { "docid": "fd48cb6a7ac2b019d07866613d70b519", "score": "0.5693227", "text": "def generate_where_clause(state=None, product=None,\n issue=None, min_complaints=None):\n where_inner = []\n where_outer = []\n\n if state and not 
state == \"All\":\n where_inner.append(\"ccdb.state = '{}'\".format(state))\n\n if product and not product == \"All\":\n where_inner.append(\"ccdb.product = '{}'\".format(product))\n\n if issue and not issue == \"All\":\n where_inner.append(\"ccdb.issue = '{}'\".format(issue))\n\n if min_complaints:\n where_outer.append(\"complaint_count > {}\".format(min_complaints))\n\n if len(where_inner) > 0:\n where_inner = \"WHERE \" + \" AND \".join(where_inner)\n else:\n where_inner = \"\"\n\n if len(where_outer) > 0:\n where_outer = \"WHERE \" + \" AND \".join(where_outer)\n else:\n where_outer = \"\"\n\n return where_inner, where_outer", "title": "" }, { "docid": "a7e6e63e44f78e69cf88a741ac17d2fb", "score": "0.5682573", "text": "def build_query(self, params):\n term = params[\"term\"]\n size = params[\"size\"]\n page = params[\"page\"]\n\n query = {}\n query[\"query\"] = {\"match\":{}}\n query[\"query\"][\"match\"][\"inflected_form\"] = term\n\n query[\"sort\"] = []\n query[\"sort\"].append({\"_score\": \"desc\"})\n query[\"sort\"].append({\"word_count\": \"asc\"})\n\n query[\"from\"] = (page - 1) * size\n query[\"size\"] = size\n\n query = json.dumps(query)\n return query", "title": "" }, { "docid": "930b18d9b4e9205bd3e078001ad05e29", "score": "0.5674045", "text": "def generate(self):\n result = self.query\n for param in self.params:\n result = result.replace(param.raw, param.sql_value())\n\n return result", "title": "" }, { "docid": "2aa19aae92a018fc0f26673cb0f78bfb", "score": "0.56575936", "text": "def filter_query(self, parents=None):\n q = db.session.query()\n stmt, q = self._add_statement(q)\n q = q.add_column(stmt.subject)\n\n if parents is not None and self.node.attribute:\n parent_stmt, q = self._add_statement(q)\n q = q.filter(stmt.subject == parent_stmt._value)\n q = q.filter(parent_stmt._attribute == self.node.attribute.name)\n q = q.filter(parent_stmt.subject.in_(parents))\n\n q = self.filter(q, stmt)\n q = q.group_by(stmt.subject)\n q = q.order_by(stmt.subject.asc())\n\n if self.node.root:\n q = q.limit(self.node.limit)\n q = q.offset(self.node.offset)\n\n return q", "title": "" }, { "docid": "ddd52219e9c4b5adb4afd6a139da0d56", "score": "0.56358266", "text": "def derived(self, statement, params=(), replace=False):\n return Query(\n db=self.db,\n sql=(statement,) if replace else self.sql + (statement,),\n params=self.params + params,\n )", "title": "" }, { "docid": "2cd8fe36d01097c20060583de2e8bc83", "score": "0.562555", "text": "def make_query(self):", "title": "" }, { "docid": "9fe5061d0a503021cd5b78f56d96ffed", "score": "0.5612804", "text": "def build_query(media_type=None, tags=None, sort=None, max_duration=None, published_after=None, categories=None,\n region=None, random=False, limit=constants.DEFAULT_QUERY_LIMIT,\n offset=constants.DEFAULT_QUERY_OFFSET):\n # NOTE: query time skyrockets with added OPTIONAL patterns, see:\n # https://stackoverflow.com/questions/25609691/alternative-for-optional-keyword-in-sparql-queries\n #\n # NOTE: query assumes ordering of group concat operations are same across tag and source - have eyeballed and\n # looks good\n if published_after is not None:\n raise NotImplementedError('The parameter `publishedAfter` is not yet implemented')\n if region is not None:\n raise NotImplementedError('The parameter `region` is not yet implemented')\n if sort is not None and random:\n raise InvalidInputParameterCombination('Cannot specify both `sort` and `random`.')\n\n regular_fields = '?title ?image ?version ?programme ?pid ?media ?duration ?publicationDate 
?masterBrand'\n query_string = f\"\"\"\n SELECT\n {regular_fields}\n {build_tags_select_statement()}\n {build_genres_select_statement()}\n\n WHERE {{\n {build_regular_fields_pattern_statement()}\n {build_tags_pattern_statement()}\n {build_genres_pattern_statement()}\n\n {build_sparql_anding_statement(\n object_list=tags,\n predicate_str='datalab:tag/datalab:tagValue',\n object_binding='programme'\n )}\n {build_values_oring_statement('media', media_type)}\n {build_filter_statement('duration', '<', max_duration)}\n {build_filter_statement('published_date', '>', published_after)}\n {build_sparql_anding_statement(\n object_list=categories,\n predicate_str='po:genre/datalab:genreKey',\n object_binding='programme'\n )}\n }}\n GROUP BY {regular_fields}\n {'ORDER BY RAND()' if random else 'ORDER BY ' + build_sort_statement(sort) if sort is not None else ''}\n LIMIT {limit}\n OFFSET {offset}\n \"\"\"\n\n return query_string", "title": "" }, { "docid": "864d460b0beb79116a83d3be73eff240", "score": "0.5609568", "text": "def compiledQuery(self, columns='', where='', order_by='',\n distinct='', limit='', offset='',\n group_by='', having='', for_update=False,\n relationDict=None,\n bagFields=False,\n count=False, excludeLogicalDeleted=True,excludeDraft=True,\n ignorePartition=False,ignoreTableOrderBy=False,\n addPkeyColumn=True):\n # get the SqlCompiledQuery: an object that mantains all the informations to build the sql text\n self.cpl = SqlCompiledQuery(self.tblobj.sqlfullname,relationDict=relationDict,maintable_as=self.aliasCode(0))\n distinct = distinct or '' # distinct is a text to be inserted in the sql query string\n \n # aggregate: test if the result will aggregate db rows\n aggregate = bool(distinct or group_by)\n \n # group_by == '*': if all columns are aggregate functions, there will be no GROUP BY columns, \n # but SqlQueryCompiler need to know that result will aggregate db rows\n if group_by == '*':\n group_by = None\n\n if not ignoreTableOrderBy and not aggregate:\n order_by = order_by or self.tblobj.attributes.get('order_by')\n self.init()\n if not 'pkey' in self.cpl.relationDict:\n self.cpl.relationDict['pkey'] = self.tblobj.pkey\n \n # normalize the columns string\n columns = columns or ''\n columns = columns.replace(' ', ' ')\n columns = columns.replace('\\n', '')\n columns = columns.replace(' as ', ' AS ')\n columns = columns.replace(' ,', ',')\n if columns and not columns.endswith(','):\n columns = columns + ','\n \n # expand * and *filters: see self.expandMultipleColumns\n if '*' in columns:\n col_list = [col for col in gnrstring.split(columns, ',') if col]\n new_col_list = []\n for col in col_list:\n col = col.strip()\n if col.startswith('*'):\n new_col_list = new_col_list + self.expandMultipleColumns(col[1:], bagFields)\n else:\n new_col_list.append(col)\n columns = ','.join(new_col_list)\n \n # translate @relname.fldname in $_relname_fldname and add them to the relationDict\n if where:\n where = PERIODFINDER.sub(self.expandPeriod, where)\n \n currentEnv = self.db.currentEnv\n env_conditions = dictExtract(currentEnv,'env_%s_condition_' %self.tblobj.fullname.replace('.','_'))\n if env_conditions:\n wherelist = [where] if where else []\n for condition in env_conditions.values():\n wherelist.append('( %s )' %condition)\n where = ' AND '.join(wherelist)\n\n partition_kwargs = dictExtract(self.tblobj.attributes,'partition_')\n\n if not ignorePartition and partition_kwargs:\n wherelist = [where] if where else []\n for k,v in partition_kwargs.items():\n if currentEnv.get('current_%s' 
%v):\n                    wherelist.append('( $%s=:env_current_%s )' % (k,v))\n            where = ' AND '.join(wherelist)\n        columns = self.updateFieldDict(columns)\n        where = self.updateFieldDict(where or '')\n        order_by = self.updateFieldDict(order_by or '')\n        group_by = self.updateFieldDict(group_by or '')\n        having = self.updateFieldDict(having or '')\n\n        col_list = uniquify([col for col in gnrstring.split(columns, ',') if col])\n        new_col_list = []\n        for col in col_list:\n            col = col.strip()\n            if col.startswith('SUM'):\n                aggregate = True\n            if not ' AS ' in col:\n                if col.startswith('$') and col[1:].replace('_', '').isalnum():\n                    as_ = col[1:]\n                else:\n                    # replace non-word chars with _ and check for numbers\n                    as_ = self.db.colToAs(col)\n                col = '%s AS \"%s\"' % (col, as_)\n            else:\n                colbody, as_ = col.split(' AS ', 1)\n                # leave the col as is, but save the AS name to recover the original db column name from the selection result\n                self.cpl.aliasDict[as_.strip()] = colbody.strip()\n            new_col_list.append(col)\n\n        # build the clean and complete sql string for the columns, but still all fields are expressed as $fieldname\n        columns = ',\\n'.join(new_col_list)\n\n        # translate all fields and related fields from $fldname to t0.fldname, t1.fldname... and prepare the JOINs\n        colPars = {}\n        for key, value in self.cpl.relationDict.items():\n            # self._currColKey manages exploding columns in the recursive getFieldAlias without adding too many parameters\n            self._currColKey = key\n            colPars[key] = self.getFieldAlias(value)\n        if count: # if the query is executed in count mode...\n            order_by = '' # sort has no meaning\n            if group_by: # the number of rows is defined only from GROUP BY cols, so clean aggregate functions from columns.\n                columns = group_by # was 't0.%s' % self.tblobj.pkey # ???? \n            elif distinct:\n                pass # leave columns as is to calculate distinct values\n            else:\n                columns = 'count(*) AS gnr_row_count' # use the sql count function instead of loading all data\n        elif addPkeyColumn and self.tblobj.pkey and not aggregate:\n            columns = columns + ',\\n' + '%s.%s AS pkey' % (self.aliasCode(0),self.tblobj.pkey) # when possible add pkey to all selections\n            columns = columns.lstrip(',') # if columns was '', now it starts with ','\n        else:\n            columns = columns.strip('\\n').strip(',')\n\n        # replace $fldname with tn.fldname: finally the real SQL columns!\n        columns = gnrstring.templateReplace(columns, colPars, safeMode=True)\n\n        # replace $fldname with tn.fldname: finally the real SQL where!\n\n        where = gnrstring.templateReplace(where, colPars)\n        # if excludeLogicalDeleted==True we have additional conditions in the where clause\n        logicalDeletionField = self.tblobj.logicalDeletionField\n        draftField = self.tblobj.draftField\n        if logicalDeletionField:\n            if excludeLogicalDeleted is True:\n                extracnd = '%s.%s IS NULL' % (self.aliasCode(0),logicalDeletionField)\n                if where:\n                    where = '%s AND %s' % (extracnd, where)\n                else:\n                    where = 
extracnd\n order_by = gnrstring.templateReplace(order_by, colPars)\n having = gnrstring.templateReplace(having, colPars)\n group_by = gnrstring.templateReplace(group_by, colPars)\n \n if distinct:\n distinct = 'DISTINCT '\n elif distinct is None or distinct == '':\n if self._explodingRows:\n if not aggregate: # if there is not yet a group_by\n distinct = 'DISTINCT ' # add a DISTINCT to remove unusefull rows: eg. a JOIN used only for a where, not for columns\n if order_by:\n xorderby=(('%s '%order_by.lower()).replace(' ascending ','').replace(' descending ','').replace(' asc ','').replace(' desc','')).split(',')\n lowercol=columns.lower()\n for xrd in xorderby:\n if not xrd.strip() in lowercol:\n columns = '%s, \\n%s' % (columns, xrd)\n #order_by=None\n if count:\n columns = '%s.%s' % (self.aliasCode(0),self.tblobj.pkey)\n # Count the DISTINCT maintable pkeys, instead of count(*) which will give the number of JOIN rows.\n # That gives the count of rows on the main table: the result is different from the actual number\n # of rows returned by the query, but it is correct in terms of main table records.\n # It is the right behaviour ???? Yes in some cases: see SqlSelection._aggregateRows\n \n self.cpl.distinct = distinct\n self.cpl.columns = columns\n self.cpl.where = where\n self.cpl.group_by = group_by\n self.cpl.having = having\n self.cpl.order_by = order_by\n self.cpl.limit = limit\n self.cpl.offset = offset\n self.cpl.for_update = for_update\n #raise str(self.cpl.get_sqltext(self.db)) # uncomment it for hard debug\n return self.cpl", "title": "" }, { "docid": "3f2a3634474b6bca3382e5a869663d66", "score": "0.5586637", "text": "def rebuild_query(self, comments=True, pretty=True):\n if not self.tree:\n self._logger.error('El arbol no ha sido generado todavia. Por tanto, todavia no hay ninguna query que '\n 'reconstruir. Por favor ejecuta la funcion parse_query() para generar el arbol')\n raise LookupError\n\n if not self.__queries:\n self._logger.error('El arbol no ha sido procesado todavia. Por favor ejecuta la funcion rename_tree() para '\n 'procesar el arbol.')\n raise LookupError\n\n query = ' '.join(self.tree.leaves())\n query = (self._untokenize(query)\n .replace(' , ', ', ')\n .replace(' ; ', ';')\n .replace(' ( ', ' (')\n .replace(' ) ', ') ')\n #.replace(' = ', '=')\n .replace(' . ', '.')\n )\n\n if self.udfs:\n for point, scape in zip(self.udfs, self._udfs_norm):\n pattern = re.compile(scape, re.IGNORECASE)\n query = pattern.sub(point, query)\n\n if pretty:\n if comments:\n final = '\\n'.join(self.__comments) + '\\n' + sqlparse.format(query, reindent=True, keyword_case='upper')\n self.__comments = []\n return final\n else:\n return sqlparse.format(query, reindent=True, keyword_case='upper')\n else:\n return query", "title": "" }, { "docid": "4df307799efca220bbeb0a0c969810c8", "score": "0.55771285", "text": "def construct_query(*args, dialect='MSSS', allow_coalesce=True):\n n = len(args)\n assert all([isinstance(o, UserOption) for o in args]), \"Not all args are UserOptions\"\n invalids = [not o.validate() for o in args]\n assert not any(invalids), \"{:d}/{:d} options have not been validated\".format(\n sum(invalids), n)\n assert all([args[0].context == args[i+1].context for i in range(n-1)]), \"Different\" +\\\n \"contexts associated with the User Opts. 
Ensure these are the same.\"\n stmt = Statement(args[0].context)\n\n # ==== GET ALL TABLES AND FORM BASIC JOIN SUBTREE ============\n context = args[0].context\n nodes = [o.get_table() for o in args]\n unique_nodes = list(set(nodes))\n join_tree = minimum_subtree(unique_nodes)\n\n # The primary table is considered the \"root node\" of the tree -- edges are undirected.\n primary = [o for o in args if not o.is_secondary]\n assert len(primary) == 1, f'Expecting one primary field. Got {len(primary)}.'\n primary = primary[0]\n primary_tbl = primary.get_table()\n\n # topological sort\n edges = [(k, v[0][0]) for (i,(k,v)) in enumerate(join_tree.items()) if i > 0]\n G = graph.Graph(edges, directed=False)\n sorted_nodes = G.topological_sort(leave_until_last=primary_tbl)\n # (Recreate G since the topological_sort mutates G (ikr - should fix this!))\n G = graph.Graph(edges, directed=False)\n\n # ======= CREATE CTEs WHENEVER WE FIND A NESTED AGGREGATION ============\n field_tbl_lkp = defaultdict(list)\n for arg in args:\n field_tbl_lkp[arg.get_table()].append(arg)\n all_fields = dict()\n\n # Traverse the join tree of the selected tables, making CTEs wherever\n # there exists secondary aggregation (i.e. not of the primary variable).\n for i, v in enumerate(sorted_nodes[:-1]):\n v_fields = field_tbl_lkp[v]\n any_v_has_agg = any([arg.has_aggregation for arg in v_fields])\n\n # Get 'child tables' and 'parent tables' (wrt topological sort)\n child_tbls = G._graph[v] - set(sorted_nodes[i:])\n parent_tbl = join_tree[v][0][0]\n parent_fks, v_pks = join_tree[v][0][1], join_tree[v][1][1]\n\n # The current table contains an aggregation, so create a CTE.\n if any_v_has_agg:\n # Get all arguments for table which are not aggregated\n f_non_agg = [arg for arg in v_fields if not arg.has_aggregation]\n f_non_agg += flatten([all_fields[w] for w in child_tbls])\n\n # Add in table PKs if not already included in `f_non_agg`.\n f_non_agg_names = [f.sql_fieldname for f in f_non_agg]\n pk_names_to_add = [f for f in v_pks if f not in f_non_agg_names]\n pks_to_add = [construct_simple_field(f, v, context) for f in pk_names_to_add]\n\n # Make a copy of all non-aggregated AND aggregated fields.\n f_non_agg_cte = [f.copy() for f in f_non_agg]\n f_agg_cte = [arg for arg in v_fields if arg.has_aggregation]\n f_agg = [f.copy() for f in f_agg_cte]\n\n # consolidate CTE fields and add primary designator to (any) field in root\n cte_fields = pks_to_add + f_non_agg_cte + f_agg_cte\n for field in cte_fields:\n if field.sql_fieldname in v_pks:\n field.is_secondary = False\n break\n\n # Defer lookups until the final query\n for field in cte_fields:\n field.perform_lkp = False\n\n # Construct CTE\n cte = CTENode([parent_tbl], # parent\n v_pks, # pk\n cte_fields) # cte_fields\n stmt.ctes.append(cte)\n\n # Point the [references to the aggregations] outside the CTE to the CTE field\n for f in f_agg:\n f.field_alias = f.field_alias # this is *not* a noop! 
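                # Hedged illustration (table and column names invented, not
                # produced by this function verbatim): after the re-pointing
                # above, a nested aggregation is computed once inside the CTE
                # and only referenced by alias from the outer query, roughly:
                #   WITH cte AS (SELECT customer_id, SUM(order_total) AS sum_total
                #                FROM orders GROUP BY customer_id)
                #   SELECT c.name, COALESCE(cte.sum_total, 0) FROM customers c
                #   JOIN cte ON cte.customer_id = c.id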
(@property...)\n f.sql_item = '{alias}' + f.field_alias\n f.table = cte\n f.set_aggregation(None, force=True)\n f.set_transform(None, force=True)\n if allow_coalesce:\n f.coalesce = 0 # assumes that aggregation is numeric\n\n for f in f_non_agg:\n f.table = cte\n f.sql_item = '{alias}'+f._field_alias_logic(will_perform_lkp=False)\n\n # Push these pointers to within the CTE up to the parent (although not PKs)\n all_fields[v] = f_non_agg + f_agg\n\n # # overwrite the node `v` in the subtree with `cte`\n # join_cond = tree_final[v]\n # join_cond = (join_cond[0], (cte, join_cond[1][1]))\n # tree_final = replace_in_ordered_dict(tree_final, v, cte, join_cond)\n else:\n all_fields[v] = v_fields + flatten([all_fields[w] for w in child_tbls])\n\n # Set args to the args propagated up to the root node\n primary_children = G._graph[primary_tbl]\n args = field_tbl_lkp[primary_tbl] + flatten([all_fields[c] for c in primary_children])\n # Place the resulting tree into the FROM clause of the Statement object.\n\n nodes = [o.get_table() for o in args]\n unique_nodes = list(set(nodes))\n tree_final = minimum_subtree(unique_nodes)\n\n stmt._from.update(tree_final)\n\n # Add dimension tables (if requested to joins)\n lkp_joins = []\n for o in args:\n if o.perform_lkp:\n dtbl = o.dimension_table\n assert not dtbl.is_cte, \"Currently unable to support CTEs for dimension \" + \\\n \"tables. (But it only requires thinking about how \" + \\\n \"to avoid multiple copies.)\"\n fk = o.sql_fieldname\n lkp_joins.append(((o.table, fk), (dtbl, dtbl.pk[0])))\n\n # Generate statement in order to populate aliases dict\n stmt._from.generate_basic_statement(force_alias=(len(lkp_joins) > 0))\n stmt._from.add_lookups_to_statement(lkp_joins)\n\n # === CONSTRUCT SELECT / WHERE ===========\n has_agg = any([arg.has_aggregation for arg in args])\n\n for o in args:\n # Get table alias for field, depending on whether has lookup table or not\n if not o.perform_lkp:\n alias = stmt.aliases[o.get_table()]\n # coalesce is usually None anyway\n coalesce = o.coalesce if allow_coalesce else None\n else:\n dtbl = o.dimension_table\n fk = rm_alias_placeholder(o.sql_item)\n alias = stmt.lkp_aliases[((o.table, fk), (dtbl, dtbl.pk[0]))]\n coalesce = context.coalesce_default if allow_coalesce else None\n\n sel, where = o.sql_transform(alias=alias, dialect=dialect, coalesce=coalesce)\n # SELECT\n stmt.select.append(sel)\n # WHERE\n if len(where) > 0:\n stmt.where.extend(where)\n # GROUP BY\n if has_agg and not o.has_aggregation:\n gby = re.sub('AS [a-zA-Z0-9_]+$', '', sel).strip()\n stmt.groupby.append(gby)\n\n return stmt.generate_statement(dialect=dialect)", "title": "" }, { "docid": "97b15f5e2a70c1eb7088c3897a2bfef2", "score": "0.5570735", "text": "def _build_query(self, metric, node, ts_from, ts_to):\n resolution = ts_to - ts_from\n if resolution > PROMETHEUS_TS_LIMIT:\n ts_from = ts_to - PROMETHEUS_TS_LIMIT\n\n query_head = \"http://{}:{}/api/v1/query_range?query=\".format(\n self.tsdb_ip, self.tsdb_port)\n query_times = \"&start={}&end={}&step=1s\".format(ts_from, ts_to)\n\n query_selectors = self._get_query_selectors(metric, node)\n query_selector = '{}{{{}}}'.format(metric, query_selectors)\n\n query = \"{}{}{}\".format(query_head, query_selector, query_times)\n return query", "title": "" }, { "docid": "923405ac27fc863e4ecf89c859843768", "score": "0.55545264", "text": "def get_clean_query(self, additional_params={}):\n # NOTE: right now this makes us pretty vulnerable\n # to SQL Injection. 
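        # A minimal sketch of the safer alternative (hypothetical DB-API
        # cursor, not part of this class): pass user values as driver
        # parameters instead of splicing them into the SQL text, e.g.
        #   cursor.execute("SELECT * FROM events WHERE user_id = %s",
        #                  [additional_params.get("user_id")])
        # The {{placeholder}} handling below is plain string substitution,
        # which is exactly what creates the injection risk noted above.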
We should make this safer at some point.\n # Graphs likely have the same problem.\n reg = re.compile('(\\{\\{.*?\\}\\})')\n query = self.query\n matches = reg.findall(query)\n if matches:\n for match in matches:\n attr = match[2:len(match)-2]\n if attr in additional_params:\n query = query.replace(match, additional_params[attr])\n else:\n query = query.replace(match, \"\")\n return query", "title": "" }, { "docid": "374044813c297baa727f456ec5a2a772", "score": "0.5546569", "text": "def buildQueryFromFilters(date, rainfall, temperature, month, year):\n\t# Main part of the query\n\tquery = sql.SQL(\"SELECT * FROM climate\")\n\n\tconditions = []\n\tvalues = []\n\n\t# Check if each filter is actually applied, \n\t# if so, its respective part of the query is appended to the main part\n\tif date != None:\n\t\tclause = sql.SQL(\"date = to_date(%s, 'DD-MM-YYYY')\")\n\t\tconditions.append(clause)\n\t\tvalues.append(date)\n\tif temperature != None:\n\t\tclause = sql.SQL(\"temperature = (%s)\")\n\t\tconditions.append(clause)\n\t\tvalues.append(temperature)\n\tif rainfall != None:\n\t\tclause = sql.SQL(\"rainfall = real '%s'\")\n\t\tconditions.append(clause)\n\t\tvalues.append(rainfall)\n\tif month != None:\n\t\tclause = sql.SQL(\"EXTRACT(MONTH FROM date) = (%s)\")\n\t\tconditions.append(clause)\n\t\tvalues.append(month)\n\tif year != None:\n\t\tclause = sql.SQL(\"EXTRACT(YEAR FROM date) = (%s)\")\n\t\tconditions.append(clause)\n\t\tvalues.append(year)\n\n\t# Put all together\n\tif len(conditions) > 0:\n\t\tquery = sql.SQL(\" \").join([query, sql.SQL(\"WHERE\")])\n\n\t\tconj = sql.SQL(\" AND \").join(conditions)\n\n\t\tquery = sql.SQL(\" \").join([query, conj])\n\n\t\n\treturn query, values", "title": "" }, { "docid": "e9b14da9066124004791a46561a7d775", "score": "0.55110025", "text": "def query(**kw):", "title": "" }, { "docid": "dfaf1e939ed62d271c7e53d6dfd18c69", "score": "0.54942393", "text": "def create_query(vaccine=None, age_group=\"adult\", without_registration=None, self_payer=False, monday=None,\n tuesday=None, wednesday=None, thursday=None, friday=None, saturday=None, sunday=None):\n\n open_hours = {'monday': monday, 'tuesday': tuesday, 'wednesday': wednesday, 'thursday': thursday, 'friday': friday,\n 'saturday': saturday, 'sunday': sunday}\n\n query = \"\"\"\n SELECT loc.vacc_id, loc.latitude, loc.longitude\n FROM vacc_center_location as loc\n INNER JOIN vacc_center_type as typ USING(vacc_id)\n \"\"\"\n if age_group not in AGE_GROUP_SET: raise Exception('age_group argument can be only: adult, teenage, child')\n if int(self_payer) not in [0, 1]: raise Exception('self_payer argument can be only: True, False')\n query_where = f\"\"\"WHERE typ.{age_group} = 1\n AND typ.self_payers = {int(self_payer)}\n \"\"\"\n if without_registration is not None:\n if int(without_registration) not in [0, 1]: raise Exception('without_registration argument can be only: True, False, None')\n query_where += f\"AND typ.without_registration = {int(without_registration)} \\n\"\n\n if vaccine is not None:\n if vaccine.lower() not in VACCINES_SET:\n raise Exception('vaccine argument can be only: COMIRNATY, SPIKEVAX, JANSSEN, Vaxzevria, None')\n query += \"INNER JOIN vacc_center_vaccines AS vacc USING(vacc_id) \\n\"\n query_where += f\"AND vacc.{vaccine.lower()} = 1 \\n\"\n\n open_hours = {day: hour for (day, hour) in open_hours.items() if hour is not None}\n if len(open_hours) != 0:\n query += \"INNER JOIN vacc_center_hours as hour USING(vacc_id) \\n\"\n list_hours = []\n for (day, hour) in open_hours.items():\n if not 
((hour >= 0) and (hour <= 24)): raise Exception('days arguments can be only set to None or number between 0 to 24')\n list_hours.append(f\"(hour.{day}_open <= {hour} AND hour.{day}_closed >= {hour})\")\n\n query_hours = \"AND (\"\n n = len(list_hours)\n for i in range(n):\n query_hours += list_hours[i]\n if (i+1) == n:\n query_hours += \")\"\n else:\n query_hours += \" OR \"\n query_where += query_hours\n return query + query_where", "title": "" }, { "docid": "3f83fb856d12b26ca2684c9d9c9dfb40", "score": "0.5437405", "text": "def build_goods_query(\n good_ids: List[str],\n currency_id: str,\n is_searching_for_sellers: bool,\n is_search_query: bool,\n) -> Query:\n if is_search_query:\n # the OEF does not accept attribute names consisting of integers only\n good_ids = [PREFIX + good_id for good_id in good_ids]\n\n data_model = _build_goods_datamodel(\n good_ids=good_ids, is_supply=is_searching_for_sellers\n )\n constraints = [Constraint(good_id, ConstraintType(\">=\", 1)) for good_id in good_ids]\n constraints.append(Constraint(\"currency_id\", ConstraintType(\"==\", currency_id)))\n constraint_expr = cast(List[ConstraintExpr], constraints)\n\n if len(good_ids) > 1:\n constraint_expr = [Or(constraint_expr)]\n\n query = Query(constraint_expr, model=data_model)\n return query", "title": "" }, { "docid": "3716fc1f19951155d518f81b705b75c9", "score": "0.54263324", "text": "def cypher(self, query, params=None):\r\n path = cypher_path\r\n params = dict(query=query,params=params)\r\n resp = self.request.post(path, params)\r\n\r\n # Cypher data hack\r\n resp.total_size = len(resp.results.data)\r\n resp.results = (Neo4jResult(result[0], self.config) for result in resp.results.data)\r\n return resp", "title": "" }, { "docid": "800590f0ff9faf64788181eb91302965", "score": "0.5422505", "text": "def buildQuery (self, obj_type, ids=None, params=None, attributes=None):\n\n if type(obj_type) is type(''):\n wrapper = KNOWN_WRAPPERS.get(obj_type.lower(), None)\n if wrapper is None:\n raise KeyError(\"obj_type of %s not supported by getOjbects(). E.g. use 'Image' etc\" % obj_type)\n else:\n raise AttributeError(\"getObjects uses a string to define obj_type, E.g. 'Image'\")\n\n q = self.getQueryService()\n if params is None:\n params = omero.sys.Parameters()\n if params.map is None:\n params.map = {}\n\n # get the base query from the instantiated object itself. 
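        # (A fuller sketch: for obj_type='Image' the wrapper typically returns
        # a string like "select obj from Image obj join fetch
        # obj.details.owner as owner join fetch obj.details.group ...", and
        # the ids/owner/attribute clauses built below are ANDed onto it.
        # Illustrative only; the exact text comes from each wrapper's
        # _getQueryString().)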
E.g \"select obj Project as obj\"\n query = wrapper()._getQueryString()\n\n clauses = []\n # getting object by ids\n if ids != None:\n clauses.append(\"obj.id in (:ids)\")\n params.map[\"ids\"] = rlist([rlong(a) for a in ids])\n\n # support filtering by owner (not for some object types)\n if params.theFilter and params.theFilter.ownerId and obj_type.lower() not in [\"experimentergroup\", \"experimenter\"]:\n clauses.append(\"owner.id = (:eid)\")\n params.map[\"eid\"] = params.theFilter.ownerId\n\n # finding by attributes\n if attributes != None:\n for k,v in attributes.items():\n clauses.append('obj.%s=:%s' % (k, k) )\n params.map[k] = omero_type(v)\n\n if clauses:\n query += \" where \" + (\" and \".join(clauses))\n\n return (query, params, wrapper)", "title": "" }, { "docid": "3193232c6e9b86e2cb3be30784d0c07c", "score": "0.5413109", "text": "def make_query(store, query, prefix=\"\"):\r\n query = common.parse_query(query)\r\n q = Query()\r\n q.prefix = prefix\r\n q.offset = common.safeint(query.pop('offset', None), 0)\r\n q.limit = common.safeint(query.pop('limit', 20), 20)\r\n if q.limit > 1000:\r\n q.limit = 1000\r\n sort = query.pop('sort', None)\r\n \r\n nested = (prefix != \"\")\r\n \r\n for k, v in query.items():\r\n # key foo can also be written as label:foo\r\n k = k.split(':')[-1]\r\n if v is None:\r\n q.requested[k] = v\r\n elif isinstance(v, dict):\r\n # make sure op is ==\r\n v = dict((k + '.' + key, value) for key, value in v.items())\r\n q2 = make_query(store, v, prefix=prefix + k + \".\")\r\n #@@ Anand: Quick-fix\r\n # dbstore.things looks for key to find whether type is required or not. \r\n q2.key = k \r\n if q2.conditions:\r\n q.conditions.append(q2)\r\n else:\r\n q.requested[k] = q2\r\n else:\r\n k, op = parse_key(k)\r\n q.add_condition(k, op, None, v)\r\n \r\n if not nested:\r\n q.assert_type_required()\r\n \r\n type = get_thing(store, q.get_type())\r\n #assert type is not None, 'Not found: ' + q.get_type()\r\n for c in q.conditions:\r\n if not isinstance(c, Query):\r\n c.datatype = find_datatype(type, c.key, c.value)\r\n \r\n if sort:\r\n parse_key(sort) # to validate key\r\n q.sort = web.storage(key=sort, datatype=find_datatype(type, sort, None))\r\n else:\r\n q.sort = None\r\n \r\n return q", "title": "" }, { "docid": "bac716271d29523c3ec034478d633e21", "score": "0.54028887", "text": "def create_query(neo4j_entities, entity_value):\n\n def get_value(index):\n return SINONIMOS_NEO4J[entity_value[index]] if SINONIMOS_NEO4J.get(entity_value[index])\\\n else entity_value[index]\n\n get_query_part = {\n \"espacio\": lambda x: \"(n:Room {{ type: '{}' }})\".format(get_value(x)),\n \"sensor\": lambda x: (\"(d:Device)\", f\" '{get_value(x)}' in d.type\"),\n \"orientacion\": lambda x: \" n.orientacion CONTAINS '{}'\".format(get_value(x)),\n \"edificio\": lambda x: (\"(b:Building)\", \" b.name CONTAINS '{}'\".format(get_value(x))),\n \"nombre\": lambda x: (\"(b:Building)\", \" b.name CONTAINS '{}'\".format(get_value(x))),\n \"lugares\": lambda x: \" n.name CONTAINS '{}'\".format(get_value(x))\n }\n\n\n dic = {}\n for entity in neo4j_entities:\n key = entity[1]\n if entity[1] == \"nombre\":\n key = \"edificio\"\n dic[key] = get_query_part[entity[1]](entity[0])\n\n espacio = dic[\"espacio\"] if dic.get(\"espacio\") else \"\"\n sensor = dic[\"sensor\"] if dic.get(\"sensor\") else (\"\", \"\")\n orientacion = dic[\"orientacion\"] if dic.get(\"orientacion\") else \"\"\n edificio = dic[\"edificio\"] if dic.get(\"edificio\") else (\"\", \"\")\n lugares = dic[\"lugares\"] if 
dic.get(\"lugares\") else \"\"\n    ret = []\n\n    # create the relationships that are optional in the query\n    if espacio is not \"\" and sensor != (\"\", \"\"):\n        sensor = (sensor[0]+\"<-[:HAS]-\", sensor[1])\n    if espacio is not \"\" and edificio != (\"\", \"\"):\n        espacio += \"<-[:HAS]-(f:Floor)<-[:HAS]-\"\n\n    # Add information to the query that is not stated explicitly but is needed to traverse the relationships in Neo4j.\n\n    # If the question mentions a sensor and a place but no room type.\n    if espacio is \"\" and sensor != (\"\", \"\") and lugares is not \"\":\n        espacio = \"(n:Room)\"\n        sensor = (sensor[0]+\"<-[:HAS]-\", sensor[1])\n    # If the question mentions a name or an orientation but no room type.\n    if espacio is \"\" and (lugares is not \"\" or orientacion is not \"\"):\n        espacio = \"(n:Room)\"\n    # If the question asks about sensors in a building without specifying a room type.\n    if espacio is \"\" and edificio != (\"\", \"\") and sensor != (\"\", \"\"):\n        espacio = \"(n:Room)<-[:HAS]-(f:Floor)<-[:HAS]-\"\n        sensor = (sensor[0]+\"<-[:HAS]-\", sensor[1])\n    # If all 4 are present\n    # orientacion lugares edificio sensor\n    # X X X X\n    if orientacion is not \"\" and lugares is not \"\" and edificio != (\"\", \"\") and sensor != (\"\", \"\"):\n        lugares = \"AND \" + lugares\n        orientacion = \"WHERE \" + orientacion\n        edificio = (edificio[0], \"AND \" + edificio[1])\n        sensor = (sensor[0], \"AND \" + sensor[1])\n\n    # If 3 are present\n    # orientacion lugares edificio sensor\n    # X X X -\n    # X X - X\n    # - X X X\n    # X - X X\n    if orientacion is not \"\" and lugares is not \"\" and edificio != (\"\", \"\") and sensor == (\"\", \"\"):\n        lugares = \"AND \" + lugares\n        orientacion = \"WHERE \" + orientacion\n        edificio = (edificio[0], \"AND \" + edificio[1])\n    if orientacion is not \"\" and lugares is not \"\" and edificio == (\"\", \"\") and sensor != (\"\", \"\"):\n        lugares = \"AND \" + lugares\n        orientacion = \"WHERE \" + orientacion\n        sensor = (sensor[0], \"AND \" + sensor[1])\n    if orientacion is \"\" and lugares is not \"\" and edificio != (\"\", \"\") and sensor != (\"\", \"\"):\n        lugares = \"WHERE \" + lugares\n        sensor = (sensor[0], \"AND \" + sensor[1])\n        edificio = (edificio[0], \"AND \" + edificio[1])\n    if orientacion is not \"\" and lugares is \"\" and edificio != (\"\", \"\") and sensor != (\"\", \"\"):\n        orientacion = \"WHERE \" + orientacion\n        sensor = (sensor[0], \"AND \" + sensor[1])\n        edificio = (edificio[0], \"AND \" + edificio[1])\n\n    # If 2 are present\n    # orientacion lugares edificio sensor\n    # X X - -\n    # X - X -\n    # X - - X\n    # - X X -\n    # - X - X\n    # - - X X\n\n    if orientacion is not \"\" and lugares is not \"\" and edificio == (\"\", \"\") and sensor == (\"\", \"\"):\n        lugares = \"AND \" + lugares\n        orientacion = \"WHERE \" + orientacion\n    if orientacion is not \"\" and lugares is \"\" and edificio != (\"\", \"\") and sensor == (\"\", \"\"):\n        edificio = (edificio[0], \"AND \" + edificio[1])\n        orientacion = \"WHERE \" + orientacion\n    if orientacion is not \"\" and lugares is \"\" and edificio == (\"\", \"\") and sensor != (\"\", \"\"):\n        sensor = (sensor[0], \"AND \" + sensor[1])\n        orientacion = \"WHERE \" + orientacion\n    if orientacion is \"\" and lugares is not \"\" and edificio != (\"\", \"\") and sensor == (\"\", \"\"):\n        edificio = (edificio[0], \"AND \" + edificio[1])\n        lugares = \"WHERE \" + lugares\n    if orientacion is \"\" and lugares is not \"\" and edificio == (\"\", \"\") and sensor != (\"\", \"\"):\n        sensor = (sensor[0], \"AND \" + sensor[1])\n        lugares = \"WHERE \" + lugares\n    if orientacion is \"\" and lugares is \"\" 
and edificio != (\"\", \"\") and sensor != (\"\", \"\"):\n sensor = (sensor[0], \"AND \" + sensor[1])\n edificio = (edificio[0], \"WHERE \" + edificio[1])\n # Si hay 1\n # orientacion lugares edificio sensor\n # X - - -\n # - X - -\n # - - X -\n # - - - X\n if orientacion is not \"\" and lugares is \"\" and edificio == (\"\", \"\") and sensor == (\"\", \"\"):\n orientacion = \"WHERE \" + orientacion\n if orientacion is \"\" and lugares is not \"\" and edificio == (\"\", \"\") and sensor == (\"\", \"\"):\n lugares = \"WHERE \" + lugares\n if orientacion is \"\" and lugares is \"\" and edificio != (\"\", \"\") and sensor == (\"\", \"\"):\n edificio = (edificio[0], \"WHERE \" + edificio[1])\n if orientacion is \"\" and lugares is \"\" and edificio == (\"\", \"\") and sensor != (\"\", \"\"):\n sensor = (sensor[0], \"WHERE \" + sensor[1])\n\n\n\n # Preparar los atributos de salida\n if espacio is not \"\":\n ret.append(\"n.name, n.id\")\n if sensor != (\"\", \"\"):\n ret.append(\"d.id, d.type\")\n if orientacion is not \"\":\n ret.append(\"n.orientacion\")\n\n return TEMPLATE_NEO.format(sensor[0], espacio, edificio[0], orientacion,\\\n lugares, edificio[1], sensor[1], \", \".join(ret))", "title": "" }, { "docid": "4e23579f229cdea216279b1841dfff82", "score": "0.53795767", "text": "def generate_entrez_query(self):\n ending = ') AND ' + self.gene + '[Gene Name] ' + \\\n 'AND 1000:8000[Sequence Length]'\n types = []\n for hpv_type in itertools.chain(self.harmfull, self.safe):\n types.append('\"Human papillomavirus type %d\"[Organism]' % hpv_type)\n self._query = '(' + ' OR '.join(types) + ending\n return self._query", "title": "" }, { "docid": "68cd306f78e9a696fa45ccc17d112056", "score": "0.5368679", "text": "def sql_query_str(program):\n node_type, node_children = program[0], program[1:]\n assert node_type in sql_entry_tokens and syntax_valid(program, sql_cfg)\n\n if node_type == \"SELECT_AGG\":\n select_exprs_node, joins_node, group_by_exprs_node = node_children\n where_node = None\n elif node_type == \"SELECT_AGG_WHERE\":\n (select_exprs_node,\n joins_node,\n group_by_exprs_node,\n where_node) = node_children\n else:\n raise RuntimeError(\n \"Missing node-type decoding logic for generating SQL query from AST.\"\n )\n\n return query_str(\n select_exprs=list(sql_select_exprs(select_exprs_node)),\n joins=list(sql_joins(joins_node)),\n where_exprs=list(sql_and_exprs_strs(where_node)) if where_node else [],\n group_by_exprs=list(sql_group_by_exprs(group_by_exprs_node)),\n order_by_exprs=[],\n limit_exprs=[],\n )", "title": "" }, { "docid": "f43c43ecaf245aaab63b127c15a173ed", "score": "0.53679913", "text": "def _getQueryString(self):\n query = \"select obj from Plate as obj \" \\\n \"join fetch obj.details.owner as owner join fetch obj.details.group \"\\\n \"join fetch obj.details.creationEvent \"\\\n \"left outer join fetch obj.screenLinks spl \" \\\n \"left outer join fetch spl.parent sc\"\n return query", "title": "" }, { "docid": "51fd823b07a49d65be55acc10cb16971", "score": "0.53457093", "text": "def _create_qualifiedname_query(self, type_name, *values):\n query = {\n 'typeName': type_name,\n 'excludeDeletedEntities': True,\n 'limit': 10000\n }\n entity_filter = {\n 'condition': 'AND'\n }\n criterion = []\n n = 0\n for v in values:\n criteria = {\n 'attributeName': 'qualifiedName',\n 'operator': 'STARTSWITH' if n == 0 else 'CONTAINS',\n 'attributeValue': v\n }\n n += 1\n criterion.append(criteria)\n if type_name == 'hive_table':\n # Ignore temporary tables.\n criterion.append({'operator': '=', 
'attributeName': 'temporary', 'attributeValue': False})\n entity_filter['criterion'] = criterion\n query['entityFilters'] = entity_filter\n return query", "title": "" }, { "docid": "44970fb2e787db5c9720c86d5e16d3f3", "score": "0.53439456", "text": "def create_query(\n self, query_text, locale=None, language=None, time_zone=None, timestamp=None\n ):\n if not query_text:\n query_text = \"\"\n if isinstance(query_text, (list, tuple)):\n return self._process_list(\n query_text,\n \"create_query\",\n locale=locale,\n language=language,\n time_zone=time_zone,\n timestamp=timestamp,\n )\n return self.resource_loader.query_factory.create_query(\n query_text,\n language=language,\n locale=locale,\n time_zone=time_zone,\n timestamp=timestamp,\n )", "title": "" }, { "docid": "8b4dabb7c03fa99fdfdd73c857a0b88d", "score": "0.5328662", "text": "def sql_with_where(self):\n e = get_executor(SqlRule)\n where_clause = \"WHERE \"\n where_time_filter = e.compose_where_time_filter(self)\n where_condition = e.compose_where_condition(self)\n if where_time_filter == \"\" and where_condition == \"\":\n where_clause = \"\"\n elif where_time_filter != \"\" and where_condition != \"\":\n where_clause = f\"{where_clause} {where_time_filter} AND {where_condition}\"\n else:\n where_clause = f\"{where_clause} {where_time_filter} {where_condition}\"\n final_sql = f\"{self.sql} {where_clause}\"\n return self.render_sql(final_sql)", "title": "" }, { "docid": "dae062c702cf1a20a574ddd68d4b28aa", "score": "0.532052", "text": "def construct_url(self\n , query_params: Dict\n , ) -> str:\n url_const = self.faers_ep\n initialized = False\n for param_q, param_val in query_params.items():\n # Need to add '&' to 2nd,3rd,... query_params\n if initialized:\n url_const += '&'\n else:\n initialized = True\n if type(param_val) == str and len(query_params) > 0 \\\n or type(param_val) == int:\n url_const += f'{param_q}={param_val}'\n elif type(param_val) == dict and len(param_val) > 0:\n url_const += f'{param_q}=' + \"+AND+\".join(\n [f'{sub_p_k}:{sub_p_v}' for sub_p_k, sub_p_v in param_val.items()])\n elif type(param_val) == list and len(param_val) > 0:\n url_const += f'{param_q}=' + \"+AND+\".join(param_val)\n else:\n raise TypeError\n return url_const", "title": "" }, { "docid": "f60d6e0b78286e4e35b88227402bb392", "score": "0.5318269", "text": "def get_custom_query(self):\n query = {}\n\n # searchable text queries\n q = req.get_query()\n if q:\n query[\"SearchableText\"] = q\n\n # physical path queries\n path = req.get_path()\n if path:\n query[\"path\"] = {'query': path, 'depth': req.get_depth()}\n\n # special handling for recent created/modified\n recent_created = req.get_recent_created()\n if recent_created:\n date = api.calculate_delta_date(recent_created)\n query[\"created\"] = {'query': date, 'range': 'min'}\n\n recent_modified = req.get_recent_modified()\n if recent_modified:\n date = api.calculate_delta_date(recent_modified)\n query[\"modified\"] = {'query': date, 'range': 'min'}\n\n return query", "title": "" }, { "docid": "88503be9fbc27ea6562b4928b4a8d6c0", "score": "0.530554", "text": "def get_where_clause(columns_to_query_lst):\n where_str = \"where \"\n equals_str =[]\n for row in columns_to_query_lst:\n temp_str = \"c.\" + row + \" = t.\" + row\n equals_str.append(temp_str)\n joined_str = \" AND \".join(equals_str)\n where_str = where_str + joined_str\n return where_str", "title": "" }, { "docid": "746bf5ea5425e576e9ff1f9bce37f702", "score": "0.52996916", "text": "def create_query(processed_request):\n query = {\n 
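        # The processed request is normalised into a fixed shape: a "variant"
        # block (coordinates and bases), a "datasets" block (ids plus the
        # includeDatasetResponses mode, defaulting to "ALL"), and the two
        # filter entries; fields absent from the request fall back to "" or
        # None as below.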
\"variant\": {\n \"referenceBases\": \"\" if not processed_request.get(\"referenceBases\") else processed_request.get(\"referenceBases\"),\n \"alternateBases\": \"\" if not processed_request.get(\"alternateBases\") else processed_request.get(\"alternateBases\"),\n \"referenceName\": \"\" if not processed_request.get(\"referenceName\") else processed_request.get(\"referenceName\"),\n \"start\": None if not processed_request.get(\"start\") else processed_request.get(\"start\"),\n \"end\": None if not processed_request.get(\"end\") else processed_request.get(\"end\"),\n \"assemblyId\": \"\" if not processed_request.get(\"assemblyId\") else processed_request.get(\"assemblyId\")\n },\n \"datasets\": {\n \"datasetIds\": None if not processed_request.get(\"datasetIds\") else processed_request.get(\"datasetIds\"),\n \"includeDatasetResponses\": \"ALL\" if not processed_request.get(\"includeDatasetResponses\") else processed_request.get(\"includeDatasetResponses\")\n },\n \"filters\": None if not processed_request.get(\"filters\") else processed_request.get(\"filters\"),\n \"customFilters\": None if not processed_request.get(\"customFilters\") else processed_request.get(\"customFilters\"),\n }\n\n return query", "title": "" }, { "docid": "e8e6478863c1ea4a7a2b784bc3c49cd9", "score": "0.529957", "text": "def build_where_stmt(self, ident, filters, q_filters=None, source_class=None):\n if q_filters is not None:\n stmts = self._parse_q_filters(ident, q_filters, source_class)\n if stmts:\n self._ast.where.append(stmts)\n else:\n stmts = []\n for row in filters:\n negate = False\n\n # pre-process NOT cases as they are nested dicts\n if \"__NOT__\" in row and len(row) == 1:\n negate = True\n row = row[\"__NOT__\"]\n\n for prop, operator_and_val in row.items():\n operator, val = operator_and_val\n if operator in _UNARY_OPERATORS:\n # unary operators do not have a parameter\n statement = (\n f\"{'NOT' if negate else ''} {ident}.{prop} {operator}\"\n )\n else:\n place_holder = self._register_place_holder(ident + \"_\" + prop)\n statement = f\"{'NOT' if negate else ''} {ident}.{prop} {operator} ${place_holder}\"\n self._query_params[place_holder] = val\n stmts.append(statement)\n\n self._ast.where.append(\" AND \".join(stmts))", "title": "" }, { "docid": "3c5f867e7aef319a9323809c18719262", "score": "0.5287673", "text": "def __init__ (self, database, var, conditions = (), echo = False):\n\n self.session = buildSession(\n database=database,\n echo=echo)\n if var == \"ncommits\":\n self.query = self.session.query().select_nscmlog([\"commits\",])\n elif var == \"listcommits\":\n self.query = self.session.query().select_listcommits()\n elif var == \"nauthors\":\n self.query = self.session.query().select_nscmlog([\"authors\",])\n elif var == \"listauthors\":\n self.query = self.session.query().select_listauthors()\n for condition in conditions:\n self.query = condition.filter(self.query)", "title": "" }, { "docid": "d298399d7e7d292b986fa091d5abed50", "score": "0.5285017", "text": "def full_query_pattern(self, include_conclusion: bool = False) -> str:\n query_pattern = \"\"\n for literal in self.premise:\n query_pattern += literal.sparql_patterns()\n\n if include_conclusion:\n query_pattern += self.conclusion.sparql_patterns()\n\n if \"?b\" not in query_pattern and \"?a\" not in query_pattern:\n query_projection = \"ask \"\n else:\n # insert the selectors for subject and object into the select query if they exist in the query pattern\n query_projection = \"select where \"\n\n # the resulting query would look like 
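        # Hedged examples of the resulting projections (the triple patterns
        # are invented placeholders):
        #   ask { ?s :p :o . }                -- neither ?a nor ?b in the pattern
        #   select ?a where { ?a :p :o . }    -- only ?a present
        #   select ?a ?b where { ?a :p ?b . } -- both present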
\"select ?a ?b ...\" if both cases are true\n if \"?b\" in query_pattern:\n query_projection = query_projection.replace(\"select \", \"select ?b \")\n if \"?a\" in query_pattern:\n query_projection = query_projection.replace(\"select \", \"select ?a \")\n\n # build remaining part of the query and execute it\n query_pattern = \"{\" + query_pattern + \"}\"\n return query_projection + query_pattern", "title": "" }, { "docid": "3f90b7aa24950c089ec7f416c7f71163", "score": "0.5283903", "text": "def _getQueryString(self):\n return \"select obj from %s obj join fetch obj.details.owner as owner join fetch obj.details.group \"\\\n \"join fetch obj.details.creationEvent\" % self.OMERO_CLASS", "title": "" }, { "docid": "edec057b919f8d0df46ebd8515dfc0e2", "score": "0.5279631", "text": "def _getQueryString(self):\n return \"select obj from BooleanAnnotation obj join fetch obj.details.owner as owner join fetch obj.details.group \"\\\n \"join fetch obj.details.creationEvent\"", "title": "" }, { "docid": "5238770c84b4b535af51b45d8d738378", "score": "0.52578133", "text": "def query(self):\n\n if not hasattr(self, \"_query\"):\n\n # Set up some convenience aliases.\n Query = db.Query\n Condition = Query.Condition\n Or = Query.Or\n\n # Create a query object.\n columns = \"d.id\", \"d.title\"\n self._query = Query(\"document d\", *columns).order(\"d.title\")\n\n # Add the conditions: one for each field with a value.\n conditions = []\n have_doctype = False\n for field in self.fields:\n\n # See if we got a value for this field.\n value = field.var.strip() if field.var else None\n if not value:\n continue\n\n # Remember the criterion for display to the user.\n self.criteria.append(value)\n\n # If 'selectors' is a string, it's a column in `document`.\n value_op = getQueryOp(value)\n if isinstance(field.selectors, str):\n column = f\"d.{field.selectors}\"\n conditions.append(Condition(column, value, value_op))\n continue\n\n # Build up a subquery for the field.\n have_doctype = True\n subquery = Query(\"query_term\", \"doc_id\").unique()\n\n # These are ORd together.\n selector_conditions = []\n for path in field.selectors:\n path_op = \"LIKE\" if \"%\" in path else \"=\"\n\n # Simple case: test for a string stored in the doc.\n if not path.endswith(\"/@cdr:ref[int_val]\"):\n path_test = Condition(\"path\", path, path_op)\n value_test = Condition(\"value\", value, value_op)\n selector_conditions.append((path_test, value_test))\n\n # Trickier case: find values in linked documents.\n else:\n path = path.replace(\"[int_val]\", \"\")\n title_query = Query(\"document\", \"id\").unique()\n args = \"title\", value, value_op\n title_query.where(Condition(*args))\n args = \"int_val\", title_query, \"IN\"\n title_test = Condition(*args)\n path_test = Condition(\"path\", path, path_op)\n selector_conditions.append((path_test, title_test))\n\n # Add the conditions for this field's selectors to the mix.\n subquery.where(Or(*selector_conditions))\n conditions.append(Condition(\"d.id\", subquery, \"IN\"))\n\n # Sanity check.\n if not conditions:\n raise Exception(\"No search conditions specified\")\n\n # If all the selectors are from the document table (or,\n # view, to be precise), then we still need to narrow\n # the search to this document type.\n if not have_doctype:\n self._query.join(\"doc_type t\", \"t.id = d.doc_type\")\n self._query.where(Condition(\"t.name\", self.doctype))\n # Plug the top-level conditions into the query.\n if self.match_all:\n for condition in conditions:\n self._query.where(condition)\n else:\n 
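                # "Match any" mode: OR the per-field conditions together, so a
                # document qualifies when any single field test succeeds,
                # rather than all of them as in the match_all branch above.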
self._query.where(Or(*conditions))\n\n # All the fields with values have been folded into the query.\n return self._query", "title": "" }, { "docid": "bc3e93874b247c4bb5a6d828028febe0", "score": "0.5255788", "text": "def query(self):\n self._check_mandatory_inputs()\n return self._gen_query()", "title": "" }, { "docid": "74e698e0dc148ed3565a45d92b4dec8c", "score": "0.5251471", "text": "def _get_clause(self):\r\n params = [(primary_key, sql.bindparam(None, type_=primary_key.type))\r\n for primary_key in self.primary_key]\r\n return sql.and_(*[k == v for (k, v) in params]), \\\r\n util.column_dict(params)", "title": "" }, { "docid": "bb58117d3fda3c2db7fe7688c4120b96", "score": "0.5243088", "text": "def build_query_text_request(\n **kwargs # type: Any\n):\n # type: (...) -> HttpRequest\n\n content_type = kwargs.pop('content_type', None) # type: Optional[str]\n\n api_version = \"2021-05-01-preview\"\n accept = \"application/json\"\n # Construct URL\n url = kwargs.pop(\"template_url\", '/:query-text')\n\n # Construct parameters\n query_parameters = kwargs.pop(\"params\", {}) # type: Dict[str, Any]\n query_parameters['api-version'] = _SERIALIZER.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = kwargs.pop(\"headers\", {}) # type: Dict[str, Any]\n if content_type is not None:\n header_parameters['Content-Type'] = _SERIALIZER.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = _SERIALIZER.header(\"accept\", accept, 'str')\n\n return HttpRequest(\n method=\"POST\",\n url=url,\n params=query_parameters,\n headers=header_parameters,\n **kwargs\n )", "title": "" }, { "docid": "5e0b0884588b76f9e715cfc138ae0212", "score": "0.5239496", "text": "def create_complex_query(self):\n columns = copy(self.columns)\n shuffle(columns)\n query = \"\"\"\\\n SELECT\n {col}\n FROM\n datadog_test.dbo.high_cardinality AS hc1\n JOIN (\n SELECT\n id,\n COUNT(*) col12_float\n FROM\n datadog_test.dbo.high_cardinality AS hc2\n WHERE\n hc2.col1_txt LIKE '%-%'\n AND hc2.col14_int > (\n SELECT\n AVG(hc3.col15_int)\n FROM\n datadog_test.dbo.high_cardinality AS hc3)\n GROUP BY\n hc2.id) AS hc4 ON hc4.id = hc1.id\n JOIN datadog_test.dbo.high_cardinality AS hc5 ON hc5.id = hc1.id\n WHERE\n CAST(hc5.col17_date AS VARCHAR)\n IN('2003-04-23', '2043-09-10', '1996-08-08')\n AND hc5.col13_float IS NOT NULL\n AND hc5.col17_date NOT LIKE '21%';\n \"\"\"\n # Select a range of random columns and prefix to match our alias\n return query.format(col=','.join(['hc1.' + col for col in columns[: randint(1, len(columns) - 1)]]))", "title": "" }, { "docid": "23ac357cf50e246b557978088edbcf6b", "score": "0.5237348", "text": "def __gencondition(self, querylist):\n conditions = []\n for key in self.PROPERTY_SEARCH_LIST:\n if querylist.get(key):\n conditions.append(self.PROPERTY_SEARCH_LIST[key][1] % (querylist.get(key)))\n return \" AND \".join(conditions)", "title": "" }, { "docid": "b90ad42bba7570be5827a9b27873f0c5", "score": "0.52357996", "text": "def build_sql(self, table, row, known_fkeys={}):\n # Initialize the builder properties\n self.row = row\n self.known_fkeys = known_fkeys\n return self.build_table_sql(table)", "title": "" }, { "docid": "a69e3debc69789bbd12bb2ee0e29ccfd", "score": "0.5234567", "text": "def __build_where_clause(where_columns_dictionary):\n return ' AND '.join(key + \" = ? 
\"\n if value is not None\n else key + \" IS NULL \"\n for key, value\n in where_columns_dictionary.items())", "title": "" }, { "docid": "df322a9786ad715a3d5849f5ed411c2d", "score": "0.5219735", "text": "def concat_conds(*args):\n stmt = \" WHERE 1\"\n for cond in args:\n stmt = stmt + \" AND \" + cond\n return stmt", "title": "" }, { "docid": "b443f879e890682442787a51400a4a67", "score": "0.5218033", "text": "def build_query(cls, request, model):\n # print(request.data)\n name = request.data.get(\"search_phrase\")\n criteria = request.data.get(\"search_criteria\")\n frequency = request.data.get(\"frequency\")\n age_distribution = request.data.get(\"age_distribution\")\n letters_range = request.data.get(\"letters_range\")\n\n resp = model.objects.filter()\n\n if frequency is not None and len(frequency) > 0:\n resp = model.objects.filter(frequency__in=frequency).defer(\"double_name\", \"number_of_letters\")\n\n double_name = True in letters_range\n\n # resp = resp.filter(double_name=True)\n\n if letters_range is not None and len(letters_range) > 0:\n if double_name:\n letters_range.remove(True)\n resp = resp.filter(Q(number_of_letters__in=letters_range) | Q(double_name=True))\n else:\n resp = resp.filter(double_name=False)\n resp = resp.filter(number_of_letters__in=letters_range)\n\n if age_distribution is not None and len(age_distribution) > 0:\n mask = \"age_distribution_{}__gt\"\n\n # AND age_distribution way\n # dd = {}\n #\n # for d in age_distribution:\n # key = mask.format(d)\n # dd[key] = 0\n # resp = resp.filter(**dd)\n\n # OR age_distribution way\n or_query_set = Q()\n for d in age_distribution:\n tmp = Q(**{mask.format(d): 0})\n or_query_set |= tmp\n\n resp = resp.filter(or_query_set)\n\n if criteria is not None and len(name) > 0:\n if criteria == \"start\":\n resp = resp.filter(name__lower__istartswith=name.lower())\n elif criteria == \"middle\":\n resp = resp.filter(name__lower__icontains=name.lower())\n elif criteria == \"end\":\n resp = resp.filter(name__lower__iendswith=name.lower())\n else:\n resp = resp.filter(name__lower=name)\n\n return resp", "title": "" }, { "docid": "9a699f49d4c0ba175277a65d66a2562a", "score": "0.52174175", "text": "def query_to_clojure(obj, params):\n # Serialize the query, sorted keys for deterministic substitution, no pretty printing\n data = json.dumps(obj, sort_keys=True)\n # Split the JSON into parts using 'PARAM' as seperator\n parts = data.split(\"PARAM\")\n # Escape each JSON part by serializing as a JSON list and removing the qruare brackets\n escaped_parts = [json.dumps([part])[1:-1] for part in parts]\n # The amount of parts should be one more than the amount ot parameters\n if len(parts) != len(params) + 1:\n raise RuntimeError(\"Param count mismatch: \" +\n str(len(params)) +\n \" given \" +\n str(len(parts) -1) +\n \" needed ; \" +\n data)\n # Interleave the escaped JSON parts and the parameters and put them in a (str ) clojure expression\n rval = \" \".join(list(itertools.chain(*zip([\"(query (str \"] + params, escaped_parts))) + [\") )\"])\n return rval", "title": "" }, { "docid": "c47f0f449170335a3dd6dfeb971f17fa", "score": "0.52149814", "text": "def process(self):\n q = \"?q=\"\n lst = []\n for field in self.filter_dict:\n for keyword in self.filter_dict[field]:\n for criteria in self.filter_dict[field][keyword]:\n lst.append(\"{0} {1} '{2}'\".format(field, kw_map[keyword], criteria))\n\n q += \" and \".join(lst)\n\n if self.count:\n q +=\"&count=True\"\n self.query = q\n\n return self", "title": "" }, { "docid": 
"35ea0ee25067279790679c78c36e66c2", "score": "0.52111125", "text": "def run_sql_statement(self, request, params=()):\n\n # simple string formatting RISK OF SQL INJECTION, DO NOT USE IN REAL DATABASE\n output = request % tuple([\"'\" + p + \"'\" for p in params])\n\n self.output_sql += output + \"\\n\"", "title": "" }, { "docid": "c29abca87c3ec6ed8e7655886a9e9437", "score": "0.52013975", "text": "def prepare_sql(self, **kwargs):\n source = kwargs.get(\"source\", self.source)\n if not source:\n raise ValueError(\"No source table or view specified.\")\n\n cutoff = kwargs.get(\"cutoff\", self.cutoff)\n start_time = \"start_time_local\" if self.local else \"start_time\"\n end_time = \"end_time_local\" if self.local else \"end_time\"\n\n predicates = kwargs.get(\"predicates\", [])\n predicates = [predicates] if not isinstance(predicates, list) else predicates\n\n provider_name = kwargs.get(\"provider_name\", self.provider_name)\n\n if provider_name:\n predicates.append(f\"lower(provider_name) = lower('{provider_name}')\")\n\n vts = \"'::vehicle_types,'\"\n vehicle_types = kwargs.get(\"vehicle_types\", self.vehicle_types)\n if vehicle_types:\n if not isinstance(vehicle_types, list):\n vehicle_types = [vehicle_types]\n predicates.append(f\"vehicle_type IN ('{vts.join(vehicle_types)}'::vehicle_types)\")\n\n if len(predicates) > 0:\n predicates = \" AND \".join(predicates) + \" AND \"\n else:\n predicates = \"\"\n\n timeranges = [\n f\"({start_time} <= %(start)s AND {end_time} > %(start)s)\",\n f\"({start_time} < %(end)s AND {end_time} >= %(end)s)\",\n f\"({start_time} >= %(start)s AND {end_time} <= %(end)s)\"\n ]\n\n if cutoff > 0:\n where = f\"\"\"\n {predicates}\n ({end_time} IS NULL AND {start_time} between %(start)s - '{cutoff} days'::interval and %(end)s OR\n (date_part('day', {end_time} - {start_time}) <= {cutoff} AND\n ({\" OR \".join(timeranges)})))\n \"\"\"\n else:\n timeranges.append(f\"({start_time} < %(end)s AND {end_time} IS NULL)\")\n where = f\"\"\"\n {predicates}\n ({\" OR \".join(timeranges)})\n \"\"\"\n\n order_by = kwargs.get(\"order_by\", self.order_by)\n if order_by:\n if not isinstance(order_by, list):\n order_by = [order_by]\n order_by = \",\".join(order_by)\n order_by = f\"ORDER BY {order_by}\"\n\n return f\"\"\"\n SELECT\n *\n FROM\n {source}\n WHERE\n {where}\n {order_by};\n \"\"\"", "title": "" }, { "docid": "37809edf146a1f0665c82f677f3754e7", "score": "0.51989144", "text": "def generate_select_request(self, conditions=None):\n\n if conditions is None:\n return self._SELECT_REQUEST_STRING\n else:\n request = self._SELECT_REQUEST_STRING + \" WHERE {}={}\"\n request = request.format(*conditions.split(\"=\"))\n return request", "title": "" }, { "docid": "d39ef560bb5a4fecbc1c6cd66834ec85", "score": "0.5198576", "text": "def _getQueryString(self):\n return \"select obj from CommentAnnotation obj join fetch obj.details.owner as owner join fetch obj.details.group \"\\\n \"join fetch obj.details.creationEvent\"", "title": "" }, { "docid": "f80c750afc8c0dee265f318d3f44d2f7", "score": "0.51965994", "text": "def generate_clause(vars, operator):\n return operator(*vars, evaluate=False)", "title": "" }, { "docid": "84e77ba0a35e57f35fb497a8cc5137a0", "score": "0.5192215", "text": "def subquery(self, withs, filters=None, optional=False):\n input_with = ', '.join(withs)\n new_withs = withs - {self.collection_alias}\n output_with = ', '.join(new_withs) + ', ' + self.with_statement()\n\n where_string = ''\n if filters is not None:\n relevant = []\n for c in filters:\n if 
c.involves(self):\n relevant.append(c.for_cypher())\n if relevant:\n where_string = 'WHERE ' + '\\nAND '.join(relevant)\n for_match = self.subquery_match_template.format(anchor_alias=self.anchor_node.alias,\n collection_alias=self.collection_alias)\n kwargs = {'for_match': for_match,\n 'optional': '',\n 'collection_alias': self.collection_alias,\n 'output_with_string': output_with}\n if optional:\n kwargs['optional'] = 'OPTIONAL '\n return self.subquery_template.format(**kwargs)", "title": "" }, { "docid": "0c91288d0f92106eb67c80c495661bb2", "score": "0.5183175", "text": "def build_query(self):\n # initalize config\n query = super(GlobalTaskTableSource, self).build_query()\n query = query.avoid_duplicates()\n return query", "title": "" }, { "docid": "f4c4035fb5ab40bfe3985d56797e9590", "score": "0.51793414", "text": "def prepareQuery(queryString, initNs={}, base=None):\n ret = translateQuery(parseQuery(queryString), base, initNs)\n ret._original_args = (queryString, initNs, base)\n return ret", "title": "" }, { "docid": "06ac992c3bdb35350862ce9f31ab0d1b", "score": "0.51777846", "text": "def buildQuery(self, formInfo, user, query, languages):\r\n return query.options(subqueryload('concepts'))\\\r\n .join(concept_list_concepts)\\\r\n .join(Concept)\\\r\n .join(formInfo.formModel, (formInfo.formModel.language_id == languages.foreign.id) & (formInfo.formModel.concept_id == Concept.id))\\\r\n .outerjoin(Mastery, (Mastery.user_id==user.id) & (getattr(Mastery, formInfo.masteryFieldName) == formInfo.formModel.id))\\\r\n .outerjoin(StalenessPeriod)\\\r\n .order_by(formInfo.listModel.averageRatingFor(user, languages.foreign))", "title": "" }, { "docid": "d1c27b14b20da118bc686538eb54c03e", "score": "0.51636916", "text": "def concat_query_make(query_ends):\n\n\tquery = result_queries.START\n\tstart = True\n\n\tif query_ends:\n\t\tfor end in query_ends:\n\t\t\tif start:\n\t\t\t\tquery += end\n\t\t\t\t#end is the varying return value to an otherwise identical query\n\n\n\t\t\tif not start:\n\t\t\t\tquery += \" || \\\"DELIM\\\" || \"\n\t\t\t\tquery += end\n\t\n\t\t\tstart = False\n\t\t\t\n\telse:\n\t\tquery = \"(no query selected)\"\n\t\n\treturn query", "title": "" }, { "docid": "0ba8011bc739cf5b088eb97c87937d2b", "score": "0.51636404", "text": "def evaluate_node_sql(node):\n if 'condition' in node:\n # Node is a condition, get the values of the sub-clauses\n sub_pairs = \\\n [evaluate_node_sql(x) for x in node['rules']]\n\n if not sub_pairs:\n # Nothing has been returned, so it is an empty query\n return '', []\n\n # Now combine\n if node['condition'] == 'AND':\n result = '((' + \\\n ') AND ('.join([x for x, __ in sub_pairs]) + '))'\n else:\n result = '((' + \\\n ') OR ('.join([x for x, __ in sub_pairs]) + '))'\n result_fields = \\\n list(itertools.chain.from_iterable([x for __, x in sub_pairs]))\n\n if node.get('not', False):\n result = '(NOT (' + result + '))'\n\n return result, result_fields\n\n # Get the variable name and duplicate the symbol % in case it is part of\n # the variable name (escape needed for SQL processing)\n varname = fix_pctg_in_name(node['field'])\n\n # Get the operator\n operator = node['operator']\n\n # If the operator is between or not_between, there is a special case,\n # the constant cannot be computed because the node['value'] is a pair\n constant = None\n if node['value'] is not None and not isinstance(node['value'], list):\n # Calculate the constant value depending on the type\n if node['type'] == 'integer':\n constant = int(node['value'])\n elif node['type'] == 'double':\n 
constant = float(node['value'])\n elif node['type'] == 'boolean':\n constant = node['value'] == '1'\n elif node['type'] == 'string':\n constant = node['value']\n elif node['type'] == 'datetime':\n constant = node['value']\n else:\n raise Exception(\n _('No function to translate type {0}').format(node['type'])\n )\n\n # Terminal Node\n result_fields = []\n if operator == 'equal':\n result = '(\"{0}\"'.format(varname) + \\\n ' = %s) AND (\"{0}\" is not null)'.format(varname)\n result_fields = [str(constant)]\n\n elif operator == 'not_equal':\n result = '(\"{0}\"'.format(varname) + \\\n '!= %s) OR (\"{0}\" is null)'.format(varname)\n result_fields = [str(constant)]\n\n elif operator == 'begins_with' and node['type'] == 'string':\n result = '(\"{0}\"'.format(varname) + \\\n ' LIKE %s) AND (\"{0}\" is not null)'.format(varname)\n result_fields = [node['value'] + \"%\"]\n\n elif operator == 'not_begins_with' and node['type'] == 'string':\n result = '(\"{0}\"'.format(varname) + \\\n ' NOT LIKE %s) OR (\"{0}\" is null)'.format(varname)\n result_fields = [node['value'] + \"%\"]\n\n elif operator == 'contains' and node['type'] == 'string':\n result = '(\"{0}\"'.format(varname) + \\\n ' LIKE %s) AND (\"{0}\" is not null)'.format(varname)\n result_fields = [\"%\" + node['value'] + \"%\"]\n\n elif operator == 'not_contains' and node['type'] == 'string':\n result = '(\"{0}\"'.format(varname) + \\\n ' NOT LIKE %s) OR (\"{0}\" is null)'.format(varname)\n result_fields = [\"%\" + node['value'] + \"%\"]\n\n elif operator == 'ends_with' and node['type'] == 'string':\n result = '(\"{0}\"'.format(varname) + \\\n ' LIKE %s) AND (\"{0}\" is not null)'.format(varname)\n result_fields = [\"%\" + node['value']]\n\n elif operator == 'not_ends_with' and node['type'] == 'string':\n result = '(\"{0}\"'.format(varname) + \\\n ' NOT LIKE %s) OR (\"{0}\" is null)'.format(varname)\n result_fields = [\"%\" + node['value']]\n\n elif operator == 'is_empty' and node['type'] == 'string':\n result = '(\"{0}\"'.format(varname) + \\\n \" = '') OR (\\\"{0}\\\" is null)\".format(varname)\n\n elif operator == 'is_empty' and node['type'] != 'string':\n result = \"(\\\"{0}\\\" is null)\".format(varname)\n\n elif operator == 'is_not_empty' and node['type'] == 'string':\n result = '(\"{0}\"'.format(varname) + \\\n \" != '') AND (\\\"{0}\\\" is not null)\".format(varname)\n\n elif operator == 'is_not_empty' and node['type'] != 'string':\n result = \"(\\\"{0}\\\" is not null)\".format(varname)\n\n elif operator == 'less' and \\\n (node['type'] == 'integer' or node['type'] == 'double'\n or node['type'] == 'datetime'):\n result = '\"{0}\"'.format(varname) + ' < %s'\n result_fields = [str(constant)]\n\n elif operator == 'less_or_equal' and \\\n (node['type'] == 'integer' or node['type'] == 'double'\n or node['type'] == 'datetime'):\n result = '\"{0}\"'.format(varname) + ' <= %s'\n result_fields = [str(constant)]\n\n elif operator == 'greater' and \\\n (node['type'] == 'integer' or node['type'] == 'double'\n or node['type'] == 'datetime'):\n result = '\"{0}\"'.format(varname) + ' > %s'\n result_fields = [str(constant)]\n\n elif operator == 'greater_or_equal' and \\\n (node['type'] == 'integer' or node['type'] == 'double'\n or node['type'] == 'datetime'):\n result = '\"{0}\"'.format(varname) + ' >= %s'\n result_fields = [str(constant)]\n\n elif operator == 'between' and \\\n (node['type'] == 'integer' or node['type'] == 'double'\n or node['type'] == 'datetime'):\n result = '\"{0}\"'.format(varname) + ' BETWEEN %s AND %s'\n result_fields = 
[str(node['value'][0]), str(node['value'][1])]\n\n elif operator == 'not_between' and \\\n (node['type'] == 'integer' or node['type'] == 'double'\n or node['type'] == 'datetime'):\n result = '\"{0}\"'.format(varname) + ' NOT BETWEEN %s AND %s'\n result_fields = [str(node['value'][0]), str(node['value'][1])]\n\n else:\n raise Exception(\n _('Type, operator, field {0}, {1}, {2} not supported yet').format(\n node['type'], operator, varname\n )\n )\n\n if node.get('not', False):\n raise Exception(_('Negation found in unexpected location'))\n\n return result, result_fields", "title": "" }, { "docid": "0dd8285bd0816627e7f7b40210481df8", "score": "0.5158893", "text": "def query(\n self, strOrQuery, initBindings={},\n initNs={}, base=None, DEBUG=False):\n\n if not isinstance(strOrQuery, Query):\n parsetree = parseQuery(strOrQuery)\n query = translateQuery(parsetree, base, initNs)\n else:\n query = strOrQuery\n\n return evalQuery(self.graph, query, initBindings, base)", "title": "" }, { "docid": "911fc88c4f06f7a15ccb80b7985bca8c", "score": "0.51363057", "text": "def execute(self, neo4j_session: Session):\n if self.query == \"\":\n raise ValueError(\n \"The query must be generated before this method is called.\"\n )\n try:\n return neo4j_session.write_transaction(lambda tx: tx.run(self.query))\n except:\n print(\"Query not defined error\")\n return None", "title": "" }, { "docid": "5af7e59578232b5286f33142426d48b4", "score": "0.51362425", "text": "def buildConnectionString(params):\n return \";\".join([\"%s=%s\" % (k, v) for k, v in params.items()])", "title": "" }, { "docid": "250b21f2993d378d79cd2b0b0096170a", "score": "0.5131796", "text": "def _row_to_query_statement_string(self, row):\n start_node = 'MERGE (start:{state_lab} {{code:\"{start_state}\"}})'.format(\n state_lab=self.labels.state, start_state=row[self.start_state_col]\n )\n\n end_node = 'MERGE (end:{state_lab} {{code:\"{end_state}\"}})'.format(\n state_lab=self.labels.state, end_state=row[self.end_state_col]\n )\n\n transition = \"MERGE (start)<-[:SOURCE]-(trans:{trans_lab})-[:TARGET]->(end)\".format(\n trans_lab=self.labels.transition\n )\n\n def _conditions_str(row, start_state_col, end_state_col):\n \"\"\"Build the string used to express transition conditions.\n \n This is the part in curly braces specifying the Condition node's\n properties.\n \"\"\"\n row = row.drop([start_state_col, end_state_col])\n count = 0\n s = \"\"\n for i, val in row.iteritems():\n s += i + \":\"\n if isinstance(val, six.string_types):\n s += '\"' + val + '\"'\n elif isinstance(val, bool):\n s += str(val).lower()\n else:\n s += str(val)\n if count < len(row) - 1:\n s += \", \"\n count += 1\n\n return \"{\" + s + \"}\"\n\n condition = \"MERGE (cond:{cond_lab} {cond_str})-[:CAUSES]->(trans)\".format(\n cond_lab=self.labels.condition,\n cond_str=_conditions_str(row, self.start_state_col, self.end_state_col),\n )\n\n query_str = (\n start_node + \" \" + end_node + \" \" + transition + \" \" + condition + \";\"\n )\n\n if self.global_params:\n query_str = self._add_global_params_to_query_string(\n query_str, self.global_params\n )\n\n return query_str", "title": "" }, { "docid": "54c7e0913a92b7dfb4e7cc3e5cb349f6", "score": "0.5125673", "text": "def build_query(url: str, *args, **kwargs) -> str:\n url = url.strip(\"/\")\n args = map(lambda arg: arg.strip(\"/\"), args)\n kwargs = dict(filter(lambda kv: kv[1] is not None, kwargs.items()))\n url = \"/\".join([url, *args]) + \"?\"\n url += \"&\".join([f\"{key}={value}\" for key, value in kwargs.items()])\n return 
url", "title": "" }, { "docid": "976a7fa212ec09e76082f81572c24b87", "score": "0.51216996", "text": "def build_query(field, fields, fuzziness=\"AUTO\"):\n val = fields[field]\n if \"*\" in val:\n qtype = \"wildcard\"\n fval = val\n else:\n qtype = \"match\"\n fval = dict(query=val, fuzziness=fuzziness)\n return Q(qtype, **{field: fval})", "title": "" }, { "docid": "27d86e7c41d5fb86b9a53f995bbc51a9", "score": "0.5117474", "text": "def q(*args: Any, **kwargs: Any) -> \"Query\":\n return Query(*args, **kwargs)", "title": "" }, { "docid": "f0fd368bbe1ee92e7081900909296ef4", "score": "0.51087725", "text": "def query_on_params(self,\\\n **kwargs):\n if self.check_empty_query(**kwargs):\n return []\n \n elif 'username' in kwargs and len(kwargs) == 1:\n name = kwargs['username'][0]\n return self.query_on_name(\\\n name,\\\n self.all()) \n\n username_s = kwargs['username'][0]\n fullname_s = kwargs['fullname'][0]\n faculty_s = kwargs['faculty'][0]\n program_s = kwargs['program'][0]\n query_set_l = self.all()\n if username_s != '':\n query_set_l = query_set_l.filter(\\\n username__icontains = username_s)\n if fullname_s != '':\n query_set_l = self.query_on_name(\\\n fullname_s,\\\n query_set_l)\n if faculty_s != '':\n query_set_l = query_set_l.filter(\\\n faculty__exact = faculty_s)\n if program_s != '':\n query_set_l = query_set_l.filter(\\\n program__exact = program_s)\n return query_set_l", "title": "" }, { "docid": "a40795caa1ea72b2cdda71b5e0413200", "score": "0.51069057", "text": "def sql_condition(\n self,\n conditions=tuple(),\n all_inner=True,\n all_outer=True,\n match_inner=True,\n match_outer=True,\n ):\n args = locals()\n # put the conditions into a dict\n conds = {\n pref: args.get(pref)\n for pref in (\n \"all_inner\",\n \"all_outer\",\n \"match_inner\",\n \"match_outer\",\n )\n }\n conds = { # properly cycle all settings\n pref: itertools.cycle(val if hasattr(val, \"__iter__\") else (val,))\n for pref, val in conds.items()\n }\n conds[\"values\"] = (\n (conditions,) if hasattr(conditions, \"items\") else conditions\n )\n sql_conditions, sql_params = [], []\n for cond in listdict_to_dictlist(conds):\n sanitized_values = {\n self.sanitize(k, brutally=True): self.sanitize(\n v, brutally=False\n )\n if isinstance(v, str)\n else v\n for k, v in cond[\"values\"].items()\n if k in self.tableheader\n }\n inner_sql_cond = (\" AND \" if cond[\"all_inner\"] else \" OR \").join(\n map(\n \"{} {} ?\".format,\n sanitized_values.keys(),\n itertools.cycle(\n (\"IS\" if cond[\"match_inner\"] else \"IS NOT\",)\n ),\n )\n ) or \"1=1\"\n sql_conditions.append(\n \"{}({})\".format(\n \"\" if cond[\"match_outer\"] else \"NOT \", inner_sql_cond\n )\n )\n sql_params.extend(tuple(sanitized_values.values()))\n sql_condition = (\n functools.reduce(\n lambda x, y: (\n \" AND \" if next(conds[\"all_outer\"]) else \" OR \"\n ).join((x, y)),\n sql_conditions,\n )\n if sql_conditions\n else \"1=1\"\n )\n return sql_condition, tuple(sql_params)", "title": "" }, { "docid": "c126e83185b3f095e5bc3f7c32cda876", "score": "0.51048845", "text": "def post(self, statement, parameters=None):\n payload = {\"query\": statement}\n if parameters:\n payload[\"params\"] = {}\n for key, value in parameters.items():\n if isinstance(value, (Node, Rel, Relationship)):\n value = value._id\n payload[\"params\"][key] = value\n log.info(\"execute %r %r\", payload[\"query\"], payload.get(\"params\", {}))\n return self.resource.post(payload)", "title": "" } ]
152c3edbb3aa0b05d956c5dcb6acb205
Initialize the Decomposable Operator-Valued Kernel.
[ { "docid": "f09118c547cd865e208584e4cfd55bdd", "score": "0.0", "text": "def __init__(self, X, gamma):\n super(RBFDivFreeKernelMap, self).__init__(gamma)\n self.n = X.shape[0]\n self.d = X.shape[1]\n self.p = X.shape[1]\n self.X = X\n self.Gs_train = None", "title": "" } ]
[ { "docid": "31c1f825722e935a9f2040878ee62b3c", "score": "0.6014059", "text": "def init_op(self):\n return self._init_op", "title": "" }, { "docid": "933dc4e128744f5c26c4a36acf306da6", "score": "0.59667915", "text": "def __init__(self,kernel_fn,*args,order=2,**kwargs):\n super().__init__(*args,**kwargs)\n self.dkernel_fn = DiscretizedKernelFN(kernel_fn,order)", "title": "" }, { "docid": "6d9845f2e29e914f28540bd4faadf50b", "score": "0.5955899", "text": "def __init__(self, X, A, scalar_kernel, scalar_kernel_params):\n super(DecomposableKernelMap, self).__init__(A, scalar_kernel,\n scalar_kernel_params)\n self.n = X.shape[0]\n self.d = X.shape[1]\n self.X = X\n self.Gs_train = None", "title": "" }, { "docid": "4fca6d74bf6efc98b6afae92b5d813d2", "score": "0.594024", "text": "def __init__(self):\n # set the kernel. One can play with the parameters.\n self.set_kernel(1, 2.0, 0.98)", "title": "" }, { "docid": "0ab1359409ab9f3bea19235d57510e45", "score": "0.5931874", "text": "def __init__(self):\n self.d = dd(lambda : 0)", "title": "" }, { "docid": "679da9cba2b0e04380ef1933feec9920", "score": "0.579953", "text": "def _setup_primitive_kernel(request) -> SDEKernel:\n return _create_markovflow_primitive_kernel(request.param)", "title": "" }, { "docid": "32cd9d7e5acbca27580e7960d902ce69", "score": "0.56664544", "text": "def __init__(self, kernel, adopt_thresh, state, target, maxsize, adaptive=True, forget_rate=0.0):\n\t\tself.dp = KernelDict(kernel, adopt_thresh, state, target, maxsize, adaptive, forget_rate)\n\t\tself.P = [[1]]\n\t\tself.Alpha = np.dot(self.dp.Kinv, target)", "title": "" }, { "docid": "31d3655e599882c02607bb11bff69504", "score": "0.56652945", "text": "def __init__(self, kernel_size):\n super().__init__()\n self.kernel_size = kernel_size", "title": "" }, { "docid": "31d3655e599882c02607bb11bff69504", "score": "0.56652945", "text": "def __init__(self, kernel_size):\n super().__init__()\n self.kernel_size = kernel_size", "title": "" }, { "docid": "31d3655e599882c02607bb11bff69504", "score": "0.56652945", "text": "def __init__(self, kernel_size):\n super().__init__()\n self.kernel_size = kernel_size", "title": "" }, { "docid": "31d3655e599882c02607bb11bff69504", "score": "0.56652945", "text": "def __init__(self, kernel_size):\n super().__init__()\n self.kernel_size = kernel_size", "title": "" }, { "docid": "5acf0ce085e1d9c8d75204b2a828c8ad", "score": "0.56514794", "text": "def _init_op(new_op):\n\n def _impl(inputs, attrs):\n assert len(inputs) == 0\n shape = attrs.get_int_tuple(\"shape\")\n dtype = attrs.get_str(\"dtype\", \"float32\")\n return new_op(shape=shape, dtype=dtype)\n\n return _impl", "title": "" }, { "docid": "d18640e6b96ae414bc449aa49d579501", "score": "0.5644447", "text": "def __init__(self, kernel=None, noise_var=None, exact_feval=False, optimizer='bfgs',\n max_iters=1000, optimize_restarts=5, sparse=None, num_inducing=10,\n verbose=False, ARD=False, seed=42, normalize_Y=True, n_subspaces=12, update_freq=5):\n self.kernel = kernel\n self.noise_var = noise_var\n self.exact_feval = exact_feval\n self.optimize_restarts = optimize_restarts\n self.optimizer = optimizer\n self.max_iters = max_iters\n self.verbose = verbose\n self.sparse = sparse\n self.sparse_surrogate = False\n self.num_inducing = num_inducing\n self.model = None\n self.ARD = ARD\n self.seed = seed\n self.update_interval = update_freq\n self.normalize_Y = normalize_Y\n self.n_sub = n_subspaces\n self.n_decomp = 20\n np.random.rand(self.seed)", "title": "" }, { "docid": "abb773c00e94f7cd4d00189b8b26809d", 
"score": "0.5515068", "text": "def __call__(self):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = []\r\n new_node.name = \"Global_Variables_Initializer\"\r\n return new_node", "title": "" }, { "docid": "c8983388747a5dd2343aaa072cff2509", "score": "0.5513527", "text": "def kernel(self, value):\n\n if value is not None:\n assert hasattr(\n value,\n '__call__'), ('\"{0}\" attribute: \"{1}\" is not callable!'.format(\n 'kernel', value))\n\n self._kernel = value", "title": "" }, { "docid": "a05329e78028c09e4d065fb146db76c7", "score": "0.5498108", "text": "def __init__(\n self,\n params: grid_parametrization.GridParametrization,\n kernel_op: get_kernel_fn.ApplyKernelOp,\n solver_option: poisson_solver_pb2.PoissonSolver,\n ):\n super().__init__(params, kernel_op, solver_option)\n\n self._kernel_op.add_kernel({'weighted_sum_121': ([1.0, 2.0, 1.0], 1)})\n self._omega = solver_option.jacobi.omega\n self._num_iters = solver_option.jacobi.max_iterations\n self._halo_width = solver_option.jacobi.halo_width\n\n self._factor_b = 0.5 / (params.dx**-2 + params.dy**-2 + params.dz**-2)\n self._factor_x = params.dx**2 / self._factor_b\n self._factor_y = params.dy**2 / self._factor_b\n self._factor_z = params.dz**2 / self._factor_b", "title": "" }, { "docid": "1dfd0e455ad0824814bf1628e531aa99", "score": "0.5469124", "text": "def __init__(self, initial_value, *args, dtype=None, **kwargs):\n # Variables by default use the current device scope for placement. This\n # wrapper has them follow the initial value's placement instead (which will\n # be the DTensor device if the initial value has a layout).\n\n # Pop layout from kwargs since keras make_variable may pass a 'layout'\n # keyword argument. We need to pop it because we are passing kwargs to\n # super class constructor.\n layout = kwargs.pop('layout', None)\n shape = kwargs.get('shape', None)\n\n if callable(initial_value):\n unwrapped = initial_value\n if issubclass(type(initial_value), functools.partial):\n unwrapped = initial_value.func\n\n # If wrapped is a CheckpointInitialValueCallable, this means that\n # we are creating a Variable during a checkpoint restore.\n # Thus the restore will happen now through this callable\n # and we will create the DVariable with the restored dtensor.\n if issubclass(type(unwrapped), trackable.CheckpointInitialValueCallable):\n if not shape or not layout:\n raise ValueError('Expected shape and layout to be not None.')\n\n # CheckpointInitialValueCallable will call an eager tf.RestoreV2,\n # which does not have any shape information or layout information\n # attached. Thus we will do two things to have them correctly specified:\n #\n # The default layout scope allows us to correctly specify the output\n # layout of the tf.RestoreV2 that will be called\n #\n # Passing shard_info with the correct shape allows the tf.RestoreV2\n # ShapeInference to extract the shape.\n initial_value = api.call_with_layout(\n initial_value,\n layout,\n shard_info=trackable.ShardInfo(\n shape=shape, offset=[0] * len(shape)))\n else:\n initial_value = initial_value()\n\n # When the initial value came from a Checkpoint restoration, fetch tensor.\n if isinstance(initial_value, trackable.CheckpointInitialValue):\n initial_value = initial_value.wrapped_value\n\n initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)\n variable_device = initial_value.device\n self._save_as_bf16 = False\n # TODO(b/159035705): The following code enables variable creation inside\n # a tf.function. 
However, it requires a global dtensor device.\n # if not variable_device and not tf.executing_eagerly():\n # try:\n # initial_value.op.get_attr(\"_layout\")\n # except ValueError:\n # pass\n # else:\n # # The initial value is a DTensor, but because the DTensor device is\n # # only active during eager execution at the moment we need to\n # # translate that into a placement for the eager VarHandleOp.\n # variable_device = _dtensor_device().name\n with ops.device(variable_device):\n # If initial tensor assigned to DVariable is DTensor, record the layout of\n # the resource so that this can be queried.\n if context.executing_eagerly():\n if api.is_dtensor(initial_value):\n value_layout = api.fetch_layout(initial_value)\n if layout is not None and layout != value_layout:\n raise errors_impl.InvalidArgumentError(\n None,\n None,\n 'Conflicting layout are provided for initial '\n f'value layout ({value_layout}) and variable ({layout}).',\n )\n layout = value_layout\n elif layout is not None:\n initial_value = api.relayout(initial_value, layout)\n else:\n raise errors_impl.InvalidArgumentError(\n None,\n None,\n 'Neither layout nor DTensor initial value are provided.',\n )\n self.layout = layout\n with api.default_mesh(layout.mesh):\n super(DVariable, self).__init__(\n initial_value, *args, dtype=dtype, **kwargs\n )\n else:\n # FIXME(175928457): Record value layout in graph mode.\n if layout is not None:\n initial_value = api.relayout(initial_value, layout)\n super(DVariable, self).__init__(\n initial_value, *args, dtype=dtype, **kwargs)", "title": "" }, { "docid": "c3de15c5a768f66fe4fbaed3a7d68cc4", "score": "0.54562724", "text": "def __init__(self, kernel=None, hp=None):\n\n self.kernel = kernel\n self.hp = hp\n self.name = kernel.__name__", "title": "" }, { "docid": "95f099c9ceb72599c3eb191e27acebee", "score": "0.5444765", "text": "def __init__(\n self,\n kernel='linear', # kernel type\n max_iter=180, # maximum iteration times\n conv=1e-3, # convergence criterion\n beta=1e3, # initial beta\n bias_used=True # bias\n ):\n self.kernel = kernel\n self.n_iter = max_iter\n self.conv = conv\n self.beta = beta\n self.bias_used = bias_used\n self.alpha, self.alpha_, self.alpha_old, self.beta_, self.relevance, self.phi, self.phi_,\\\n self.y, self.mean_, self.sigma_, self.bias = [None]*11", "title": "" }, { "docid": "2fa80d6f5fdd7a6d6ca7a2bed06d3ca9", "score": "0.5426685", "text": "def __init__(self, model):\n self.model = model\n self.opcache = {} # a cache of operators, which should get initialized by from_vector calls", "title": "" }, { "docid": "4b823c3cb49da97065fb9a3afeeb8a89", "score": "0.53927", "text": "def __call__(self, initial_value, dtype = None, shape = None, name = \"Variable\"):\r\n new_node = Op.__call__(self)\r\n if shape is not None:\r\n assert shape == initial_value.shape\r\n if dtype is not None:\r\n if isinstance(initial_value, np.ndarray):\r\n global_variables[new_node] = initial_value.astype(dtype)\r\n else:\r\n global_variables[new_node] = np.array(initial_value).astype(dtype)\r\n else:\r\n global_variables[new_node] = initial_value\r\n new_node.name = name\r\n return new_node", "title": "" }, { "docid": "303057902312f28c3b3789db58ed8b76", "score": "0.5379558", "text": "def __init__(self, kernel_size):\n\n self.stride = kernel_size\n self.kernel_size = kernel_size", "title": "" }, { "docid": "75b72de45026680e384ba08990058900", "score": "0.5367593", "text": "def __init__(self, **kwargs):\n\n LOG.info(\"%s: initing...\" % str(self))\n\n super(Generic2dOscillator, 
self).__init__(**kwargs)\n\n #self._state_variables = [\"V\", \"W\"]\n self._nvar = 2\n self.cvar = numpy.array([0], dtype=numpy.int32)\n\n LOG.debug(\"%s: inited.\" % repr(self))", "title": "" }, { "docid": "b7aab43ddc245a9aeebd561fb21486f2", "score": "0.53663516", "text": "def _setup_impl(self):\n\n self.bpm_program = OCLProgram(absPath(\"kernels/bpm_3d_kernels.cl\"))\n\n Nx, Ny, Nz = self.size\n\n self._plan = fft_plan((Ny,Nx))\n\n\n self._H_g = OCLArray.from_array(self._H.astype(np.complex64))\n\n if not self.dn is None and self.n_volumes==1:\n self.dn_g = OCLArray.from_array(self.dn)\n\n\n self.scatter_weights_g = OCLArray.from_array(self.scatter_weights.astype(np.float32))\n self.gfactor_weights_g = OCLArray.from_array(self.gfactor_weights.astype(np.float32))\n\n self.scatter_cross_sec_g = OCLArray.zeros(Nz,\"float32\")\n self.gfactor_g = OCLArray.zeros(Nz,\"float32\")\n\n\n\n # self.reduce_kernel = OCLReductionKernel(\n # np.float32, neutral=\"0\",\n # reduce_expr=\"a+b\",\n # map_expr=\"weights[i]*cfloat_abs(field[i]-(i==0)*plain)*cfloat_abs(field[i]-(i==0)*plain)\",\n # arguments=\"__global cfloat_t *field, __global float * weights,cfloat_t plain\")", "title": "" }, { "docid": "5fdf5263c27b7114fd643c548c5c86cc", "score": "0.5359017", "text": "def __init__(self, value):\n\t\tsuper(Functor, self).__init__(value)", "title": "" }, { "docid": "6bb8bab55f68310770b6b4a2897fe8af", "score": "0.53568095", "text": "def __init__(self, kernel_type=None):\r\n self.kernel_type = kernel_type\r\n self.gamma = 0\r\n self.coef = 0\r\n self.degree = 0\r\n self.num_classes = 0\r\n self.labels = None\r\n self.total_sv = 0\r\n self.rho = 0\r\n self.sv = []", "title": "" }, { "docid": "0967a0baddc13a0d6db87af07c84175b", "score": "0.5346729", "text": "def initialize_operator(self, operator=None, matrix=False, eval_at_once=False):\n # TODO: Make this more efficient, only compute values needed at each (r,c) step.\n # For this, 'operator' must support the 'component=(r,c)' option.\n # Operator is None is interpreted as identity transformation\n if operator is None:\n self._operator = lambda nodes, dummy, entry=None: ones((1, nodes.shape[1])) if entry[0] == entry[1] else zeros((1, nodes.shape[1]))\n else:\n # Wrap the operator inside a lambda ignoring the dummy parameter\n # This allows to use the same nsd code for quadrature and matrix construction.\n if matrix is False:\n self._operator = lambda nodes, dummy, entry=None: operator(nodes, entry=entry)\n else:\n self._operator = operator\n self._eval_at_once = eval_at_once", "title": "" }, { "docid": "b68ca023fe7092aeaef64e1da8254009", "score": "0.5345871", "text": "def _init_routine(self):\n if self.is_initialized is False:\n # decide the routine\n if self.sn_def['op'] in {'d', 'project'}:\n # for d kernel_shape [num_in, num_out]; for project, kernel shape [num_class, num_in]\n assert len(self.kernel_shape) == 2, \\\n '{}: kernel shape {} does not have length 2'.format(self.name_in_err, self.kernel_shape)\n num_in, num_out = self.kernel_shape\n # self.use_u = True\n self.use_u = True if num_in <= num_out else False\n x_shape = [1, num_in] if self.use_u else [1, num_out]\n self.forward = self._dense_ if self.use_u else self._dense_t_\n self.backward = self._dense_t_ if self.use_u else self._dense_\n elif self.sn_def['op'] in {'cd'}: # kernel_shape [num_class, num_in, num_out]\n assert len(self.kernel_shape) == 3, \\\n '{}: kernel shape {} does not have length 3'.format(self.name_in_err, self.kernel_shape)\n num_class, num_in, num_out = self.kernel_shape\n 
self.use_u = True if num_in <= num_out else False\n x_shape = [num_class, 1, num_in] if self.use_u else [num_class, 1, num_out]\n self.forward = self._dense_ if self.use_u else self._dense_t_\n self.backward = self._dense_t_ if self.use_u else self._dense_\n elif self.sn_def['op'] in {'dck'}: # convolution * conditional scale\n assert isinstance(self.kernel_shape, (list, tuple)) and len(self.kernel_shape) == 2, \\\n '{}: kernel shape must be a list of length 2. Got {}'.format(self.name_in_err, self.kernel_shape)\n assert len(self.kernel_shape[0]) == 2 and len(self.kernel_shape[1]) == 2, \\\n '{}: kernel shape {} does not have length 2'.format(self.name_in_err, self.kernel_shape)\n num_in, num_out = self.kernel_shape[0]\n num_class = self.kernel_shape[1][0]\n self.use_u = True if num_in <= num_out else False\n x_shape = [num_class, num_in] if self.use_u else [num_class, num_out]\n self.forward = (lambda x: self._scalar_(self._dense_(x, index=0), index=1, offset=1.0)) \\\n if self.use_u else (lambda y: self._dense_t_(self._scalar_(y, index=1, offset=1.0), index=0))\n self.backward = (lambda y: self._dense_t_(self._scalar_(y, index=1, offset=1.0), index=0)) \\\n if self.use_u else (lambda x: self._scalar_(self._dense_(x, index=0), index=1, offset=1.0))\n elif self.sn_def['op'] in {'c', 'tc'}:\n assert len(self.kernel_shape) == 4, \\\n '{}: kernel shape {} does not have length 4'.format(self.name_in_err, self.kernel_shape)\n # self.use_u = True\n self.use_u = True \\\n if np.prod(self.sn_def['input_shape'][1:]) <= np.prod(self.sn_def['output_shape'][1:]) \\\n else False\n if self.sn_def['op'] in {'c'}: # input / output shape NCHW or NHWC\n x_shape = self.sn_def['input_shape'].copy() if self.use_u else self.sn_def['output_shape'].copy()\n x_shape[0] = 1\n y_shape = self.sn_def['input_shape'].copy()\n y_shape[0] = 1\n elif self.sn_def['op'] in {'tc'}: # tc\n x_shape = self.sn_def['output_shape'].copy() if self.use_u else self.sn_def['input_shape'].copy()\n x_shape[0] = 1\n y_shape = self.sn_def['output_shape'].copy()\n y_shape[0] = 1\n else:\n raise NotImplementedError('{}: {} not implemented.'.format(self.name_in_err, self.sn_def['op']))\n self.forward = self._conv_ if self.use_u else (lambda y: self._conv_t_(y, x_shape=y_shape))\n self.backward = (lambda y: self._conv_t_(y, x_shape=y_shape)) if self.use_u else self._conv_\n elif self.sn_def['op'] in {'cck', 'tcck'}: # convolution * conditional scale\n assert isinstance(self.kernel_shape, (list, tuple)) and len(self.kernel_shape) == 2, \\\n '{}: kernel shape must be a list of length 2. 
Got {}'.format(self.name_in_err, self.kernel_shape)\n assert len(self.kernel_shape[0]) == 4 and len(self.kernel_shape[1]) == 4, \\\n '{}: kernel shape {} does not have length 4'.format(self.name_in_err, self.kernel_shape)\n self.use_u = True \\\n if np.prod(self.sn_def['input_shape'][1:]) <= np.prod(self.sn_def['output_shape'][1:]) \\\n else False\n num_class = self.kernel_shape[1][0]\n if self.sn_def['op'] in {'cck'}: # input / output shape NCHW or NHWC\n x_shape = self.sn_def['input_shape'].copy() if self.use_u else self.sn_def['output_shape'].copy()\n x_shape[0] = num_class\n y_shape = self.sn_def['input_shape'].copy()\n y_shape[0] = num_class\n self.forward = (lambda x: self._scalar_(self._conv_(x, index=0), index=1, offset=1.0)) \\\n if self.use_u \\\n else (lambda y: self._conv_t_(self._scalar_(y, index=1, offset=1.0), x_shape=y_shape, index=0))\n self.backward = (lambda y: self._conv_t_(\n self._scalar_(y, index=1, offset=1.0), x_shape=y_shape, index=0)) \\\n if self.use_u else (lambda x: self._scalar_(self._conv_(x, index=0), index=1, offset=1.0))\n elif self.sn_def['op'] in {'tcck'}: # tcck\n x_shape = self.sn_def['output_shape'].copy() if self.use_u else self.sn_def['input_shape'].copy()\n x_shape[0] = num_class\n y_shape = self.sn_def['output_shape'].copy()\n y_shape[0] = num_class\n self.forward = (lambda x: self._conv_(self._scalar_(x, index=1, offset=1.0), index=0)) \\\n if self.use_u \\\n else (lambda y: self._scalar_(self._conv_t_(y, x_shape=y_shape, index=0), index=1, offset=1.0))\n self.backward = (lambda y: self._scalar_(\n self._conv_t_(y, x_shape=y_shape, index=0), index=1, offset=1.0)) \\\n if self.use_u else (lambda x: self._conv_(self._scalar_(x, index=1, offset=1.0), index=0))\n else:\n raise NotImplementedError('{}: {} not implemented.'.format(self.name_in_err, self.sn_def['op']))\n else:\n raise NotImplementedError('{}: {} is not implemented.'.format(self.name_in_err, self.sn_def['op']))\n\n self.x = tf.compat.v1.get_variable(\n 'in_rand', shape=x_shape, dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(), trainable=False)\n\n self.is_initialized = True", "title": "" }, { "docid": "f2a6967944c915daed81a3fe3422a498", "score": "0.5284371", "text": "def __init__(self, shape, k, filters, kernels):\n self.dims = shape\n self.k = k\n self.filters = filters\n self.kernels = kernels", "title": "" }, { "docid": "3d489be3b563e75c12cba4f31c651635", "score": "0.52758646", "text": "def __init__(self, var, accum, linear, grad, indices,\n lr, lr_power, l1, l2, l2_shrinkage, kernel_name):\n super().__init__(var, grad, indices, kernel_name)\n self.lr = lr\n self.lr_power = lr_power\n self.l1 = l1\n self.l2 = l2\n self.l2_shrinkage = l2_shrinkage\n self.lr_vrec = 1.0 / self.lr\n self.var_shape = var.get(\"shape\")\n self.var_dtype = var.get(\"dtype\").lower()\n self.accum_shape = accum.get(\"shape\")\n self.accum_dtype = accum.get(\"dtype\").lower()\n self.linear_shape = linear.get(\"shape\")\n self.linear_dtype = linear.get(\"dtype\").lower()\n self.check_param()", "title": "" }, { "docid": "a0686a0db70ea3e597fcc88adaa8c3f5", "score": "0.52641636", "text": "def __init__(self):\n self.da_val = DynamicArray()\n self.da_max = DynamicArray()", "title": "" }, { "docid": "b24468922248ebc17217ff925e070f54", "score": "0.526289", "text": "def __init__(self, D, kernel_gamma, m, step_size, gamma2=0.1, schedule=None, acc_star=0.234,\n update_kernel_gamma=None, update_kernel_gamma_schedule=None, update_kernel_gamma_tol=0.1):\n self.kernel_gamma = kernel_gamma\n self.m = m\n 
self.D = D\n self.gamma2 = gamma2\n self.step_size = step_size\n self.schedule = schedule\n self.acc_star = acc_star\n self.update_kernel_gamma = update_kernel_gamma\n self.update_kernel_gamma_schedule = update_kernel_gamma_schedule\n self.update_kernel_gamma_tol = update_kernel_gamma_tol\n \n # scaling parameter evolution might be collected to assess convergence\n self.nu2s = [step_size]\n \n # some sanity checks\n if acc_star is not None:\n assert acc_star > 0 and acc_star < 1\n \n if schedule is not None:\n lmbdas = np.array([schedule(t) for t in np.arange(100)])\n assert np.all(lmbdas > 0)\n assert np.allclose(np.sort(lmbdas)[::-1], lmbdas)\n \n if self.update_kernel_gamma:\n self.past_samples = collections.deque()\n \n if self.update_kernel_gamma_schedule is not None:\n lmbdas = np.array([update_kernel_gamma_schedule(t) for t in np.arange(100)])\n assert np.all(lmbdas > 0)\n assert np.allclose(np.sort(lmbdas)[::-1], lmbdas)\n \n self._initialise()", "title": "" }, { "docid": "260cd30aa6404a0db73c4f2663e7a1d5", "score": "0.5251513", "text": "def __init__(self, value):\n self.np_value = value", "title": "" }, { "docid": "1b98432c1827a929281f2a7f25c397f1", "score": "0.52501065", "text": "def __init__(self, initializer: ArrayOp, index_op: IndexOp):\n self.initializer = initializer\n self.index_op = index_op", "title": "" }, { "docid": "a6498103bfa75e7a4a24ec77f577d87d", "score": "0.52426285", "text": "def __init__(self, value):\n self._v = value", "title": "" }, { "docid": "601f3074c3929a4c542d73770542d2a7", "score": "0.52349836", "text": "def __init__(self, *args, **kwargs):\n super(DPTuner, self).__init__(*args, **kwargs)\n self._num_states = self._max_num_states = None\n self._stage_dict = {}\n self._dep_dict = {}\n self._counted_nodes_set = set()\n\n self._global_data_dict = {\n \"dtype\": self._dtype,\n \"counted_nodes_set\": self._counted_nodes_set,\n \"stage_dict\": self._stage_dict,\n \"in_nodes_dict\": self._in_nodes_dict,\n \"out_nodes_dict\": self._out_nodes_dict,\n \"dep_dict\": self._dep_dict,\n \"node_list\": self._node_list,\n \"input_shapes\": self._input_shapes,\n \"layout_transform_interlayer_cost\": self._layout_transform_interlayer_cost,\n }", "title": "" }, { "docid": "fe05bf701bd73f4f90dc69bea2b7c5e7", "score": "0.5229092", "text": "def __init__(self, pars=[], kernel=\"\"):\n\n self._pars = pars\n self._kernel = kernel", "title": "" }, { "docid": "86c32e4f4c1420ad4bad9df9f953f1cf", "score": "0.52148443", "text": "def init_vector(ocp):\n v_init = np.ndarray((0, 1))\n\n # For states\n for i_phase in range(len(ocp.nlp)):\n current_nlp = ocp.nlp[i_phase]\n\n repeat = 1\n if current_nlp.ode_solver.is_direct_collocation:\n repeat += current_nlp.ode_solver.polynomial_degree\n\n nlp = ocp.nlp[current_nlp.use_states_from_phase_idx]\n OptimizationVectorHelper._set_node_index(nlp, 0)\n for key in nlp.states:\n if key in nlp.x_init.keys():\n n_points = OptimizationVectorHelper._nb_points(nlp, nlp.x_init[key].type)\n nlp.x_init[key].check_and_adjust_dimensions(nlp.states[key].cx.shape[0], n_points)\n\n for k in range(nlp.ns + 1):\n OptimizationVectorHelper._set_node_index(nlp, k)\n for p in range(repeat if k != nlp.ns else 1):\n point = k if k != 0 else 0 if p == 0 else 1\n\n collapsed_values = np.ndarray((nlp.states.shape, 1))\n for key in nlp.states:\n if key in nlp.x_init.keys():\n point_to_eval = point\n if nlp.x_init[key].type == InterpolationType.ALL_POINTS:\n point_to_eval = k * repeat + p\n value = (\n nlp.x_init[key].init.evaluate_at(shooting_point=point_to_eval, 
repeat=repeat)\n / nlp.x_scaling[key].scaling\n )[:, np.newaxis]\n else:\n value = 0\n # Organize the controls according to the correct indices\n collapsed_values[nlp.states[key].index, :] = value\n\n v_init = np.concatenate((v_init, np.reshape(collapsed_values.T, (-1, 1))))\n\n # For controls\n for i_phase in range(len(ocp.nlp)):\n current_nlp = ocp.nlp[i_phase]\n\n nlp = ocp.nlp[current_nlp.use_controls_from_phase_idx]\n OptimizationVectorHelper._set_node_index(nlp, 0)\n if nlp.control_type in (ControlType.CONSTANT, ControlType.NONE):\n ns = nlp.ns\n elif nlp.control_type in (ControlType.LINEAR_CONTINUOUS, ControlType.CONSTANT_WITH_LAST_NODE):\n ns = nlp.ns + 1\n else:\n raise NotImplementedError(f\"Multiple shooting problem not implemented yet for {nlp.control_type}\")\n\n for key in nlp.controls.keys():\n if key in nlp.u_init.keys():\n nlp.u_init[key].check_and_adjust_dimensions(nlp.controls[key].cx.shape[0], ns - 1)\n\n for k in range(ns):\n OptimizationVectorHelper._set_node_index(nlp, k)\n collapsed_values = np.ndarray((nlp.controls.shape, 1))\n for key in nlp.controls:\n if key in nlp.u_init.keys():\n value = nlp.u_init[key].init.evaluate_at(shooting_point=k) / nlp.u_scaling[key].scaling\n else:\n value = 0\n\n # Organize the controls according to the correct indices\n collapsed_values[nlp.controls[key].index, 0] = value\n\n v_init = np.concatenate((v_init, np.reshape(collapsed_values.T, (-1, 1))))\n\n # For parameters\n collapsed_values = np.zeros((ocp.parameters.shape, 1))\n for key in ocp.parameters.keys():\n if key not in ocp.parameter_init.keys():\n v_init = np.concatenate((v_init, np.zeros((ocp.parameters[key].size, 1))))\n continue\n\n scaled_init = ocp.parameter_init[key].scale(ocp.parameters[key].scaling)\n collapsed_values[ocp.parameters[key].index, :] = scaled_init.init\n v_init = np.concatenate((v_init, np.reshape(collapsed_values.T, (-1, 1))))\n\n # For stochastic variables\n for i_phase in range(len(ocp.nlp)):\n nlp = ocp.nlp[i_phase]\n OptimizationVectorHelper._set_node_index(nlp, 0)\n\n for key in nlp.stochastic_variables.keys():\n if key in nlp.s_init.keys():\n nlp.s_init[key].check_and_adjust_dimensions(nlp.stochastic_variables[key].cx.shape[0], nlp.ns)\n\n for k in range(nlp.ns + 1):\n OptimizationVectorHelper._set_node_index(nlp, k)\n collapsed_values = np.ndarray((nlp.stochastic_variables.shape, 1))\n for key in nlp.stochastic_variables:\n if key in nlp.s_init.keys():\n value = nlp.s_init[key].init.evaluate_at(shooting_point=k) / nlp.s_scaling[key].scaling\n else:\n value = 0\n\n # Organize the stochastic variables according to the correct indices\n collapsed_values[nlp.stochastic_variables[key].index, 0] = value\n\n v_init = np.concatenate((v_init, np.reshape(collapsed_values.T, (-1, 1))))\n\n for i_phase in range(len(ocp.nlp)):\n nlp = ocp.nlp[i_phase]\n if nlp.motor_noise is not None:\n n_motor_noise = nlp.motor_noise.shape[0]\n n_sensory_noise = nlp.sensory_noise.shape[0]\n v_init = np.concatenate((v_init, np.zeros((n_motor_noise, 1))))\n v_init = np.concatenate((v_init, np.zeros((n_sensory_noise, 1))))\n\n return v_init", "title": "" }, { "docid": "f21f06197faa3e88badb2b42da306fe1", "score": "0.52086794", "text": "def __init__(self):\r\n self.inputs = []\r\n self.op = None\r\n self.const_attr = None\r\n self.name = \"\"", "title": "" }, { "docid": "7d475a0425781c6d7fa1542d8463f437", "score": "0.5186119", "text": "def __init__(self, name: tp.Optional[str] = None, dtype: np.dtype = jnp.float32):\n self.name = name if name else 
utils.lower_snake_case(self.__class__.__name__)\n self.dtype = dtype\n self._params = {}\n self._states = []\n self._submodules = []\n self._dynamic_submodules = []\n self._initialized = False\n self._trainable = True\n\n _init = self.init\n\n def init(*args, **kwargs):\n return _init(*args, **kwargs)\n\n self.init = init\n\n utils.wraps(self.call)(self.init)\n utils.wraps(self.call)(self)\n\n self._jit_functions()", "title": "" }, { "docid": "ede1473867441e7e3658e63982b4fb4e", "score": "0.5181392", "text": "def Do_kernel(self):\n return self.Kernel", "title": "" }, { "docid": "ec7c2366c48398d16ed6ca58d56cb137", "score": "0.5176585", "text": "def __init__(self, x=None):\n self.v = x\n self.cl = None\n self.cr = None\n return None", "title": "" }, { "docid": "89a718b8ed355be4f8f776d69cf7d2da", "score": "0.51614505", "text": "def __init__(self, pyo, t=None):\n if type(pyo) is Numeric.ArrayType:\n k = _arraytok(pyo) \n elif type(pyo) is _K:\n k = pyo\n if t and t != k.t:\n raise NotImplemented\n else:\n if not t:\n t = _guesst(pyo)\n else:\n if t > 0:\n pyt = _pyt[t-1]\n pyo = pyt(pyo)\n elif t < 0:\n pyt = _pyt[-t-1]\n pyo = map(pyt, pyo)\n kgen = _kgen(t)\n k = kgen(pyo)\n _K.__init__(self, k)", "title": "" }, { "docid": "b870f3d7f5b6706d9c9597f891c73f95", "score": "0.5157406", "text": "def _v__init__(self):", "title": "" }, { "docid": "0ba1f95b84ca10b7b07a5b2ccf6e80a2", "score": "0.514581", "text": "def __init__(self, shape, dtype):\n self._dtype = dtype\n self._sum = tf.Variable(lambda: tf.zeros(shape, dtype), False)\n self._count = tf.Variable(lambda: 0, trainable=False)", "title": "" }, { "docid": "ff5d8627423d0ced3926413340e0e896", "score": "0.51323915", "text": "def __init__(self, master=None, value=None, name=None):\n _TK.Variable.__init__(self, master, value, name)", "title": "" }, { "docid": "0bb7d41df8f841d7b93509b616075367", "score": "0.5131436", "text": "def __init__(self,kernel,args,invocations=None,type='VERTICAL'):\n\t\tself.args = []\n\t\tself.kernel = \"\"\n\t\tif(invocations == None):\n\t\t\ttemp = kernel.getSub(\"clEnqueueNDRangeKernel\")\n\t\t\tif(temp is not None):\n\t\t\t\tself.kernel = temp\n\t\t\telse:\n\t\t\t\tprint \"Invalid kernel statement\"\n\t\t\tfor arg in args:\n\t\t\t\ttemp = arg.getSub(\"clSetKernelArg\")\n\t\t\t\tif(temp is not None):\n\t\t\t\t\tif(temp.children[0] == self.kernel.children[1]):\n\t\t\t\t\t\tself.args.append((temp.children[2],temp.children[3]))\n\t\t\t\telse:\n\t\t\t\t\tprint \"invalid kernel argument\"\n\t\telse:\n\t\t\tself.kernel = copy.deepcopy(invocations[0].kernel)\n\t\t\tself.kernel.children[1].tokens[0].value = \"\"\n\t\t\tfor invocation in invocations:\n\t\t\t\tself.kernel.children[1].tokens[0].value += invocation.kernel.children[1].tokens[0].value.split('_')[0] + \"_\"\n\t\t\t\tself.args += invocation.args\n\t\t\tself.kernel.children[1].tokens[0].value += \"kernel\"\n\t\t\tif(type == 'HORIZONTAL'):\n\t\t\t\t\"\"\"Append an h\"\"\"\n\t\t\t\tself.kernel.children[1].tokens[0].value +='h'\t\n\t\t\tself.args = removeDuplicates2(self.args)", "title": "" }, { "docid": "429e749ed7cc10f710ceb9a638caf71f", "score": "0.5110538", "text": "def __init__(self, device_params = DEFAULT_DEVICE_PARAMS):\n self.device_names = list(device_params.keys())\n self.dirichlet_params = list(device_params.values())\n self.p_vector = dirichlet(self.dirichlet_params).rvs().reshape(-1,)", "title": "" }, { "docid": "e245d650938297578a09e6890a46c981", "score": "0.51071066", "text": "def __init__(self, potential, flow_operator_mask_v, flow_operator_mask_k):\n 
self._potential = potential\n self._v = potential.without_weights()\n self._k = potential.kinetic_energy()\n self._lam = potential.lam\n self._flow_op_mask_v = flow_operator_mask_v\n self._flow_op_mask_k = flow_operator_mask_k\n self._flow = 'lambda'", "title": "" }, { "docid": "07d03dfd8f3467b96acfbfa3c3a0d282", "score": "0.5101611", "text": "def __init__(\n self,\n kernel_op: get_kernel_fn.ApplyKernelOp,\n params: parameters_lib.SwirlLMParameters,\n scalar_name: str,\n thermodynamics: thermodynamics_manager.ThermodynamicsManager,\n ):\n self._kernel_op = kernel_op\n self._params = params\n self._bc_types = copy.deepcopy(params.bc_type)\n self._scalar_name = scalar_name\n self._thermodynamics = thermodynamics\n\n self._h = (self._params.dx, self._params.dy, self._params.dz)\n\n self._scalar_params = None\n for scalar in self._params.scalars:\n if scalar.name == self._scalar_name:\n self._scalar_params = scalar\n break\n assert (\n self._scalar_params is not None\n ), f'{self._scalar_name} is not configured.'\n\n for override_bc in self._scalar_params.override_bc_type:\n dim = override_bc.dim\n face = override_bc.face\n logging.info('BC type for scalar: %s '\n 'will be reset to %s (originally %s) for dimension %d at '\n 'face %d', self._scalar_name,\n str(boundary_condition_utils.BoundaryType.UNKNOWN),\n str(self._bc_types[dim][face]), dim, face)\n self._bc_types[dim][face] = (\n boundary_condition_utils.BoundaryType.UNKNOWN)\n\n # Find the direction of gravity. Only vector along a particular dimension is\n # considered currently.\n self._g_vec = (\n self._params.gravity_direction if self._params.gravity_direction else [\n 0.0,\n ] * 3)\n self._g_dim = None\n for i in range(3):\n if np.abs(np.abs(self._g_vec[i]) - 1.0) < _G_THRESHOLD:\n self._g_dim = i\n break\n\n # Prepare diffusion related models.\n self._diffusion_fn = diffusion.diffusion_scalar(self._params)\n self._use_sgs = self._params.use_sgs\n filter_widths = (self._params.dx, self._params.dy, self._params.dz)\n if self._use_sgs:\n self._sgs_model = sgs_model.SgsModel(self._kernel_op, filter_widths,\n params.sgs_model)", "title": "" }, { "docid": "c6c3eaa187c603279b9eacd627bd1e8a", "score": "0.5097064", "text": "def __init__(self, K, delta, c, np = None):\n\n self.K = float(K)\n self.K_int = int(K)\n self.delta = delta\n self.c = c\n\n S = self.calc_S()\n cdf, Z = gen_rsd_cdf(K, S, delta)\n self.cdf = cdf\n self.Z = Z\n\n #self.inter = inter.interp1d(np.concatenate(([0], cdf)), range(0,K+1))\n self.np_rand = RandomState(1)\n self.np = np\n\n self.state = 1", "title": "" }, { "docid": "b025b34a1936ef033d369b14517a6320", "score": "0.5088485", "text": "def __init__(self, ntok, ndim, init=df.init.ortho_svd()):\n df.Module.__init__(self)\n\n self.ndim = ndim\n self.W = self._addparam((ntok, ndim), init, name='Wemb_{}x{}'.format(ntok, ndim))", "title": "" }, { "docid": "670b1ee85ca5f25840cd22b783692716", "score": "0.5082001", "text": "def __init__(self, k: float = 1.0, x_shift: float = 0.0, y_shift: float = 0.0):\n\n self.constants = {self.k: k, self.x_shift: x_shift, self.y_shift: y_shift}\n self.V = self.V_functional.subs(self.constants)\n self.dVdpos = sp.diff(self.V, self.position)\n\n super().__init__()", "title": "" }, { "docid": "5ed8bb627d644964e64e4280b08c7088", "score": "0.50803864", "text": "def __init__(self, target: \"Expr\", operator: UnOp) -> None:\n self.target = target\n self.operator = operator", "title": "" }, { "docid": "1d3bce030f98c517a2d9c05a1062aa7d", "score": "0.5075545", "text": "def _kernel_init(scale=1.0, 
seed=None):\n scale = 2. * scale\n return tf.keras.initializers.VarianceScaling(\n scale=scale, mode='fan_in', distribution='truncated_normal', seed=seed)", "title": "" }, { "docid": "2e3b771b5507abee7a05e1de11e5d55b", "score": "0.50715685", "text": "def __init__(self, batch_size, embedding_size, k=None, device=None):\n super(SimpleStruct, self).__init__(batch_size, embedding_size)\n operations = [Operation.push, Operation.pop]\n self._reg_trackers = [None for _ in operations]\n self._read_strength = k\n self._values = []\n self._strengths = []\n self._device = device", "title": "" }, { "docid": "14767bc57bcafb626a78c7673062cca6", "score": "0.50543296", "text": "def __init__(self, tsr):\n super(DenseLinearOperator, self).__init__(tsr)\n self.tensor = tsr", "title": "" }, { "docid": "b638c603a0064f8e19d723f0d6a1a7fb", "score": "0.5053925", "text": "def _initialise_state(self) -> None:\n super()._initialise_state()\n self.test_window = tf.gather(self.x_ref_eff, self.init_test_inds)\n self.k_xtc = self.kernel(self.test_window, self.kernel_centers)", "title": "" }, { "docid": "1f8713d2cff480f49e5a16bb5caa601b", "score": "0.5046923", "text": "def __init__(self,\n kernel,\n batch_size,\n max_num_context,\n x_size=1,\n y_size=1,\n testing=False,\n device = torch.device(\"cpu\")):\n self.kernel = kernel\n self._batch_size = batch_size\n self._max_num_context = max_num_context\n self._x_size = x_size\n self._y_size = y_size\n self._testing = testing\n self.device = device", "title": "" }, { "docid": "827ed719975ed277602a055df1fe7bc9", "score": "0.50449395", "text": "def __init__(self, initializer_params={}, verbose=True, seed=0):\n\n super(Constant, self).__init__(initializer_params, verbose, seed)", "title": "" }, { "docid": "5277911cc013a07ef6cceb5bcfc2fc05", "score": "0.5040688", "text": "def __init__(self, vec, evotype=\"auto\"):\n vec = SPAMVec.convert_to_vector(vec)\n if evotype == \"auto\":\n evotype = \"statevec\" if _np.iscomplexobj(vec) else \"densitymx\"\n assert(evotype in (\"statevec\", \"densitymx\")), \\\n \"Invalid evolution type '%s' for %s\" % (evotype, self.__class__.__name__)\n DenseSPAMVec.__init__(self, vec, evotype)", "title": "" }, { "docid": "8f2a5100d4a03294b66cfeb709965644", "score": "0.5024744", "text": "def kernel(self, *args):\n raise NotImplementedError()", "title": "" }, { "docid": "f077d60cbbc17c88aa5fb673dd71f0c3", "score": "0.5020952", "text": "def __init__(self, vec, evotype=\"auto\"):\n vec = SPAMVec.convert_to_vector(vec)\n if evotype == \"auto\":\n evotype = \"statevec\" if _np.iscomplexobj(vec) else \"densitymx\"\n elif evotype == \"statevec\":\n vec = _np.asarray(vec, complex) # ensure all statevec vecs are complex (densitymx could be either?)\n\n assert(evotype in (\"statevec\", \"densitymx\")), \\\n \"Invalid evolution type '%s' for %s\" % (evotype, self.__class__.__name__)\n DenseSPAMVec.__init__(self, vec, evotype)", "title": "" }, { "docid": "5b73f58df49cdc4fb04ea592cd5a8341", "score": "0.5013608", "text": "def _define_kernel(self):\n def k(x,y,\n dist=self.dist,\n commonKernel=self.commonKernel,\n blockKernel=self.blockKernel):\n # Geodesic distance. 
\n d=dist(x[1:],y[1:])\n\n # Calculate the underlying kernel for all subjects.\n cov=commonKernel(d)\n\n # Add individual specific covariance.\n if x[0]==y[0]:\n cov+=blockKernel[int(x[0])](d)\n return cov\n \n self.kernel=k\n \n # Update GPR under-the-hood that relies on this kernel.\n self._update_gp()", "title": "" }, { "docid": "c357b6703e4e6f0eb142d7347a729a71", "score": "0.50009483", "text": "def _init_variable(self, name, val):\n # return tf.get_variable(name, (), dtype=tf.float32, trainable=False,\n # initializer=tf.constant_initializer(val))\n return tf.constant(name=name, shape=(), dtype=tf.float32, value=val)", "title": "" }, { "docid": "b119ae4642062f2847a8127122d8ef4f", "score": "0.50004977", "text": "def __init__(self, n_features, kernel):\n self.n_features = n_features\n self.kernel = kernel", "title": "" }, { "docid": "77a92b259d5be88049f3e71e4212ebd7", "score": "0.49999592", "text": "def __init__(self, kernel, itspace_extents, *args, **kwargs):\n if self._initialized:\n return\n self._parloop = kwargs.get('parloop')\n self.comm = itspace_extents.comm\n self._kernel = self._parloop._kernel\n self._config = kwargs.get('config')\n self._initialized = True", "title": "" }, { "docid": "5ad8752674ae08b5ffe6f6e6d4d2aa61", "score": "0.49982068", "text": "def __init__(self\n , **kwargs\n ):\n self.t_functorArgs = {}\n self.t_cstArgs = {}\n \n # The data to which the functor is applied\n self.o_vectorDataSet = None\n \n # The functor returned data\n self.x_fData = None\n \n # First get T_FUNCTOR_ARGS\n for t_funcArgTuple in self.__class__.getFunctorArgs():\n \n s_funcArgKey = t_funcArgTuple[ self.U_FUNC_ARG_KEY_INDEX ]\n b_required = t_funcArgTuple[ self.U_FUNC_ARG_REQUIRED_INDEX ]\n \n if not kwargs.has_key( s_funcArgKey ):\n if b_required:\n raise QArkFunctorMissingRequiredFunctorArgError( self.__class__.__name__, s_funcArgKey )\n else:\n self.t_functorArgs[ s_funcArgKey ] = None \n else:\n # Get the QArkFunctorArg\n o_functorArg = kwargs[ s_funcArgKey ]\n self.t_functorArgs[ s_funcArgKey ] = o_functorArg.getValue()\n \n # Get the constant value parameters\n for t_cstArgTuple in self.__class__.getCstArgs():\n \n s_cstArgKey = t_cstArgTuple[ self.U_FUNC_ARG_KEY_INDEX ]\n b_required = t_cstArgTuple[ self.U_FUNC_ARG_REQUIRED_INDEX ]\n \n if not kwargs.has_key( s_cstArgKey ):\n if b_required:\n raise QArkFunctorMissingRequiredFunctorArgError( self.__class__.__name__, s_cstArgKey )\n else:\n self.t_cstArgs[ s_cstArgKey ] = None \n else:\n # Get the QArkFunctorArg\n o_cstArg = kwargs[ s_cstArgKey ]\n self.t_cstArgs[ s_cstArgKey ] = o_cstArg.getValue()", "title": "" }, { "docid": "2dea2f57a1f90dc4b827a67ba680564f", "score": "0.4976893", "text": "def __init__(self, env, k):\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.states = deque([], maxlen=k)\n shp = self.observation_space.n\n self.observation_space = (k, shp)", "title": "" }, { "docid": "3b767c6f0501614ef6a73b62c8e8eaa9", "score": "0.49731472", "text": "def init_op_pattern():\n fusion_manager.init_current_op_pattern()", "title": "" }, { "docid": "1e53c3d8b3fa5a4ff22e11b3435270ca", "score": "0.49591565", "text": "def init(self, net_dim, state_dim, action_dim):", "title": "" }, { "docid": "1e53c3d8b3fa5a4ff22e11b3435270ca", "score": "0.49591565", "text": "def init(self, net_dim, state_dim, action_dim):", "title": "" }, { "docid": "49a81ef952dca4e2e241406f697edc54", "score": "0.49527246", "text": "def __init__(self, X, mu, p):\n super(DotProductKernelMap, self).__init__(mu, p)\n self.n = X.shape[0]\n self.d = X.shape[1]\n 
self.X = X\n self.Gs_train = None", "title": "" }, { "docid": "79466e46f7a122d0032ad903f386dd53", "score": "0.4948092", "text": "def init_accumulators(self):\n torch.nn.init.constant_(self.mu_numerator, self.eps)\n torch.nn.init.constant_(self.mu_denominator, self.eps * self.C)\n torch.nn.init.constant_(self.var_numerator, self.eps)\n torch.nn.init.constant_(self.var_denominator, self.eps * self.C)", "title": "" }, { "docid": "251831a91ce68da00733b1975980b5f5", "score": "0.49430963", "text": "def __init__(self, **kwargs):\n LOG.info(\"%s: initing...\" % str(self))\n super(JansenRitDavid, self).__init__(**kwargs)\n\n #self._state_variables = [\"y0\", \"y1\", \"y2\", \"y3\", \"y4\", \"y5\"]\n self._nvar = 8\n\n self.cvar = numpy.array([1,2], dtype=numpy.int32)\n\n #TODO: adding an update_derived_parameters method to remove some of the\n # redundant parameter multiplication in dfun should gain about 7%\n # maybe not worth it... The three exp() kill us at ~90 times *\n #self.nu_max2 = None #2.0 * self.nu_max\n #self.Aa = None # self.A * self.a\n #self.Bb = None # self.B * self.b\n #self.aa = None # self.a**2\n #self.a2 = None # 2.0 * self.a\n #self.b2 = None # 2.0 * self.b\n #self.a_1J = None # self.a_1 * self.J\n #self.a_2J = None # self.a_2 * self.J\n #self.a_3J = None # self.a_3 * self.J\n #self.a_4J = None # self.a_4 * self.J\n\n LOG.debug('%s: inited.' % repr(self))", "title": "" }, { "docid": "8b8907041e456b0c34a14714a13a8534", "score": "0.4932093", "text": "def init(self):\n self._model = {}\n self._value_function = {}\n self._terminal_state_set = set()\n self._state_action = None", "title": "" }, { "docid": "5399a96be429116cc61fcfd03f89ba41", "score": "0.49313113", "text": "def _do_data_dependent_init():\n w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0])\n output_init = tf.matmul(embeddings, w_fc_normalized)\n mean_init, var_init = tf.nn.moments(output_init, [0])\n # Data-dependent init values.\n g_init_value = 1. 
/ tf.sqrt(var_init + 1e-10)\n ops = [tf.assign(g, g_init_value)]\n if not cosine_classifier:\n # Also initialize a bias in a data-dependent way.\n b_fc_init_value = -mean_init * g_init_value\n ops.append(tf.assign(b_fc, b_fc_init_value))\n # Mark that the data-dependent initialization is done to prevent it from\n # happening again in the future.\n ops.append(tf.assign(data_dependent_init_done, 1))\n return tf.group(*ops)", "title": "" }, { "docid": "44c5613c40d97e6454db8adf43bf569c", "score": "0.49255443", "text": "def init_variable(v, init, name=\"init\"):\n with ops.name_scope(None, v.op.name + \"/\", [v, init]):\n with ops.name_scope(name) as scope:\n with ops.colocate_with(v):\n if callable(init):\n assert v.get_shape().is_fully_defined(), \"Variable shape unknown.\"\n # TODO(mrry): Convert to v.shape when the property and\n # accessor are reconciled (and all initializers support\n # tf.TensorShape objects).\n value = init(v.get_shape().as_list(), v.dtype.base_dtype)\n value = ops.convert_to_tensor(value, name=\"value\")\n return gen_state_ops.assign(v, value, name=scope)\n else:\n init = ops.convert_to_tensor(init, name=\"init\")\n return gen_state_ops.assign(v, init, name=scope)", "title": "" }, { "docid": "7ad5823f394b26d1fc23bd233430ddc7", "score": "0.4921869", "text": "def __init__(self, component, name, shape, dtype):\n self._name = name\n self._shape = shape\n self._component = component\n beta = tf.get_variable(\n 'beta_%s' % name,\n shape=shape,\n dtype=dtype,\n initializer=tf.zeros_initializer())\n gamma = tf.get_variable(\n 'gamma_%s' % name,\n shape=shape,\n dtype=dtype,\n initializer=tf.ones_initializer())\n self._params = [beta, gamma]", "title": "" }, { "docid": "0729f28466be1f8662a2650d69888e6c", "score": "0.49182227", "text": "def __init__(self, name, **okwargs):\n assert name in AVAILABLE_KERNELS, \"Unknown kernel %s\" % name\n super(SparseKernel, self).__init__(name, **okwargs)\n self.use_rbf = okwargs.get('use_rbf', name in RBF_KERNELS)\n if name not in RBF_KERNELS and self.use_rbf:\n raise ValueError('Cannot use RBF with non-RBF kernel {}'.format(name))\n # get the module used to compute the sparse distances (FIXME ugly)\n spdist = None\n exec('import sparse_distances.sparse_%s as spdist' % name)\n # define \"raw\" distance functions between sparse vectors (from C library)\n self._v2v_f = spdist.v2v # raw dist. between vectors\n self._v2m_f = spdist.v2m # raw dist. between a vector and a matrix\n self._m2m_f = spdist.m2m # raw dist. 
between a (test) matrix and a (train) matrix\n self._gram_f = spdist.gram # raw gram matrix of a list of vectors\n if name in ('linear', 'intersection'):\n self.is_additive = True", "title": "" }, { "docid": "696f0b83531e773d2f9e7679ffa66996", "score": "0.49122977", "text": "def __init__(self, dataset, kernel, model=Exact(), mean=None, name=None, rescale_x=False):\n \n if not isinstance(dataset, DataSet):\n dataset = DataSet(dataset)\n if dataset.get_output_dims() == 0:\n raise ValueError(\"dataset must have at least one channel\")\n names = [name for name in dataset.get_names() if name is not None]\n if len(set(names)) != len(names):\n raise ValueError(\"all data channels must have unique names\")\n\n if rescale_x:\n dataset.rescale_x()\n else:\n for channel in dataset:\n for dim in range(channel.get_input_dims()):\n xran = np.max(channel.X[dim].transformed) - np.min(channel.X[dim].transformed)\n if xran < 1e-3:\n logger.warning(\"Very small X range may give problems, it is suggested to scale up your X axis\")\n elif 1e4 < xran:\n logger.warning(\"Very large X range may give problems, it is suggested to scale down your X axis\")\n\n self.name = name\n self.dataset = dataset\n self.kernel = kernel\n\n X = [[x[channel.mask] for x in channel.X] for channel in self.dataset]\n Y = [np.array(channel.Y[channel.mask]) for channel in self.dataset]\n x, y = self._to_kernel_format(X, Y)\n\n self.model = model.build(kernel, x, y, mean, name)\n if issubclass(type(kernel), MultiOutputKernel) and issubclass(type(model), Exact):\n self.model.variance.assign(0.0, lower=0.0, trainable=False) # handled by MultiOutputKernel", "title": "" }, { "docid": "8cf3117e76c5fabe7b753fb2c1e95073", "score": "0.49072382", "text": "def __init__(self,oscillators,comb_op):\n self.oscillators=oscillators\n self.comb_op=comb_op", "title": "" }, { "docid": "6a80a836cd045a2779e2c83a489f40bd", "score": "0.4900156", "text": "def __init__(self, **kwargs):\n LOG.info('%s: initing...' % str(self))\n super(LileySteynRoss, self).__init__(**kwargs)\n #self._state_variables = [\"E\", \"I\"]\n self._nvar = 2\n self.cvar = numpy.array([0, 1], dtype=numpy.int32)\n LOG.debug('%s: inited.' 
% repr(self))", "title": "" }, { "docid": "a26bdf4c22d69c8784e808227fb4785a", "score": "0.4893577", "text": "def __init__(self, name, copy=0, val=0, size=1, **kwargs):\r\n self.value = val\r\n if isinstance(self.value, numpy.ndarray):\r\n self.size = self.value.shape[0]\r\n elif isinstance(self.value, list):\r\n self.size = len(self.value)\r\n else:\r\n self.size = size\r\n\r\n super(IndVar, self).__init__(name, copy, **kwargs)\r\n \r\n if 'u_scal' in self.kwargs:\r\n self.u_scal = self.kwargs['u_scal']\r\n else:\r\n self.u_scal = 1.0\r\n if 'f_scal' in self.kwargs:\r\n self.f_scal = self.kwargs['f_scal']\r\n else:\r\n self.f_scal = 1.0", "title": "" }, { "docid": "7f13d984fb9004cdb7e465aa197e71b8", "score": "0.4890825", "text": "def __init__(self, pool_size, stride, padding=\"VALID\"):\n super().__init__()\n self.kernel_shape = pool_size\n self.stride = stride\n\n self.padding_mode = padding\n self.padding = None\n\n self.input = None\n self.X = None", "title": "" }, { "docid": "20d37517a9cbdc62dadf4417a61d5731", "score": "0.48837817", "text": "def __init__(self):\n self.da = DynamicArray()", "title": "" }, { "docid": "b0d23029791c70659c983e74c8c7aa81", "score": "0.48798135", "text": "def setKernel(self, k):\n self.__kernel = k", "title": "" }, { "docid": "2b365597f362f5204053091ab359913b", "score": "0.48740718", "text": "def __init__(\n self,\n dtensor_components: Tuple[tensor.Tensor],\n global_element_spec: tensor_spec.TensorSpec,\n layouts: Any):\n # dtensor_components is expected to be a single-element tuple.\n [self._iterator_resource_dtensor] = dtensor_components\n self._global_element_spec = global_element_spec\n self._layouts = layouts\n self._layouts_str = nest.map_structure(\n lambda layout: layout.to_string(), layouts)\n\n super().__init__(\n components=dtensor_components, element_spec=global_element_spec)", "title": "" }, { "docid": "0a7fe2b29bb7123c470f655eedadb589", "score": "0.48736146", "text": "def __init__(self, stride_shape, convolution_shape, num_kernels):\n\n # Inherit parent attributes\n super().__init__()\n\n self.stride_shape = stride_shape\n self.stride_y, self.stride_x = 0, 0\n\n self.convolution_shape = convolution_shape\n self.input_tensor_shape = None\n self.input_tensor = None\n\n self.num_kernels = num_kernels\n\n # Initialize the filter uniformly random in the range [0, 1) as tensor of size: [Nr_kernels x C x M (x N)].\n self.weights = np.random.uniform(size=(self.num_kernels, *self.convolution_shape))\n\n # Initialize the bias uniformly random in the range [0, 1) for each kernel as a single value.\n self.bias = np.random.uniform(size=self.num_kernels)\n\n self.gradient_weights = None # Gradient with respect to the weights.\n self.gradient_bias = None # Gradient with respect to the bias.\n\n self.optimizer = None # Optimizer of this layer.", "title": "" }, { "docid": "c1c2bb027440f1d4c616441288b1acd3", "score": "0.48729536", "text": "def __init__(self, *args):\n this = _openmm.new_VariableVerletIntegrator(*args)\n try:\n self.this.append(this)\n except:\n self.this = this", "title": "" }, { "docid": "a66eeb8f71b64a13fbd03d9940288a98", "score": "0.48687768", "text": "def _scalar_op(self, op, o):\n new_parts = []\n new_terms = {term: getattr(term, op)(o) for term in self.expgate_sets.keys()}\n new_parts = ({targets: new_terms[term]\n for targets, term in part.items()}\n for part in self.parts)\n new = self.__class__(*new_parts)\n if self._dense is not None:\n new.dense = getattr(self.dense, op)(o)\n if self._circuit is not None:\n new._circuit = 
self._circuit\n new._circuit.dt = None\n new.expgate_sets = {new_terms[term]: gate_set\n for term, gate_set in self.expgate_sets.items()}\n return new", "title": "" }, { "docid": "03d9055aa5ebd994aa9b36d5ecc6417d", "score": "0.4867485", "text": "def __init__(self):\n self.node: Node[K, V] = EmptyNode()", "title": "" }, { "docid": "00babe8f5fa91fe8a9f10ef7c506e6ef", "score": "0.4864531", "text": "def __init__(self, rho: float, batch_multiplier: float, lambda_vae: float, lambda_cos: float, lambda_l1: float):\n self.rho = rho\n self.lambda_vae = lambda_vae\n self.lambda_cos = lambda_cos\n self.lambda_l1 = lambda_l1\n self.batch_multiplier = batch_multiplier", "title": "" }, { "docid": "665007793f12574d1c320285c82d3a00", "score": "0.48640746", "text": "def __init__(self, target, key, values=0, active=0):\n \n #Initialized variables\n self.target = target\n self.key = key\n self.values = values\n self.active = active", "title": "" }, { "docid": "6f42e931ee21cb79a96e0b09c1f99d14", "score": "0.48613185", "text": "def __init__(self, dim): #, length_scale, length_scale_bounds=()):\n# assert isinstance(column, (list, tuple, int)), \"must be int or list of ints\"\n# self.column = [column] if isinstance(column, int) else column\n# assert all(isinstance(i, int) for i in self.column), \"must be integers\"\n self.dim = dim\n \n kernels = [Projection(RBF(), [c]) for c in range(dim)]\n\n # combine the kernels into a single product kernel\n self.kernel = reduce(lambda k0, k1 : k0 * k1, kernels)", "title": "" }, { "docid": "91dee435523b3f227ca893f9e528764f", "score": "0.48593938", "text": "def __init__(self, operator_id):\n\n _base.Base.__init__(self, _types.COMPONENT_TYPE_OPERATOR)\n self.__op_id = operator_id", "title": "" }, { "docid": "54500e01c5648df0d8f588ea9a0ff3b6", "score": "0.48528546", "text": "def init(self) -> None:\n self.layout = self.attrs['data_layout']\n\n logger.info(\"Init {} layer: {}\".format(\n 'Conv2D' if self.kernel_groups == 1 else 'DepthWiseConv2D',\n self.layout))\n logger.info(self.input_shapes)\n\n input_shapes, kernel, biases = \\\n self.input_shapes, self.kernel, self.biases\n if kernel is not None:\n kernel = \\\n tf.compat.v1.placeholder_with_default(kernel, kernel.shape)\n else:\n kernel = \\\n tf.compat.v1.placeholder(RtLayerTF.dtype_to_tf[self.dtype],\n shape=input_shapes[1])\n\n if biases is not None:\n biases = \\\n tf.compat.v1.placeholder_with_default(biases, biases.shape)\n else:\n biases = \\\n tf.compat.v1.placeholder(RtLayerTF.dtype_to_tf[self.dtype],\n shape=input_shapes[2])\n\n inpt = \\\n tf.compat.v1.placeholder(RtLayerTF.dtype_to_tf[self.dtype],\n shape=input_shapes[0])\n\n self.inpts = [inpt, kernel, biases]\n\n self.quant_output = self._get_conv_tensor(self.inpts)\n self.res = self.get_output_tensors(self.inpts)[0]\n logger.info(\"Res shape: {}\".format(self.res.shape))", "title": "" }, { "docid": "f3c96da5ae9c96c7d103042f8d26c505", "score": "0.4852539", "text": "def glv_init(self):\n pass", "title": "" }, { "docid": "453f264a9edca02a883467f14ebb4701", "score": "0.4848578", "text": "def __init__(self, sg_values): \n self.__a = {} \n self.__init_coefs(sg_values)", "title": "" }, { "docid": "ca7dd23f379b010e3624e42573da451f", "score": "0.48449957", "text": "def __init__(self):\n self.U, self.s, self.V = (None,None,None)\n self.user_encoder = None\n self.item_encoder = None\n self.mat = None\n self.decomp = False", "title": "" }, { "docid": "159c7de3ad234f5ac44629ce829f08c0", "score": "0.48449928", "text": "def __init__(self, *args):\n 
_gp.gp_Vec2d_swiginit(self,_gp.new_gp_Vec2d(*args))", "title": "" } ]
b385b0580c451d27151a0b7f7ebf8293
Return a restricted zscore.
[ { "docid": "d9b8dbd9271cb251514b6863c4b30b2c", "score": "0.70605326", "text": "def zscoreOtherRestricted(measure, power, median, variationCoefficient, computeFinalZScore):\n zscoreNorm = zscore(measure, power, median, variationCoefficient)\n if math.fabs(zscoreNorm) > 3 and computeFinalZScore:\n if zscoreNorm > 3:\n std3Pos = cutoff(3, power, median, variationCoefficient)\n std23Pos = std3Pos - cutoff(2, power, median, variationCoefficient)\n zscoreNorm = 3 + ((measure - std3Pos) / std23Pos)\n elif zscoreNorm < 3:\n std3Neg = cutoff(-3, power, median, variationCoefficient)\n std23Neg = cutoff(-2, power, median, variationCoefficient) - std3Neg\n zscoreNorm = -3 + ((measure - std3Neg) / std23Neg)\n return zscoreNorm", "title": "" } ]
[ { "docid": "9ef0a40399c631949df3b99157924f0d", "score": "0.72016364", "text": "def get_zscore(self, zscore_value):\n\n s = self.get_score(self.prov_types['zscore'], zscore_value)\n return s", "title": "" }, { "docid": "953a1114c01dd842fe0a8694202b17b3", "score": "0.7117339", "text": "def zscore(x):\n return (x - x.mean()) / x.std()", "title": "" }, { "docid": "9394d33a1d8cdcc699a9e78d16db20d9", "score": "0.6993641", "text": "def zscore(rate, mean, std):\n zscore = (rate - mean) / std\n return zscore", "title": "" }, { "docid": "5460d67193924f289c030ec4183feda1", "score": "0.6947195", "text": "def compute_z_score(self):\n\t\tx_scaled = (self.x - self.x.mean())/self.x.std()\n\t\treturn x_scaled", "title": "" }, { "docid": "af3719b97714cf1fbb5f7b1abf8762cc", "score": "0.6893497", "text": "def zscore(raw, mean, stddev):\n\n zscore = (raw - mean) / stddev\n\n return zscore", "title": "" }, { "docid": "23dac8796a09bd083c06ea7a94804d96", "score": "0.6865544", "text": "def zscore(x, mu, std):\n\n return (x-mu)/std", "title": "" }, { "docid": "d3d988f044779167aeff2c0b2dd226e9", "score": "0.6859767", "text": "def z_score(self, x):\n return (x - self.mean) / self.stddev", "title": "" }, { "docid": "d3d988f044779167aeff2c0b2dd226e9", "score": "0.6859767", "text": "def z_score(self, x):\n return (x - self.mean) / self.stddev", "title": "" }, { "docid": "d3d988f044779167aeff2c0b2dd226e9", "score": "0.6859767", "text": "def z_score(self, x):\n return (x - self.mean) / self.stddev", "title": "" }, { "docid": "d0b7c03dfe37befe5b0e7529dbc69e0f", "score": "0.67672366", "text": "def zscore(self, *args):\n if self._cluster:\n return self.execute(u'ZSCORE', *args, shard_key=args[0])\n return self.execute(u'ZSCORE', *args)", "title": "" }, { "docid": "963aafe67d75b527d31b4529d57ce7a3", "score": "0.66174215", "text": "def get_worst_zscore(self):\n worst_zscore = None\n for indicator in VisitStatistics.INDICATORS:\n if hasattr(self, indicator):\n zandp = getattr(self, indicator)\n if zandp and not healthdb.util.isNaN(zandp.zscore):\n if worst_zscore is None or worst_zscore > zandp.zscore:\n# logging.info(\"new worst_zscore = %s\" % zandp.zscore)\n worst_zscore = zandp.zscore\n# logging.info(\"worst_zscore = %s\" % worst_zscore)\n return worst_zscore", "title": "" }, { "docid": "dd34f7cb38892869ec81637ad9eb5f5b", "score": "0.64894617", "text": "def _max_score(self):\n try:\n return self.zscore(self.__getitem__(-1))\n except IndexError:\n return None", "title": "" }, { "docid": "79af348f22e96b9985b97269f8c1892b", "score": "0.6463926", "text": "def test_getZScores(self):\n values = [12,13,9,18,7,9,14,16,10,12,7,13,14,19,10,16,12,16,19,11]\n arr = numpy.array(values,dtype=numpy.float64)\n\n zscore = Stats.getZscore(arr)\n self.assertAlmostEqual(1.63977, zscore[4], places = 4)\n self.assertAlmostEqual(0.32235, zscore[6], places = 4)\n\n modZ = Stats.getModifiedZscore(arr)\n self.assertAlmostEqual(1.23658, modZ[4], places = 4)\n self.assertAlmostEqual(0.33725, modZ[6], places = 4)\n\n # Test the sorted argument still works. 
Remove this when the function is removed\n # sorted=True only ever affected the order\n zscore = Stats.getZscore(arr, sorted=True)\n self.assertAlmostEqual(1.63977, zscore[4], places = 4)\n self.assertAlmostEqual(0.32235, zscore[6], places = 4)", "title": "" }, { "docid": "0940662b45e0cde7a986b1275d9724c5", "score": "0.6352521", "text": "def _min_score(self):\n try:\n return self.zscore(self.__getitem__(0))\n except IndexError:\n return None", "title": "" }, { "docid": "20f5abc792709c0f3acd896f348248a3", "score": "0.6240699", "text": "def zscore(self, elem):\n return self.redis.zscore(self.key, self.serialize(elem))", "title": "" }, { "docid": "80a138434df927a55d202d6a4c6817ea", "score": "0.62341267", "text": "def zscore(data):\n datalength = len(data)\n try:\n data = (data - np.mean(data)) / np.std(data)\n except:\n data = np.zeros(datalen)\n\n return data", "title": "" }, { "docid": "a3d418bed63651d05af4a8c8dd922b7b", "score": "0.62231916", "text": "def _impaired_or_not(z_score, cutoff):\n if z_score <= cutoff:\n return 1\n else:\n return 0", "title": "" }, { "docid": "ac30978fc86ba5d8a99918673b987d7b", "score": "0.6182298", "text": "def compute_zscore(self, data, slope, intercept):\n spread = (data['PEP'].price - (slope * data['KO'].price + intercept))\n self.spreads.append(spread)\n spread_wind = self.spreads[-self.window_length:]\n zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)\n return zscore", "title": "" }, { "docid": "d5f1571d2b2bce9a8648d59f5f8a68e9", "score": "0.6143231", "text": "def get_highest_z(self) -> float:\n assert False, \"get_highest_z only supported on engine core\"", "title": "" }, { "docid": "24308779304fdd0ed05600ca107a58aa", "score": "0.6113914", "text": "def z_score_xr(dr):\n other_dims = [d for d in dr.dims if d != 'neuron']\n return (dr - dr.mean(other_dims)) / dr.std(other_dims)", "title": "" }, { "docid": "3df5e6e4b48aeafa746a58814735697b", "score": "0.61133456", "text": "def z_score(x, lst):\n if not lst or not len(lst):\n return nan\n mu = mean(lst)\n sdv = sd(lst)\n return (x - mu) / sdv", "title": "" }, { "docid": "1e236c46b9707f1430564c47d6d8750d", "score": "0.61009943", "text": "def score(self, member):\n return self.zscore(member)", "title": "" }, { "docid": "9dbe17dd42a08fcd259adccf4b4b02c6", "score": "0.609521", "text": "def scale_zscore(zscore, mean, std):\n zscaled = zscore * std + mean\n return zscaled", "title": "" }, { "docid": "2523ad7970c4346a9b39754236d4f142", "score": "0.6089465", "text": "def z(inlist, score):\n z = (score - mean(inlist)) / samplestdev(inlist)\n return z", "title": "" }, { "docid": "022687a206d6cdb454293405fc51f052", "score": "0.6046075", "text": "def _to_z_score(scaled_score, expected_score, test):\n denominator_dict = {'sdmt': 2.790,\n 'bvmt': 2.793,\n 'cvlt': 2.801}\n\n denominator = denominator_dict.get(test)\n\n z_score = (scaled_score - expected_score)/denominator\n\n return z_score", "title": "" }, { "docid": "db42d74e4ba1ed69cf6c45ab5c7544ee", "score": "0.6023797", "text": "def get_score(self) -> float:\n pass", "title": "" }, { "docid": "7a2947259caa75efb1d8d35827aa97bb", "score": "0.60219604", "text": "def score(self, member):\r\n return self.client.zscore(self.name, member)", "title": "" }, { "docid": "c6f3a0bf0ba08e71441d9017bef9d20b", "score": "0.6010244", "text": "def z_score(self):\n control = VariantStat(self.experiment.control, self.experiment)\n\n alternative = self\n\n if control.name == alternative.name:\n return 'N/A'\n\n conv_c = control.conversion_rate\n conv_a = 
alternative.conversion_rate\n\n num_c = control.participant_count\n num_a = alternative.participant_count\n\n if conv_c == 0 or conv_a == 0:\n return 0\n\n numerator = conv_a - conv_c\n\n frac_c = (conv_c * (1 - conv_c)) / float(num_c)\n frac_a = (conv_a * (1 - conv_a)) / float(num_a)\n\n if frac_c + frac_a == 0:\n # square root of 0 is 0, so no need to calculate\n return 0\n elif frac_c + frac_a < 0:\n # can't take a square root of a negative number,\n # so return 'Invalid'\n return 'Invalid'\n\n return numerator / math.sqrt((frac_c + frac_a))", "title": "" }, { "docid": "db9623e3168adac427129f947467fe01", "score": "0.59958607", "text": "def SquaredZScoreInWorldSpace(self, point: 'itkPointD3') -> \"double\":\n return _itkGaussianSpatialObjectPython.itkGaussianSpatialObject3_SquaredZScoreInWorldSpace(self, point)", "title": "" }, { "docid": "d414d8a0b9dac5c06073d50832ad5685", "score": "0.59949404", "text": "def SquaredZScoreInObjectSpace(self, point: 'itkPointD3') -> \"double\":\n return _itkGaussianSpatialObjectPython.itkGaussianSpatialObject3_SquaredZScoreInObjectSpace(self, point)", "title": "" }, { "docid": "cbeb0aa9fca4b72da8f4275f00e9ab99", "score": "0.59595954", "text": "def Zscore(self, kmer):\n mean = self.eValue(kmer)\n N = self.N\n p = mean/N #finds p from the mean.\n stdDev = (N*p*(1 - p))**0.5 #finds stdDeviation\n\n Ztop = self.kmerCounts[kmer][0] - mean\n \n return Ztop/stdDev", "title": "" }, { "docid": "35d13d515807810bbea8099b2ee5a663", "score": "0.59485126", "text": "def z_score(dist, num):\n mu = float(sum((key * val for (key, val)\n in zip(dist.keys(),\n dist.values())))) / 1000\n\n # print show_hist(dist)\n print \"mean is: \" + str(mu)\n sigma = math.sqrt(float(sum(((key - mu) ** 2) * val for (key, val)\n in zip(dist.keys(),\n dist.values()))) / 1000)\n\n print \"standart is: \" + str(sigma)\n print \"tripple st dev:\" + str(mu + 3 * sigma)\n return (num - mu) / sigma", "title": "" }, { "docid": "9e774de9fc5eaa8a9a96da62a24700f2", "score": "0.5948267", "text": "def set_zscores(self):\n pass", "title": "" }, { "docid": "4f76f124efa1c1489560ceaaa1b1057f", "score": "0.5924716", "text": "def SquaredZScoreInObjectSpace(self, point: 'itkPointD2') -> \"double\":\n return _itkGaussianSpatialObjectPython.itkGaussianSpatialObject2_SquaredZScoreInObjectSpace(self, point)", "title": "" }, { "docid": "ecd7b02f7a5dde6074639eceee4fb1a3", "score": "0.59235656", "text": "def sorted_score(self, k: str, m: str) -> Union[float, None]:\n conn = self.get_conn()\n return conn.zscore(k, m)", "title": "" }, { "docid": "94d7ebb287e80fdfa23f90a6c66fe585", "score": "0.59227055", "text": "def SquaredZScoreInWorldSpace(self, point: 'itkPointD2') -> \"double\":\n return _itkGaussianSpatialObjectPython.itkGaussianSpatialObject2_SquaredZScoreInWorldSpace(self, point)", "title": "" }, { "docid": "db854da7e2241d1c9618ba95aa0b6a8b", "score": "0.5864021", "text": "def z_eval(self):\n if not hasattr(self, '_zeval'):\n\n if self._args['evaluate_mc_at_zlens']:\n self._zeval = self.z\n else:\n self._zeval = self.z_infall\n\n return self._zeval", "title": "" }, { "docid": "81c40100f9e55858b0ab51e81aa4ce62", "score": "0.5846473", "text": "def z(self) -> float:\n return self._z", "title": "" }, { "docid": "8ac6c28e2e253ff235afab6da251fe3a", "score": "0.58452505", "text": "def threshold_by_zscore(zscored_data, time, minimum_duration=0.015, zscore_threshold=2):\n is_above_mean = zscored_data >= 0\n is_above_threshold = zscored_data >= zscore_threshold\n\n return extend_threshold_to_mean(\n is_above_mean, 
is_above_threshold, time, minimum_duration=minimum_duration\n )", "title": "" }, { "docid": "5f68c7214412be2bd4790b674ebe0ad7", "score": "0.5815507", "text": "def get_score(self):\n pass", "title": "" }, { "docid": "e18da72954e262bc2bc0eb509ab11a46", "score": "0.5778263", "text": "def wZ(self, z):\r\n return self.w0 * np.sqrt(1 + (z / self.zR) ** 2)", "title": "" }, { "docid": "1b3ce039d4fce67e51f63cdff71b27f1", "score": "0.5752515", "text": "def z_critical_value(cls, alpha, dir):\n if dir == cls.TWO_TAILED_TEST:\n alpha /= 2.0\n\n z = norm.ppf(1 - alpha)\n if dir == StatTool.ONE_TAILED_NEGATIVE_TEST:\n z = 0 - z\n return z", "title": "" }, { "docid": "a84a09df14c8964e5f8753ec4498895f", "score": "0.57146937", "text": "def normalize_zscore(data, z=2, offset=0.5, clip=False):\n mean = np.mean(data)\n std = np.std(data)\n img = ((data - mean) / (2 * std * z) + offset) \n if clip:\n # print ('Before')\n # print (np.min(img), np.max(img))\n img = np.clip(img, -0.0, 1.0)\n # print ('After clip')\n # print (np.min(img), np.max(img))\n return img", "title": "" }, { "docid": "0ac26db3b124e3947506297470d4ed5f", "score": "0.571298", "text": "def z_get(self, proxy, key):\n return self._client.zscore(key, proxy)", "title": "" }, { "docid": "3c8f389df059e4999a4a9b0369104525", "score": "0.57076114", "text": "def probability_for_z(cls, z_score, dir):\n assert dir in cls.valid_dirs\n\n # cdv is the probability that any sample will have LESS than z_score\n cdv = norm.cdf(z_score)\n\n if dir == cls.TWO_TAILED_TEST:\n if z_score < 0:\n dir = cls.ONE_TAILED_NEGATIVE_TEST\n else:\n dir = cls.ONE_TAILED_POSITIVE_TEST\n\n if dir == cls.ONE_TAILED_POSITIVE_TEST:\n return 1 - cdv\n else:\n return cdv", "title": "" }, { "docid": "ddff10cdc4fcf7c3ae8aa9d0dea252a6", "score": "0.5698196", "text": "def var_modified(r, level = 5):\n #compute the Z score assuming it was Gausian\n z= norm.ppf(level/100)\n #if modified:\n # modify the Z score based on observed skewness and kurtosis\n s=skewness(r)\n k=kurtosis(r)\n z=(z +\n (z**2-1)*s/6 + \n (z**3 -3*z)*(k-3)/24 -\n (2*z**3 - 5*z)*(s**2)/36\n )\n return -(r.mean() + z*r.std(ddof = 0))", "title": "" }, { "docid": "2f433132958d74f2171254fe150c2033", "score": "0.56827646", "text": "def zmax(self):\n if self._zmax is None:\n self._zmax = self.rmax\n return self._zmax", "title": "" }, { "docid": "bbb52be9a9b0ab7ada2fffe4dc821349", "score": "0.56279254", "text": "def get_score(self):\n return self._score", "title": "" }, { "docid": "86513526f07539ac8651bf91d21c69e8", "score": "0.56194705", "text": "def z_score(tableau, ur):\n zScore = 0\n for j in tableau[ur]:\n zScore += tableau[ur][j][2]\n return zScore", "title": "" }, { "docid": "05717db3354e3172f361963721c1d58a", "score": "0.5546481", "text": "def z_value(x: float, mean: float, std: float) -> float:\n return (x-mean) / std", "title": "" }, { "docid": "4479528e768431bd5e61a0d9eb3d352d", "score": "0.55305403", "text": "def R2(self, z, zpred):\n\n\n zmean = np.average(z)\n\n return 1 - np.sum((z - zpred)**2)/np.sum((z - zmean)**2)", "title": "" }, { "docid": "12e94247201b5761983384f234e6755d", "score": "0.5527806", "text": "def zscore(A, axis=None):\n return (A - np.mean(A,axis=axis)) / np.std(A,axis=axis,ddof=1)", "title": "" }, { "docid": "66559194e54019dbd45ca832f8553605", "score": "0.55251133", "text": "def outliers_modified_z_score(self, ys, y):\n threshold = 3.5\n\n median_y = np.median(ys)\n median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in ys])\n modified_z_scores = 0.6745 * (y - median_y) / 
median_absolute_deviation_y\n return np.where(np.abs(modified_z_scores) > threshold)", "title": "" }, { "docid": "437de78c57999e8bc2d3967e81698b4d", "score": "0.5522258", "text": "def z_eval(self):\n return self.z", "title": "" }, { "docid": "d2258ec09fbf7d74eb4dc3878cee20a5", "score": "0.55171925", "text": "def get_a_from_z(self, z):\n z = np.array(z)\n if np.any(z < 0.0):\n raise ValueError(\n \"Cannot convert negative redshift to scale factor\")\n return 1.0/(1.0+z)", "title": "" }, { "docid": "ef7e30711e78b06da46136ccf1bc31d2", "score": "0.54868895", "text": "def get_clinical_Z_range(): \n return (-105.0, 100000000.0) # in mm", "title": "" }, { "docid": "bf3accf9bcd511a1b5a157bdbb04cfa6", "score": "0.54471064", "text": "def raw_score(self):\n return self._raw_score", "title": "" }, { "docid": "58c3cbe7285b3c0cba21e9786e7c813b", "score": "0.5436805", "text": "def get_score(self):\n raise NotImplementedError(\"This method must be overridden\")", "title": "" }, { "docid": "aafa86dc8337437eff19f347d4e7db14", "score": "0.542814", "text": "def zscoreToPercentile(zscore):\n retVal = healthdb.util.NaN\n # WHO technical specs chapter 7: \"However, a restriction was imposed on\n # all indicators to enable the derivation of percentiles only within\n # the interval corresponding to z-scores between -3 and 3. The\n # underlying reasoning is that percentiles beyond +-3 SD are invariant\n # to changes in equivalent z-scores. The loss accruing to this\n # restriction is small since the inclusion range corresponds to the\n # 0.135th to 99.865th percentiles.\"\n if math.fabs(zscore) <= 3:\n absVal = math.fabs(zscore)\n P1 = (1 - 1 / math.sqrt(2 * math.pi) * math.exp(-math.pow(absVal, 2) / 2)\n * (\n 0.31938 * (1 / (1 + 0.2316419 * absVal))\n - 0.356563782 * math.pow((1 / (1 + 0.2316419 * absVal)), 2)\n + 1.781477937 * math.pow((1 / (1 + 0.2316419 * absVal)), 3)\n - 1.82125 * math.pow((1 / (1 + 0.2316419 * absVal)), 4)\n + 1.330274429 * math.pow((1 / (1 + 0.2316419 * absVal)), 5)\n ))\n \n if zscore > 0:\n P1 = P1 * 100\n else:\n P1 = 100 - P1 * 100\n \n if 0 <= P1 and P1 <= 100:\n retVal = P1\n return retVal", "title": "" }, { "docid": "a19b11413fb191d6925413b7ebf74b01", "score": "0.53977555", "text": "def getZScore(self, index):\n # Calculate mean and standard deviation of the list of values\n mu = np.mean(self.matrix[:, index], axis=0)\n sd = np.std(self.matrix[:, index], axis=0)\n # Calculate Z-score for the given column for each gene\n zscore = (self.matrix[:, index] - mu) / sd\n mygenes = {}\n for (name, ndx) in self.genes.items():\n try:\n mygenes[name] = zscore[ndx, :]\n except IndexError:\n mygenes[name] = zscore[ndx]\n # Return the dictionary of Z-scores\n return mygenes", "title": "" }, { "docid": "e326cfd1ca90e434102efeb46af2d1f6", "score": "0.53955096", "text": "def get_score(self, value):\n try:\n encodedvalue = PickleEncoder().encode(value)\n except Exception, e:\n raise RedisError(\"encode error:%s\" % e)\n\n try:\n score = self._db.zscore(self.namespace, encodedvalue)\n return score\n except Exception, e:\n raise RedisError(\"redis error:%s\" % e)", "title": "" }, { "docid": "ed457a87f10952f48019978b8e1e1d36", "score": "0.5376343", "text": "def zscoreFromAttribute(anthroConfig):\n for row in csv.DictReader(open(anthroConfig.fileName)):\n if 'age' in row:\n dataAgeHeightOrLength = row['age']\n elif 'length' in row:\n dataAgeHeightOrLength = row['length']\n elif 'height' in row:\n dataAgeHeightOrLength = row['height']\n if int(row['sex']) == anthroConfig.sex and 
float(dataAgeHeightOrLength) == anthroConfig.ageHeightOrLength:\n return zscoreOtherRestricted(anthroConfig.measureKey, float(row['l']), float(row['m']), float(row['s']), True) \n return healthdb.util.NaN", "title": "" }, { "docid": "fad2d8b9197199d7edc5fda8187214db", "score": "0.5350002", "text": "def score(self):\n pos = self.scores.filter(value=1).count()\n n = self.scores.count()\n return wilson_ci_lower_bound(n, pos)", "title": "" }, { "docid": "bfed7dbefd669d898835324cabe153d1", "score": "0.53445476", "text": "def NO_SCORE() -> int:\n return -1", "title": "" }, { "docid": "8538005b30ce8cfd3ed977e86d84b93a", "score": "0.5340626", "text": "def Z(self) -> int:\r\n return self._Z", "title": "" }, { "docid": "ea26fb8ca8fcf09b92a47eeba81c975a", "score": "0.5333083", "text": "def get_best_score(self):\n return self.best_score", "title": "" }, { "docid": "1716b29e9d7a608b2938d8cc1634e9c6", "score": "0.5331177", "text": "def compute(cls, observation, prediction):\n assert type(observation) is dict\n assert type(prediction) is dict\n try:\n p_value = prediction['mean'] # Use the prediction's mean. \n except (TypeError,KeyError): # If there isn't one...\n try:\n p_value = prediction['value'] # Use the prediction's value. \n except TypeError: # If there isn't one...\n p_value = prediction # Use the prediction (assume it is numeric).\n o_mean = observation['mean']\n o_std = observation['std']\n value = (p_value - o_mean)/o_std\n value = utils.assert_dimensionless(value)\n return ZScore(value)", "title": "" }, { "docid": "100ea5e8e6e0474083b076a4a68719d7", "score": "0.53240174", "text": "def compute_best_risk(self):\n has_chi2 = (self.chi2_fit != 0).sum(axis=1) > 0 \n mask = (has_chi2) & (self.zbest > self.zgrid[0])\n \n zbest_grid = np.dot(self.zbest[mask, None],\n np.ones_like(self.zgrid)[None,:])\n L = self._loss((zbest_grid-self.zgrid)/(1+self.zgrid))\n #dz = np.gradient(self.zgrid)\n \n zbest_risk = np.zeros(self.NOBJ, dtype=self.ARRAY_DTYPE)-1\n zbest_risk[mask] = np.dot(np.exp(self.lnp[mask,:])*L, self.trdz)\n \n del(has_chi2)\n del(mask)\n del(zbest_grid)\n del(L)\n \n return zbest_risk", "title": "" }, { "docid": "c70832f4b5008dc1a9bab31f1fb77d2c", "score": "0.5320918", "text": "def score_one(self, x: dict) -> float:", "title": "" }, { "docid": "c70832f4b5008dc1a9bab31f1fb77d2c", "score": "0.5320918", "text": "def score_one(self, x: dict) -> float:", "title": "" }, { "docid": "00b65cb58654be9d87f5140f9f692e8d", "score": "0.5320872", "text": "def wZ(w0, z, zR):\r\n return w0 * np.sqrt(1 + (z / zR) ** 2)", "title": "" }, { "docid": "57bd528109934590edcc37520b8aecc6", "score": "0.53161615", "text": "def get_score(self):\n return self._points", "title": "" }, { "docid": "d145a7dc3c8c2db905526386567322af", "score": "0.5306321", "text": "def norm_score(self):\n return self.score", "title": "" }, { "docid": "10ba4834d38dd73afa4faf78179c59de", "score": "0.53024745", "text": "def compute_full_risk(self):\n zsq = np.dot(self.zgrid[:,None], np.ones_like(self.zgrid)[None,:])\n L = self._loss((zsq-self.zgrid)/(1+self.zgrid))\n \n Rz = self.lnp*0.\n \n has_chi2 = (self.chi2_fit != 0).sum(axis=1) > 0 \n hasz = self.zbest > self.zgrid[0]\n idx = self.idx[hasz & (has_chi2)]\n \n for i in idx:\n Rz[i,:] = np.dot(np.exp(self.lnp[i,:])*L, self.trdz)\n \n del(zsq)\n del(L)\n del(has_chi2)\n del(hasz)\n del(idx) \n return Rz\n \n #self.full_risk = Rz\n #self.min_risk = self.zgrid[np.argmin(Rz, axis=1)]", "title": "" }, { "docid": "35b8f1cc00b5b07453048380c9183176", "score": "0.5287981", "text": "def 
getScore(self):\n return self._score", "title": "" }, { "docid": "35b8f1cc00b5b07453048380c9183176", "score": "0.5287981", "text": "def getScore(self):\n return self._score", "title": "" }, { "docid": "dcb862fa989f902036a49cc1625a4f90", "score": "0.528489", "text": "def max_score(self):\n return self.weight if self.has_score else None", "title": "" }, { "docid": "3440f70adf1c7fd88bdefc2f668b642d", "score": "0.5280114", "text": "def constant_score(self):\n return self._constant_score", "title": "" }, { "docid": "8dc96d30401cecf5b2207a98a0df1a10", "score": "0.5262474", "text": "def get_score(self):\n score = 1\n if self.answer:\n score = 3\n if not self.correct:\n score *= -1\n return score", "title": "" }, { "docid": "7c7e62a3d3390f0de2732aa78ed1d28e", "score": "0.5253121", "text": "def __call__(self, Z):\n\n return tf.math.maximum(Z, 0)", "title": "" }, { "docid": "1836f7e1518a3851013eb49b808aa889", "score": "0.52435356", "text": "def score(self):\n return self.__score", "title": "" }, { "docid": "3de3634c605416248a0eb6a0e5726904", "score": "0.52232313", "text": "def score(self):\n check_is_fitted(self, [\"lower_bound_\"])\n return self.lower_bound_", "title": "" }, { "docid": "0a79fbce580c66f934eacea64394c344", "score": "0.52211654", "text": "def z(self):\n return self._vct.z", "title": "" }, { "docid": "58de9a1a700da120b9ef3b0822a54958", "score": "0.5218734", "text": "def saclhitsrate(self) :\n try :\n return self._saclhitsrate\n except Exception as e:\n raise e", "title": "" }, { "docid": "789844b69cf207afb25c9696fe51dbb2", "score": "0.5213474", "text": "def score(self):\n return self._score", "title": "" }, { "docid": "789844b69cf207afb25c9696fe51dbb2", "score": "0.5213474", "text": "def score(self):\n return self._score", "title": "" }, { "docid": "789844b69cf207afb25c9696fe51dbb2", "score": "0.5213474", "text": "def score(self):\n return self._score", "title": "" }, { "docid": "789844b69cf207afb25c9696fe51dbb2", "score": "0.5213474", "text": "def score(self):\n return self._score", "title": "" }, { "docid": "789844b69cf207afb25c9696fe51dbb2", "score": "0.5213474", "text": "def score(self):\n return self._score", "title": "" }, { "docid": "789844b69cf207afb25c9696fe51dbb2", "score": "0.5213474", "text": "def score(self):\n return self._score", "title": "" }, { "docid": "789844b69cf207afb25c9696fe51dbb2", "score": "0.5213474", "text": "def score(self):\n return self._score", "title": "" }, { "docid": "953ef2864b7c9b9b26aa003dab0961b4", "score": "0.5205887", "text": "def fowlkes_mallows_index(y_true: np.array, y_score: np.array) -> float:\n ppv = positive_predictive_value(y_true, y_score)\n tpr = true_positive_rate(y_true, y_score)\n fm = (ppv * tpr) ** 0.5\n return fm", "title": "" }, { "docid": "a542313a7482d565193ff9df0cef3138", "score": "0.5202119", "text": "def detect_outlier_zscore(df):\n\tthreshold = 3\n\tz = np.abs(stats.zscore(df))\n\toutliers = np.where(z > threshold)\n\treturn outliers", "title": "" }, { "docid": "6f99c4afcdafcff521b56c5a458723e4", "score": "0.5199307", "text": "def getZonePercentageFeatureValue(self):\n Nz = self.coefficients['Nz']\n Np = self.coefficients['Np']\n\n zp = Nz / Np\n return zp", "title": "" }, { "docid": "81da7f92b66bb19c61c2617346cad4fe", "score": "0.5194981", "text": "def zprob(z):\n Z_MAX = 6.0 # maximum meaningful z-value\n if z == 0.0:\n x = 0.0\n else:\n y = 0.5 * math.fabs(z)\n if y >= (Z_MAX * 0.5):\n x = 1.0\n elif (y < 1.0):\n w = y * y\n x = ((((((((0.000124818987 * w\n - 0.001075204047) * w + 0.005198775019) * w\n - 0.019198292004) 
* w + 0.059054035642) * w\n - 0.151968751364) * w + 0.319152932694) * w\n - 0.531923007300) * w + 0.797884560593) * y * 2.0\n else:\n y = y - 2.0\n x = (((((((((((((-0.000045255659 * y\n + 0.000152529290) * y - 0.000019538132) * y\n - 0.000676904986) * y + 0.001390604284) * y\n - 0.000794620820) * y - 0.002034254874) * y\n + 0.006549791214) * y - 0.010557625006) * y\n + 0.011630447319) * y - 0.009279453341) * y\n + 0.005353579108) * y - 0.002141268741) * y\n + 0.000535310849) * y + 0.999936657524\n if z > 0.0:\n prob = ((x + 1.0) * 0.5)\n else:\n prob = ((1.0 - x) * 0.5)\n return prob", "title": "" }, { "docid": "23444449548c751d57eb8a7312433623", "score": "0.5192021", "text": "def credit_score(self):\n return self._credit_score", "title": "" }, { "docid": "1706217b6bb470a64b395196f3a35f76", "score": "0.5191078", "text": "def res_function(X, Z):\n X_col = X.reshape((-1, 1))\n Z_row = Z.reshape((1, -1))\n return np.maximum(Z_row - X_col + margin, 0).mean()", "title": "" }, { "docid": "5b4d0a2f8fbc43bd9a9ce40b6891f4a6", "score": "0.5191033", "text": "def lcz(self, zbest=None):\n if zbest is None:\n zbest = self.zbest\n \n _lcz = np.dot(1/(1+zbest[:, np.newaxis]), self.pivot[np.newaxis,:])\n return _lcz", "title": "" }, { "docid": "350f7ffdbf74204002d650b60f84004b", "score": "0.5189782", "text": "def Score(cls):\n\n return ScoreMask()", "title": "" }, { "docid": "595a292a563ab3746659d808dd8b2ba1", "score": "0.51834446", "text": "def custom_score(game, player):\n # TODO: finish this function!\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n def calculate_score(moves):\n if len(moves) == 0:\n return 0\n centered_locs = np.array(moves) - (game.height/2.0, game.width/2.0)\n #print(centered_locs)\n return np.sum(1/LA.norm(centered_locs,ord=2,axis=1))\n\n own_moves = game.get_legal_moves(player)\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n\n return float(calculate_score(own_moves)-calculate_score(opp_moves))", "title": "" } ]
bdeff234398c45a1b8d7776f2ff0c2a2
Class method that allows the user to populate the database.
[ { "docid": "5e4f589475625e693b789b714ddcc790", "score": "0.0", "text": "def populate(self):\n data = api_research_off()\n for product in data['products']:\n product_name = self.get_product_name(product)\n nutriscore = self.get_nutriscore(product)\n if nutriscore is None:\n continue\n nutritional_list = self.get_nutritional_mark_for_100g(product)\n if nutritional_list is None:\n continue\n url_page = self.get_url_page(product)\n url_image = self.get_url_image(product)\n if(not url_image):\n continue\n id_product = self.get_id_product(product)\n if(not id_product):\n continue\n last_modified_date = self.get_last_modified(product)\n food = Food(name=product_name,\n nutriscore=nutriscore,\n url=url_page,\n url_picture=url_image,\n fat_100g=nutritional_list[0],\n saturated_fat_100g=nutritional_list[2],\n sugars_100g=nutritional_list[4],\n salt_100g=nutritional_list[6],\n fat_level=nutritional_list[1],\n salt_level=nutritional_list[7],\n saturated_fat_level=nutritional_list[3],\n sugars_level=nutritional_list[5],\n last_modified=last_modified_date,\n openff_id=id_product)\n food.save()\n self.get_and_insert_categories_in_db(product, food)", "title": "" } ]
[ { "docid": "11c5c10dcb7519f823633e95d691706e", "score": "0.7409577", "text": "def _populateTables(cls):\n for method in dir(Populate):\n if not method.startswith('__'):\n populateTable = getattr(Populate, method)\n print 'Inserting data for %s ...' % method\n populateTable()\n\n db.session.commit()", "title": "" }, { "docid": "dda5e8e7e0ff2e9f2e1e78366231c75b", "score": "0.7352905", "text": "def __call__(self):\n self.create_database()", "title": "" }, { "docid": "e04dc2fb4584516abc29cd5b12401ab9", "score": "0.7279164", "text": "def populate():", "title": "" }, { "docid": "3738361c8de7ca311743c12b407f463c", "score": "0.7264388", "text": "def populate_db_command():\n populate_db()\n click.echo(\"Created dummy data.\")", "title": "" }, { "docid": "aeb3e562c42462ac59c49c99c1f6c53e", "score": "0.72629017", "text": "def handle(self, *args, **options):\n self._populate_db()", "title": "" }, { "docid": "e755fbb017c46d5659f9eac88663f994", "score": "0.72245425", "text": "def populate_db():\n database.create_tables([Donor, Donation])\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n #database = SqliteDatabase('donor.db')\n\n logger.info('Working with Person class')\n logger.info('Note how I use constants and a list of tuples as a simple schema')\n logger.info('Normally you probably will have prompted for this from a user')\n\n# NAME = 0\n# DONATION = 1\n# \n donors = [('Karl', [100, 200, 300.55]),\n ('Luis', [400, 500, 600]),\n ('Woody', [700, 800, 900]),\n ('Mary', [455.99])]\n \n logger.info('Creating Donor records: iterate through the list of tuples')\n logger.info('Prepare to explain any errors with exceptions')\n logger.info('and the transaction tells the database to fail on error')\n \n for users, donations in donors:\n user = Donor.create(username=users)\n for donation in donations:\n Donation.create(user=user, donation=float(donation))", "title": "" }, { "docid": "13029ab8865339e29a5c71c47127fda5", "score": "0.71562296", "text": "def data_into_database():\n pass", "title": "" }, { "docid": "d20c034a8c3cbc0832dc74bb4da1b0f3", "score": "0.71474314", "text": "def fromDB(self):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n pass", "title": "" }, { "docid": "fdc0072cd1eefb95b2048c8061190efe", "score": "0.7144774", "text": "def initiate(cls):\n cls._purgeTables()\n cls._populateTables()\n print 'DB initialization finished!!'", "title": "" }, { "docid": "8f9911dbbe0284a285ab8f29ced0957c", "score": "0.7093239", "text": "def populate(self, connection, system):", "title": "" }, { "docid": "7efc43d24ebff7e863c3941c631b63d4", "score": "0.704847", "text": "def init_db():\n\n with app.app_context():\n data = json.loads(urllib2.urlopen(DATA_URL).read())\n ds = DataSource(data['data'], data['meta']['view']['columns'])\n headers = ds.get_headers(\n set([\n 'Applicant', \n 'Address', \n 'FoodItems', \n 'Latitude', \n 'Longitude'\n ]),\n 'name'\n )\n db = get_db()\n db.init_database()\n for item in ds.gen_items(headers):\n try:\n db.add_row(\n item['Applicant'], \n item['Address'], \n item['FoodItems'], \n item['Latitude'], \n item['Longitude']\n )\n except psycopg2.IntegrityError:\n pass", "title": "" }, { "docid": "8a8560696f49cd0ff48c227dff9a1f49", "score": "0.69914603", "text": "def populate_db2(self):\n db.create_all()\n\n informantRelationships = self.create_informant_relationships()\n users = self.create_users()\n finalCodes = self.create_final_codes()\n states = self.create_states()\n abstractStatuses = 
self.create_abstract_statuses()\n sexes = self.create_sexes()\n races = self.create_races()\n ethnicities = self.create_ethnicities()\n vitals = self.create_vital_statuses()\n contacts = self.create_contacts()\n inactives = self.create_inactives()\n ucrReportTypes = self.create_ucr_report_types()\n physicianStatuses = self.create_physician_statuses()\n physFacilityStatuses = self.create_physician_facility_statuses()\n phoneTypes = self.create_phone_types()\n irbHolders = self.create_irb_holders()\n projectTypes = self.create_project_types()\n contactStatuses = self.create_contact_statuses()\n contactSources = self.create_contact_sources()\n grantStatuses = self.create_grant_statuses()\n fundingSources = self.create_funding_sources()\n reviewCommitteeStatuses = self.create_review_committee_statuses()\n projectStatuses = self.create_project_statuses()\n logTypes = self.create_log_subjects()\n reviewCommittees = self.create_review_committees()\n staffRoles = self.create_staff_roles()\n projectPhases = self.create_project_phases()\n hsts = self.create_human_subject_trainings()\n tracingSources = self.create_tracing_sources()\n contactTypes = self.create_contact_types()\n ucrRoles = self.create_ucr_roles()\n giftCards = self.create_gift_cards()\n\n project1 = models.Project(\n projectTypeID=1,\n irbHolderID=1,\n projectTitle=\"Test Project\",\n shortTitle=\"Test Project\",\n projectSummary=\"Summary\",\n sop=\"sop\",\n ucrProposal=\"ucr_proposal\",\n budgetDoc=\"budget_doc\",\n ucrFee=\"no\",\n ucrNoFee=\"yes\",\n previousShortTitle=\"t short\",\n dateAdded=datetime(2016, 2, 2),\n finalRecruitmentReport=\"report\",\n ongoingContact=True,\n activityStartDate=datetime(2016, 2, 2),\n activityEndDate=datetime(2016, 2, 2))\n\n project2 = models.Project(\n projectTypeID=1,\n irbHolderID=1,\n projectTitle=\"Test Project\",\n shortTitle=\"Test Project\",\n projectSummary=\"Summary\",\n sop=\"sop\",\n ucrProposal=\"ucr_proposal\",\n budgetDoc=\"budget_doc\",\n ucrFee=\"no\",\n ucrNoFee=\"yes\",\n previousShortTitle=\"t short\",\n dateAdded=datetime(2016, 2, 2),\n finalRecruitmentReport=\"report\",\n ongoingContact=True,\n activityStartDate=datetime(2016, 2, 2),\n activityEndDate=datetime(2016, 2, 2))\n\n budget1 = models.Budget(\n projectID=1,\n numPeriods=1,\n periodStart=datetime(2016, 2, 2),\n periodEnd=datetime(2016, 2, 2),\n periodTotal=1.23,\n periodComment=\"comment\")\n\n rc = models.ReviewCommittee(\n projectID=1,\n reviewCommitteeStatusID=1,\n reviewCommitteeLUTID=1,\n reviewCommitteeNumber=\"1\",\n dateInitialReview=datetime(2016, 2, 2),\n dateExpires=datetime(2016, 2, 2),\n rcNote=\"rc_note\",\n rcProtocol=\"rc_proto\",\n rcApproval=\"rc_approval\")\n\n ucr = models.UCRReport(\n projectID=1,\n reportTypeID=1,\n reportSubmitted=datetime(2016, 2, 2),\n reportDue=datetime(2016, 2, 2),\n reportDoc=\"doc\"\n )\n arcReview = models.ArcReview(\n projectID=1,\n reviewType=1,\n dateSentToReviewer=datetime(2016, 2, 2),\n reviewer1=1,\n reviewer1Rec=1,\n reviewer1SigDate=datetime(2016, 2, 2),\n reviewer1Comments=\"test comment\",\n reviewer2=2,\n reviewer2Rec=2,\n reviewer2SigDate=datetime(2016, 2, 2),\n reviewer2Comments=\"test comment\",\n research=1,\n linkage=False,\n contact=True,\n engaged=True,\n nonPublicData=True)\n\n funding = models.Funding(\n grantStatusID=1,\n projectID=1,\n fundingSourceID=1,\n primaryFundingSource=\"pfs\",\n secondaryFundingSource=\"sfs\",\n fundingNumber=\"number\",\n grantTitle=\"title\",\n dateStatus=datetime(2016, 2, 2),\n grantPi=1,\n primaryChartfield=\"pcf\",\n 
secondaryChartfield=\"scf\"\n )\n\n staff = models.Staff(\n userID=1,\n firstName=\"Aaron\",\n lastName=\"Thomas\",\n middleName=\"Pulver\",\n email=\"[email protected]\",\n phoneNumber=\"phone\",\n phoneComment=\"phoneComment\",\n institution=\"institution\",\n department=\"department\",\n position=\"position\",\n credentials=\"credentials\",\n street=\"street\",\n city=\"city\",\n stateID=1,\n ucrRoleID=1\n )\n staff2 = models.Staff(\n userID=2,\n firstName=\"Phoebe\",\n lastName=\"\",\n middleName=\"McNeally\",\n email=\"email\",\n phoneNumber=\"phone\",\n phoneComment=\"phoneComment\",\n institution=\"institution\",\n department=\"department\",\n position=\"position\",\n credentials=\"credentials\",\n street=\"street\",\n city=\"city\",\n stateID=2,\n ucrRoleID=1\n )\n\n projStatus = models.ProjectStatus(\n projectStatusTypeID=1,\n projectID=1,\n staffID=1,\n statusDate=datetime(2016, 2, 2),\n statusNotes=\"notes\"\n )\n\n preApp = models.PreApplication(\n projectID=1,\n piFirstName=\"pi_fname\",\n piLastName=\"pi_lname\",\n piEmail=\"pi_email\",\n piPhone=\"pi_phone\",\n contactFirstName=\"contact_fname\",\n contactLastName=\"contact_lname\",\n contactPhone=\"contact_phone\",\n contactEmail=\"contact_email\",\n institution=\"institution\",\n institution2=\"institution2\",\n uid=\"uid\",\n udoh=1,\n projectTitle=\"project_title\",\n purpose=\"purpose\",\n irb0=True,\n irb1=True,\n irb2=True,\n irb3=True,\n irb4=True,\n otherIrb=\"other_irb\",\n updb=True,\n ptContact=True,\n startDate=datetime(2016, 2, 2),\n link=True,\n deliveryDate=datetime(2016, 2, 2),\n description=\"description\"\n )\n log = models.Log(\n logSubjectID=1,\n projectID=1,\n staffID=1,\n phaseStatusID=1,\n note=\"note\",\n date=datetime(2016, 2, 2)\n )\n projectStaff = models.ProjectStaff(\n staffRoleID=1,\n projectID=1,\n staffID=1,\n datePledge=datetime(2016, 2, 2),\n dateRevoked=datetime(2016, 2, 2),\n contactID=1,\n inactiveID=1\n )\n staffTraining = models.StaffTraining(\n staffID=1,\n humanSubjectTrainingID=1,\n dateTaken=datetime(2016, 2, 2),\n dateExpires=datetime(2016, 2, 2)\n )\n patient = models.Patient(\n patID=\"1\",\n ucrDistID=1,\n UPDBID=1,\n firstName=\"fname\",\n lastName=\"lname\",\n middleName=\"mname\",\n maidenName=\"maiden_name\",\n aliasFirstName=\"alias_fname\",\n aliasLastName=\"alias_lname\",\n aliasMiddleName=\"alias_middle\",\n dobDay=15,\n dobMonth=2,\n dobYear=1990,\n SSN=\"999999999\",\n sexID=2,\n raceID=1,\n ethnicityID=1,\n vitalStatusID=1\n )\n patient2 = models.Patient(\n patID=\"1\",\n ucrDistID=1,\n UPDBID=1,\n firstName=\"fname2\",\n lastName=\"lname2\",\n middleName=\"mname2\",\n maidenName=\"maiden_name\",\n aliasFirstName=\"alias_fname\",\n aliasLastName=\"alias_lname\",\n aliasMiddleName=\"alias_middle\",\n dobDay=26,\n dobMonth=4,\n dobYear=1970,\n SSN=\"999999999\",\n sexID=1,\n raceID=2,\n ethnicityID=1,\n vitalStatusID=2\n )\n\n patientAddress = models.PatientAddress(\n contactInfoSourceID=1,\n participantID=1,\n contactInfoStatusID=1,\n street=\"street\",\n street2=\"street2\",\n city=\"city\",\n stateID=1,\n zip=\"12345\",\n addressStatusDate=datetime(2016, 2, 2),\n )\n\n patientEmail = models.PatientEmail(\n contactInfoSourceID=1,\n participantID=1,\n contactInfoStatusID=1,\n email=\"email\",\n emailStatusDate=datetime(2016, 2, 2)\n )\n patientPhone = models.PatientPhone(\n contactInfoSourceID=1,\n participantID=1,\n contactInfoStatusID=1,\n phoneTypeID=1,\n phoneNumber=\"phone\",\n phoneStatusDate=datetime(2016, 2, 2)\n )\n patientPhone2 = models.PatientPhone(\n 
contactInfoSourceID=1,\n participantID=1,\n contactInfoStatusID=1,\n phoneTypeID=1,\n phoneNumber=\"phone\",\n phoneStatusDate=datetime(2016, 2, 2)\n )\n informant1 = models.Informant(\n participantID=1,\n firstName=\"fname\",\n lastName=\"lname\",\n middleName=\"middle_name\",\n informantPrimary=True,\n informantRelationshipID=1,\n notes=\"notes\"\n )\n informant2 = models.Informant(\n participantID=1,\n firstName=\"fname\",\n lastName=\"lname\",\n middleName=\"middle_name\",\n informantPrimary=True,\n informantRelationshipID=1,\n notes=\"notes\"\n )\n informantAddress = models.InformantAddress(\n contactInfoSourceID=1,\n informantID=1,\n contactInfoStatusID=1,\n street=\"street\",\n street2=\"street2\",\n city=\"city\",\n stateID=2,\n zip=\"12345\",\n addressStatusDate=datetime(2016, 2, 2),\n )\n informantPhone = models.InformantPhone(\n contactInfoSourceID=1,\n informantID=1,\n contactInfoStatusID=1,\n phoneTypeID=1,\n phoneNumber=\"phone\",\n phoneStatusDate=datetime(2016, 2, 2)\n )\n informantPhone2 = models.InformantPhone(\n contactInfoSourceID=1,\n informantID=1,\n contactInfoStatusID=1,\n phoneTypeID=1,\n phoneNumber=\"phone\",\n phoneStatusDate=datetime(2016, 2, 2)\n )\n ctc1 = models.CTC(\n participantID=1,\n dxDateDay=2,\n dxDateMonth=7,\n dxDateYear=1988,\n site=\"Site 2\",\n histology=\"histology\",\n behavior=\"behavior\",\n ctcSequence=\"sequence\",\n stage=\"stage\",\n dxAge=1,\n dxStreet1=\"street1\",\n dxStreet2=\"street2\",\n dxCity=\"city\",\n dxStateID=1,\n dxZip=99999,\n dxCounty=\"county\",\n dnc=\"dnc\",\n dncReason=\"dnc_reason\",\n recordID=\"abc321\"\n )\n ctc2 = models.CTC(\n participantID=1,\n dxDateDay=3,\n dxDateMonth=10,\n dxDateYear=1958,\n site=\"Site 1\",\n histology=\"histology\",\n behavior=\"behavior\",\n ctcSequence=\"sequence\",\n stage=\"stage\",\n dxAge=1,\n dxStreet1=\"street1\",\n dxStreet2=\"street2\",\n dxCity=\"city\",\n dxStateID=2,\n dxZip=99999,\n dxCounty=\"county\",\n dnc=\"dnc\",\n dncReason=\"dnc_reason\",\n recordID=\"abc123\"\n )\n projectPatient = models.ProjectPatient(\n projectID=1,\n staffID=1,\n ctcID=1,\n currentAge=1,\n batch=1,\n siteGrp=1,\n finalCodeID=1,\n finalCodeDate=datetime(2016, 2, 2),\n enrollmentDate=datetime(2016, 2, 2),\n dateCoordSigned=datetime(2016, 2, 2),\n importDate=datetime(2016, 2, 2),\n finalCodeStaffID=1,\n enrollmentStaffID=1,\n dateCoordSignedStaffID=1,\n abstractStatusID=1,\n abstractStatusDate=datetime(2016, 2, 2),\n abstractStatusStaffID=1,\n sentToAbstractorDate=datetime(2016, 2, 2),\n sentToAbstractorStaffID=1,\n abstractedDate=datetime(2016, 2, 2),\n abstractorStaffID=1,\n researcherDate=datetime(2016, 2, 2),\n researcherStaffID=1,\n consentLink=\"link\",\n medRecordReleaseSigned=True,\n medRecordReleaseLink=\"link\",\n medRecordReleaseStaffID=1,\n medRecordReleaseDate=datetime(2016, 2, 2),\n surveyToResearcher=datetime(2016, 2, 2),\n surveyToResearcherStaffID=1,\n qualityControl=True,\n )\n\n projectPatient2 = models.ProjectPatient(\n projectID=1,\n staffID=1,\n ctcID=1,\n currentAge=1,\n batch=1,\n siteGrp=1,\n finalCodeID=1,\n finalCodeDate=datetime(2016, 2, 2),\n enrollmentDate=datetime(2016, 2, 2),\n dateCoordSigned=datetime(2016, 2, 2),\n importDate=datetime(2016, 2, 2),\n finalCodeStaffID=1,\n enrollmentStaffID=1,\n dateCoordSignedStaffID=1,\n abstractStatusID=1,\n abstractStatusDate=datetime(2016, 2, 2),\n abstractStatusStaffID=1,\n sentToAbstractorDate=datetime(2016, 2, 2),\n sentToAbstractorStaffID=1,\n abstractedDate=datetime(2016, 2, 2),\n abstractorStaffID=1,\n 
researcherDate=datetime(2016, 2, 2),\n researcherStaffID=1,\n consentLink=\"link\",\n medRecordReleaseSigned=True,\n medRecordReleaseLink=\"link\",\n medRecordReleaseStaffID=1,\n medRecordReleaseDate=datetime(2016, 2, 2),\n surveyToResearcher=datetime(2016, 2, 2),\n surveyToResearcherStaffID=1,\n qualityControl=False\n )\n\n tracing = models.Tracing(\n tracingSourceID=1,\n participantID=1,\n date=datetime(2016, 2, 2),\n staffID=1,\n notes=\"notes\"\n )\n physician = models.Physician(\n firstName=\"fname\",\n lastName=\"lname\",\n middleName=\"middle_name\",\n credentials=\"credentials\",\n specialty=\"specialty\",\n aliasFirstName=\"alias_fname\",\n aliasLastName=\"alias_lname\",\n aliasMiddleName=\"alias_middle_name\",\n physicianStatusID=1,\n physicianStatusDate=datetime(2016, 2, 2),\n )\n\n physician2 = models.Physician(\n firstName=\"fname\",\n lastName=\"lname\",\n middleName=\"middle_name\",\n credentials=\"credentials\",\n specialty=\"specialty\",\n aliasFirstName=\"alias_fname\",\n aliasLastName=\"alias_lname\",\n aliasMiddleName=\"alias_middle_name\",\n physicianStatusID=1,\n physicianStatusDate=datetime(2016, 2, 2),\n )\n physicianAddress = models.PhysicianAddress(\n contactInfoSourceID=1,\n physicianID=1,\n contactInfoStatusID=1,\n street=\"street\",\n street2=\"street2\",\n city=\"city\",\n stateID=1,\n zip=\"12345\",\n addressStatusDate=datetime(2016, 2, 2),\n )\n\n physicianEmail = models.PhysicianEmail(\n contactInfoSourceID=1,\n physicianID=1,\n contactInfoStatusID=1,\n email=\"email\",\n emailStatusDate=datetime(2016, 2, 2)\n )\n\n physicianPhone = models.PhysicianPhone(\n contactInfoSourceID=1,\n physicianID=1,\n contactInfoStatusID=1,\n phoneNumber=\"phone\",\n phoneTypeID=1,\n phoneStatusDate=datetime(2016, 2, 2)\n )\n physicianPhone2 = models.PhysicianPhone(\n contactInfoSourceID=1,\n physicianID=1,\n contactInfoStatusID=1,\n phoneNumber=\"phone\",\n phoneTypeID=1,\n phoneStatusDate=datetime(2016, 2, 2)\n )\n physicianToCTC = models.PhysicianToCTC(\n physicianID=1,\n ctcID=1\n )\n facility1 = models.Facility(\n facilityName=\"name\",\n contactFirstName=\"fname\",\n contactLastName=\"lname\",\n facilityStatus=1,\n facilityStatusDate=datetime(2016, 2, 2),\n contact2FirstName=\"fname\",\n contact2LastName=\"lname\"\n )\n facility2 = models.Facility(\n facilityName=\"name\",\n contactFirstName=\"fname\",\n contactLastName=\"lname\",\n facilityStatus=1,\n facilityStatusDate=datetime(2016, 2, 2),\n contact2FirstName=\"fname\",\n contact2LastName=\"lname\"\n )\n facilityAddress = models.FacilityAddress(\n contactInfoSourceID=1,\n facilityID=1,\n contactInfoStatusID=1,\n street=\"street\",\n street2=\"street2\",\n city=\"city\",\n stateID=1,\n zip=\"12345\",\n addressStatusDate=datetime(2016, 2, 2),\n )\n\n facilityPhone = models.FacilityPhone(\n contactInfoSourceID=1,\n facilityID=1,\n contactInfoStatusID=1,\n clinicName=\"clinic\",\n phoneTypeID=1,\n phoneNumber=\"phone\",\n phoneStatusDate=datetime(2016, 2, 2)\n )\n facilityPhone2 = models.FacilityPhone(\n contactInfoSourceID=1,\n facilityID=1,\n contactInfoStatusID=1,\n clinicName=\"clinic\",\n phoneTypeID=1,\n phoneNumber=\"phone2\",\n phoneStatusDate=datetime(2016, 2, 2)\n )\n patientProjectStatusType1 = models.PatientProjectStatusLUT(\n statusDescription=\"desc\"\n )\n patientProjectStatusType2 = models.PatientProjectStatusLUT(\n statusDescription=\"desc\"\n )\n patientProjectStatus = models.PatientProjectStatus(\n patientProjectStatusTypeID=1,\n participantID=1,\n statusDate=datetime(2016,2,2)\n )\n physicianFacility = 
models.PhysicianFacility(\n facilityID=1,\n physicianID=1,\n physFacilityStatusID=1,\n physFacilityStatusDate=datetime(2016, 2, 2)\n )\n contact = models.Contact(\n contactTypeLUTID=1,\n participantID=1,\n staffID=1,\n informantID=1,\n informantPhoneID=1,\n description=\"desc\",\n contactDate=datetime(2016, 2, 2),\n initials=\"atp\",\n notes=\"notes\"\n )\n contact2 = models.Contact(\n contactTypeLUTID=1,\n participantID=1,\n staffID=1,\n facilityID=1,\n facilityPhoneID=1,\n description=\"desc\",\n contactDate=datetime(2016, 2, 2),\n initials=\"atp\",\n notes=\"notes\"\n )\n contact3 = models.Contact(\n contactTypeLUTID=1,\n participantID=1,\n staffID=1,\n physicianID=1,\n physicianPhoneID=1,\n description=\"desc\",\n contactDate=datetime(2016, 2, 2),\n initials=\"atp\",\n notes=\"notes\"\n )\n contact4 = models.Contact(\n contactTypeLUTID=1,\n participantID=1,\n staffID=1,\n patientPhoneID=1,\n description=\"desc\",\n contactDate=datetime(2016, 2, 2),\n initials=\"atp\",\n notes=\"notes\"\n )\n ctcFacility = models.CTCFacility(\n ctcID=1,\n facilityID=1,\n coc=123\n )\n incentive = models.Incentive(\n participantID=1,\n incentiveDescription=\"desc\",\n barcode=\"123456789\",\n dateGiven=datetime(2016, 4, 3)\n )\n db.session.add_all(ucrRoles)\n db.session.add_all(informantRelationships)\n db.session.add_all(users)\n db.session.add_all(states)\n db.session.add_all(finalCodes)\n db.session.add_all(sexes)\n db.session.add_all(abstractStatuses)\n db.session.add_all(races)\n db.session.add_all(ethnicities)\n db.session.add_all(vitals)\n db.session.add_all(contacts)\n db.session.add_all(inactives)\n db.session.add_all(ucrReportTypes)\n db.session.add_all(physicianStatuses)\n db.session.add_all(physFacilityStatuses)\n db.session.add_all(phoneTypes)\n db.session.add_all(irbHolders)\n db.session.add_all(projectTypes)\n db.session.add_all(contactStatuses)\n db.session.add_all(contactSources)\n db.session.add_all(grantStatuses)\n db.session.add_all(fundingSources)\n db.session.add_all(reviewCommitteeStatuses)\n db.session.add_all(projectStatuses)\n db.session.add_all(logTypes)\n db.session.add_all(reviewCommittees)\n db.session.add_all(staffRoles)\n db.session.add_all(projectPhases)\n db.session.add_all(hsts)\n db.session.add_all(tracingSources)\n db.session.add_all(contactTypes)\n db.session.add_all(giftCards)\n db.session.add(staff)\n db.session.add(staff2)\n db.session.add(project1)\n db.session.add(project2)\n db.session.add(funding)\n db.session.add(budget1)\n db.session.add(rc)\n db.session.add(ucr)\n db.session.add(arcReview)\n db.session.add(preApp)\n db.session.add(log)\n db.session.add(projectStaff)\n db.session.add(staffTraining)\n db.session.add(patient)\n db.session.add(patient2)\n db.session.add(patientAddress)\n db.session.add(patientEmail)\n db.session.add(patientPhone)\n db.session.add(patientPhone2)\n db.session.add(informant1)\n db.session.add(informant2)\n db.session.add(informantAddress)\n db.session.add(informantPhone)\n db.session.add(informantPhone2)\n db.session.add(ctc1)\n db.session.add(ctc2)\n db.session.add(projectPatient)\n db.session.add(projectPatient2)\n db.session.add(projStatus)\n db.session.add(tracing)\n db.session.add(physician)\n db.session.add(physician2)\n db.session.add(physicianAddress)\n db.session.add(physicianEmail)\n db.session.add(physicianPhone)\n db.session.add(physicianPhone2)\n db.session.add(physicianToCTC)\n db.session.add(facility1)\n db.session.add(facility2)\n db.session.add(facilityAddress)\n db.session.add(facilityPhone)\n 
db.session.add(facilityPhone2)\n db.session.add(patientProjectStatusType1)\n db.session.add(patientProjectStatusType2)\n db.session.add(patientProjectStatus)\n db.session.add(physicianFacility)\n db.session.add(contact)\n db.session.add(contact2)\n db.session.add(contact3)\n db.session.add(contact4)\n db.session.add(ctcFacility)\n db.session.add(incentive)\n db.session.commit()", "title": "" }, { "docid": "f1f4704030f5c33ae04a6f6ed3fc8c9d", "score": "0.699089", "text": "def populate(self):\n pass", "title": "" }, { "docid": "77c4cbc11305536344a963ae28dd0f92", "score": "0.69689226", "text": "def populate_db():\n values = db.load_config()\n logging.info(values)\n logging.info(\"Started import \")\n logging.info(f'Importing into DB: {values[\"database\"]}')\n # host, db, user, password\n conn = db.get_connection_local_pg( values)\n logging.info(\"Finished import \")\n if ( not os.path.isdir(\"data/2014-i2b2-nlp-evaluation-data-txt\") ):\n logging.exception(\"Data not downloaded, run the main.py get_data first \")\n os.sys.exit(1)\n db.import_data(conn, \"data/2014-i2b2-nlp-evaluation-data-txt\", \"data/testing-PHI-Gold-fixed\")", "title": "" }, { "docid": "eab65cea50449a2f2f4271702528c59f", "score": "0.69370925", "text": "def __init__(self):\n self.db = db\n self.db.create_all()", "title": "" }, { "docid": "53db6e0a06eccebdfecb42251ceff176", "score": "0.6923225", "text": "def do_populate(self, arg):\n if arg.strip():\n self.print('Bad syntax!')\n return\n\n self.client.populate_db(numElem=50, numVp=5)\n self.print('OK!')", "title": "" }, { "docid": "b0597bdfe794be623b11017593a932e0", "score": "0.69071597", "text": "def _init_db_structure(self):\n #with self._db as conn: conn.execute('')\n pass", "title": "" }, { "docid": "db9b4b3df1fe7585313948ae354a3a87", "score": "0.6852799", "text": "def _populate_db():\n data_1 = json.dumps(DATA_1)\n data_2 = json.dumps({\n \"organization\": \"Skynet Papercorp\", \"reported_at\": \"2015-04-22\",\n \"created_at\": \"2015-04-23\",\n \"inventory\": [{\"name\": \"paper\", \"price\": \"4.00\"}]\n })\n report_1 = Report(data=data_1)\n report_2 = Report(data=data_2)\n db.session.add(report_1)\n db.session.add(report_2)\n db.session.commit()", "title": "" }, { "docid": "6a0ccdb9a18f76b2e8cd24c1104a62ad", "score": "0.6847385", "text": "def update_database(self):", "title": "" }, { "docid": "c47700b5a4ddf0cb55f3bcad8e6ef127", "score": "0.6742828", "text": "def create_db_samples():\n init_users()\n init_categories()\n init_items()", "title": "" }, { "docid": "f19de7f49a4903a12523b88a31e718e7", "score": "0.6733228", "text": "def run(self, args):\n super(PopulateCommand, self).run(args)\n try:\n init_conf(\"populate.log\")\n# cfg.CONF.log_file = \"populate.log\"\n# cfg.CONF.use_stderr = True\n LOG = api.LOG = get_logger(\"populate\")\n LOG.info(_(\"Loading environment\"))\n self.load_app()\n LOG.info(_(\"Building schema\"))\n LOG.info(_(\"Starting a transaction...\"))\n models.start()\n\n # FIXME: There's no create_all equivalent for Music.\n\n # Valet\n Group.create_table()\n Placement.create_table()\n Plan.create_table()\n\n # Ostro\n Event.create_table()\n PlacementRequest.create_table()\n PlacementResult.create_table()\n except Exception as ex:\n models.rollback()\n LOG.error(\"Rolling back... 
%s\" % ex)\n raise\n else:\n LOG.info(_(\"Committing.\"))\n models.commit()", "title": "" }, { "docid": "dda6cc12fabb7bd264b4028d965e6e4f", "score": "0.66867036", "text": "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()", "title": "" }, { "docid": "f9d68f7ddaf994169e9ef7ea55297614", "score": "0.664742", "text": "def init_db():\n global app\n Supplier.init_db(app)", "title": "" }, { "docid": "1a0f57d7958b9a592b22a30b870e439a", "score": "0.66439116", "text": "def toDB(self):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n pass", "title": "" }, { "docid": "9c7a639687c0a13a0d5426588108c1e4", "score": "0.66213745", "text": "def populate_db():\n from datetime import datetime\n from models import Post, Tag, post_to_tag\n Post.query.delete()\n Tag.query.delete()\n db.session.execute(post_to_tag.delete())\n\n personalwebapp = Tag(name='personalwebapp')\n db.session.add(personalwebapp)\n\n post = Post(title='Hello PersonalWebApp!', content=FIRST_POST, author_id=1, timezone=-5)\n post.tags.append(personalwebapp)\n post.state_id = 2\n post.published_at = datetime.utcnow()\n post.url = 'hello_personal_webapp'\n db.session.add(post)\n\n post = Post(title='Example Draft Post', content='This is the first draft. I\\'ll finish it later.',\n author_id=1, timezone=-5)\n post.url = 'example_draft_post'\n db.session.add(post)\n\n db.session.commit()\n\n print('\"personalwebapp\" tag'\n 'and \"Hello PersonalWebApp!\" published post and \"Example Draft Post\" draft post have been generated.')", "title": "" }, { "docid": "d910fe18fbef4319354befcc35d03628", "score": "0.65966296", "text": "def create_db(self):\n return None", "title": "" }, { "docid": "ca9c7e87e76ef17c1834e51d3ec5b2fe", "score": "0.65857136", "text": "def populate_db():\n\n api_url = BOOK_URL\n\n api_response = requests.get(api_url)\n print(api_response.text)\n json_api_response = api_response.json()\n if api_response.status_code == 200:\n for book in json_api_response:\n name = book['name']\n isbn = book[\"isbn\"]\n authors = json.dumps(book[\"authors\"])\n country = book['country']\n number_of_pages = int(book[\"numberOfPages\"])\n publisher = book[\"publisher\"]\n release_date = datetime.strptime(book[\"released\"], \"%Y-%m-%dT%H:%M:%S\").date()\n\n book_insert = Books(None, name, isbn, authors, country, number_of_pages, publisher, release_date, datetime.now(),\n datetime.now())\n db.session.add(book_insert)\n db.session.commit()", "title": "" }, { "docid": "02692e1ea91042a4b1bb734b743fdd70", "score": "0.65577847", "text": "def populate_database(query, write):\n db = ingest.load_database()\n db = ingest.populate_database(db, query=query, write=write)", "title": "" }, { "docid": "019188738626cd084af97385709e6cb4", "score": "0.6507313", "text": "def initialize_db():\n # Drop the database and create tables\n setup_database = TestDatabase()\n setup_database.drop()\n setup_database.create()\n setup_database.populate()", "title": "" }, { "docid": "738e02b7d0516b0517e04b9cd2313a1b", "score": "0.64941686", "text": "def run(self):\n self.db.table('users').insert({\n 'name': 'sjahn',\n 'email': '[email protected]',\n 'password': generate_password_hash('1234')\n })", "title": "" }, { "docid": "c96b463c4afba4b8874e42597aee6b3b", "score": "0.64413", "text": "def seed_data():\n typer.echo(\"Creating initial data\")\n db = SessionLocal()\n init_db(db)\n typer.echo(\"Initial data created\")", "title": "" }, { "docid": "a2bc2d10880289fafb45d98b7d9b9758", "score": "0.6428364", 
"text": "def dbcreation():\n db.create_all() \n return \"Hi\"", "title": "" }, { "docid": "d23d384becf2768d3e555256fd4cf92f", "score": "0.6423991", "text": "def populate(self):\n raise NotImplementedError", "title": "" }, { "docid": "ef264dd3df50c3020dbd0dd2de6672e7", "score": "0.6420109", "text": "def __populate(self):\n self._populate_cli()\n self._populate_model()", "title": "" }, { "docid": "c52a7b7a7c599071e0c42c0dc4163f5e", "score": "0.6419514", "text": "def setUp(self):\n\t\tdb.create_all()", "title": "" }, { "docid": "d0e326115fecf07a75034ac464aff76c", "score": "0.6418617", "text": "def initialize():\n db.connect()\n db.create_tables([Product], safe=True)", "title": "" }, { "docid": "22618966909c2b541155c9cccc016691", "score": "0.64135474", "text": "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "title": "" }, { "docid": "253b0f1c720f5fda12e6a428c5ab086f", "score": "0.6398332", "text": "def initialisation(self):\n self.cursor.execute('create database if not exists open_food_facts;')\n\n with open(\"creation_bdd.sql\", 'r') as bdd:\n file = bdd.read()\n requetes = file.split(';')\n for line in requetes:\n self.cursor.execute(line)\n\n self.connexion.commit()", "title": "" }, { "docid": "58caf7c1e15f11c58efc8b15d2e8bd2f", "score": "0.6390033", "text": "def get_database_data(self):\r\n pass", "title": "" }, { "docid": "aa52c5d6fcf41a0a51cbbf90cf49e5f4", "score": "0.6380734", "text": "def create_db():\n db.create_all()\n db_fill_data()", "title": "" }, { "docid": "739094ba7ea9b2a730020bba9cfc3cab", "score": "0.6374668", "text": "def init_db_command():\n db = init_db()\n import_parsed_data(db)", "title": "" }, { "docid": "220077a23ec1b23d82b202cd9f0c39c2", "score": "0.6373129", "text": "def init_db():\n\n create_model_tables([\n Text,\n Citation,\n ], fail_silently=True)", "title": "" }, { "docid": "c5957b336c45f673120ecf164d9b315a", "score": "0.63699794", "text": "def build_database():\n\tfrom fake_data import setup_initial_database, more_data\n\n\tprint (\"Updating Local Development Database...\")\n\n\tsetup_initial_database()\n\n\tprint (\"Finished!\")\n\n\treturn", "title": "" }, { "docid": "5734b372ff7c325ebedb7ccee68dd464", "score": "0.63501185", "text": "def initialize_db(self):\r\n click.echo('[OrientModel_initialize_db_%s] Starting process...' % (get_datetime()))\r\n if self.checks['created'] == False:\r\n self.create_db()\r\n if self.checks['open_db'] == False:\r\n self.open_db()\r\n sql = \"\"\r\n for m in self.models:\r\n sql = sql+\"create class %s extends %s;\\n\" % (m, self.models[m]['class'])\r\n for k in self.models[m].keys():\r\n if k != 'class':\r\n sql = sql+\"create property %s.%s %s;\\n\" % (m, k, self.models[m][k])\r\n if 'id' in str(k):\r\n sql = sql + \"create index %s_%s on %s (%s) UNIQUE ;\\n\" % (m, k, m, k)\r\n\r\n sql = sql + \"create sequence idseq type ordered;\"\r\n click.echo('[OrientModel_initialize_db_%s]'\r\n ' Initializing db with following batch statement'\r\n '\\n*************** SQL ***************\\n'\r\n '%s\\n*************** SQL ***************\\n' % (get_datetime(), sql))\r\n self.checks['initialized'] = True\r\n\r\n return self.client.batch(sql)", "title": "" }, { "docid": "b45da97885b18b7a1e9b14a83ba0820f", "score": "0.6349823", "text": "def creation_database_loading_started(self):\n heading.main_page_loading()\n print('\\t\\tCreation of the Database ... 
0%\\n\\n'\n '\\t\\t- - - - - - - - - - - - - - - - - - - - -\\n\\n')", "title": "" }, { "docid": "3f2a1691c8b3e0c77cb7b7cacc2bbcf3", "score": "0.63198614", "text": "def init_db(self):\n # Connecting to the database file\n conn = sqlite3.connect(DatabaseAccessor.DATABASE_PATH)\n c = conn.cursor()\n\n # Create new tables\n for sql in self.SQL_CREATES:\n c.execute(sql)\n\n # Committing changes and closing the connection to the database file\n conn.commit()\n conn.close()", "title": "" }, { "docid": "06a678067aaeca2b31e147bf8b83af57", "score": "0.6313009", "text": "def _init_database(self):\r\n\r\n for schema in self._object_schemas:\r\n type_specs = [self._create_sql_for_column(name, schema_item)\r\n for (name, schema_item) in schema.fields]\r\n self.cursor.execute(\"CREATE TABLE %s (%s)\" %\r\n (schema.table_name, ', '.join(type_specs)))\r\n for name, columns in schema.indexes:\r\n self.cursor.execute(\"CREATE INDEX %s ON %s (%s)\" %\r\n (name, schema.table_name, ', '.join(columns)))\r\n for name, columns in schema.unique_indexes:\r\n self.cursor.execute(\"CREATE UNIQUE INDEX %s ON %s (%s)\" %\r\n (name, schema.table_name, ', '.join(columns)))\r\n self._create_variables_table()\r\n self.set_version()\r\n self.setup_fulltext_search()", "title": "" }, { "docid": "bf0c15cd3fe6ac70b7ba41350f4bb2c8", "score": "0.63018775", "text": "def initialize_database(self):\n self.database = self.loader.request_library(\"common_libs\", \"database\")\n self.database.create_connection(\"production\")\n self.database.load_mappings()\n\n self.migrator = self.loader.request_library(\"database_tools\", \"migrator\")\n self.migrator.migrate()", "title": "" }, { "docid": "46b5beb195eb88b0cf9a0d1aa2d98dc4", "score": "0.6297803", "text": "def initialize_db():\n print(\"Do init db\")\n db.create_all()\n print(\"init db done\")", "title": "" }, { "docid": "b3e440fe43530ddba4ca0b4a847d00a9", "score": "0.62872404", "text": "def _create_tables(self):", "title": "" }, { "docid": "1631bc9ae71fad0be54bbace58f0a6ce", "score": "0.62842786", "text": "def create_database():\n print('Creating database...')\n init_db()", "title": "" }, { "docid": "aca2caa00fa9fc1fbc76530b0e78a022", "score": "0.6271357", "text": "def database():\n pass", "title": "" }, { "docid": "4c4b447b309d4ac61d1d972e740693d4", "score": "0.6265521", "text": "def populate_db():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Person class')\n\n PERSON_NAME = 0\n LIVES_IN_TOWN = 1\n NICKNAME = 2\n\n people = [\n ('Andrew', 'Sumner', 'Andy'),\n ('Peter', 'Seattle', None),\n ('Susan', 'Boston', 'Beannie'),\n ('Pam', 'Coventry', 'PJ'),\n ('Steven', 'Colchester', None),\n ]\n\n JOB_NAME = 0\n START_DATE = 1\n END_DATE = 2\n SALARY = 3\n PERSON_EMPLOYED = 4\n\n jobs = [\n ('Analyst', '2001-09-22', '2003-01-30', 65500, 'Andrew'),\n ('Senior analyst', '2003-02-01', '2006-10-22', 70000, 'Andrew'),\n ('Senior business analyst', '2006-10-23', '2016-12-24', 80000,\n 'Andrew'),\n ('Admin supervisor', '2012-10-01', '2014-11-10', 45900, 'Peter'),\n ('Admin manager', '2014-11-14', '2018-01-05', 45900, 'Peter')\n ]\n\n DEPARTMENT_NUMBER = 0\n DEPARTMENT_NAME = 1\n DEPARTMENT_MANAGER = 2\n NAME_OF_JOB = 3\n\n departments = [\n ('H153', 'Business Intelligence', 'John Phillips', 'Analyst'),\n ('H567', 'Product Development', 'Rose Adams', 'Senior analyst'),\n ('H195', 'Enterprise Strategy', 'Jonathan Range', 'Senior business '\n 'analyst'),\n ('K976', 'Production 
Engineering', 'Kathryn Allen', 'Admin '\n 'supervisor'),\n ('L763', 'Reliability Engineering', 'Adam Stunt', 'Admin manager')\n ]\n\n logger.info('Creating Person and Job records: iterate through the list of '\n 'tuples')\n logger.info('Prepare to explain any errors with exceptions')\n logger.info('and the transaction tells the database to fail on error')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for person in people:\n with database.transaction():\n new_person = Person.create(\n person_name=person[PERSON_NAME],\n lives_in_town=person[LIVES_IN_TOWN],\n nickname=person[NICKNAME])\n new_person.save()\n logger.info('Database Person add successful')\n\n logger.info('Print the Person records we saved...')\n for saved_person in Person:\n logger.info(f'{saved_person.person_name} lives in '\n f'{saved_person.lives_in_town} ' +\\\n f'and likes to be known as {saved_person.nickname}')\n\n for job in jobs:\n date_end = datetime.datetime.strptime(job[END_DATE],\"%Y-%m-%d\")\n date_start = datetime.datetime.strptime(job[START_DATE],\"%Y-%m-%d\")\n duration = date_end - date_start\n with database.transaction():\n new_job = Job.create(\n job_name=job[JOB_NAME],\n start_date=job[START_DATE],\n end_date=job[END_DATE],\n salary=job[SALARY],\n employment_duration=duration.days,\n person_employed=job[PERSON_EMPLOYED])\n new_job.save()\n logger.info('Database job add successful')\n\n logger.info('Print the Job records we saved...')\n for saved_job in Job:\n logger.info(f'{saved_job.person_employed} held the job of '\n f'{saved_job.job_name} from '\n f'{saved_job.start_date} to '\n f'{saved_job.end_date} with a salary of '\n f'{saved_job.salary}')\n\n for department in departments:\n with database.transaction():\n new_department = Department.create(\n department_number=department[DEPARTMENT_NUMBER],\n department_name=department[DEPARTMENT_NAME],\n department_manager=department[DEPARTMENT_MANAGER],\n name_of_job=department[NAME_OF_JOB])\n new_department.save()\n logger.info('Database department add successful')\n\n logger.info('Print the department records we saved...')\n for saved_department in Department:\n logger.info('Department ' f'{saved_department.department_number} '\n 'is named ' f'{saved_department.department_name}'\n ', managed by ' \n f'{saved_department.department_manager}, and has a '\n f'job of ' f'{saved_department.name_of_job}')\n\n except Exception as e:\n logger.info(f'Error creating = {person[PERSON_NAME]}')\n logger.info(e)\n logger.info('See how the database protects our data')\n\n finally:\n logger.info('database closes')\n database.close()", "title": "" }, { "docid": "74f9e654494a3765f3c2805f2b73e971", "score": "0.6265285", "text": "def db(self):\n raise NotImplementedError", "title": "" }, { "docid": "86203a90ca15a10a52cec642d40e5c70", "score": "0.6262716", "text": "def init_db():\n global app\n Recommendation.init_db(app)", "title": "" }, { "docid": "faa54e515457a672171567e91ed392ac", "score": "0.62594074", "text": "def fill_db():\n dbname = get_database()\n collection_name = dbname[\"templates\"]\n try:\n collection_name.delete_many({})\n except Exception as err:\n logger.debug(f\"ERR: {err}\")\n\n try:\n collection_name.insert_many([*templates_1, *templates_2])\n except Exception as err:\n logger.debug(f\"ERR: {err}\")\n yield\n\n\n try:\n collection_name.delete_many({})\n except Exception as err:\n logger.debug(f\"ERR: {err}\")", "title": "" }, { "docid": "36f62cdc6ff20f32e10aba355418de0a", "score": "0.62396115", "text": "async def 
db_create(self, ctx):\n\t\tawait self.rebuild_database()", "title": "" }, { "docid": "75f6c4419807df47b1696e2cf4aaf341", "score": "0.62381905", "text": "def initializing(self):\n self.cursor.executescript(INIT_USERS_DATABASE)\n self.cursor.executescript(INIT_INVENTORY_DATABASE)", "title": "" }, { "docid": "807dead6cba37df60dcfdc6251d057fe", "score": "0.6236994", "text": "def populate_database():\n dom = DomainFactory(name=\"test.com\", quota=50)\n admin = UserFactory(\n username=\"[email protected]\", groups=(\"DomainAdmins\", ),\n password=\"{PLAIN}toto\"\n )\n MailboxFactory(address=\"admin\", domain=dom, user=admin)\n account = UserFactory.create(\n username=\"[email protected]\", groups=(\"SimpleUsers\",),\n )\n MailboxFactory.create(address=\"user\", domain=dom, user=account)\n\n al = AliasFactory.create(\n address=\"[email protected]\", domain=dom\n )\n AliasRecipientFactory.create(\n address=\"[email protected]\", alias=al)\n\n al = AliasFactory.create(\n address=\"[email protected]\", domain=dom\n )\n mb = account.mailbox\n AliasRecipientFactory.create(\n address=mb.full_address, alias=al, r_mailbox=mb)\n\n al = AliasFactory.create(\n address=\"[email protected]\", domain=dom\n )\n for address in [\"[email protected]\", \"[email protected]\"]:\n AliasRecipientFactory.create(address=address, alias=al)\n\n dom.add_admin(admin)\n\n dom2 = DomainFactory.create(name=\"test2.com\", default_mailbox_quota=0)\n admin = UserFactory.create(\n username=\"[email protected]\", groups=(\"DomainAdmins\",),\n password=\"{PLAIN}toto\"\n )\n MailboxFactory.create(address=\"admin\", domain=dom2, user=admin)\n u = UserFactory.create(\n username=\"[email protected]\", groups=(\"SimpleUsers\",)\n )\n MailboxFactory.create(address=\"user\", domain=dom2, user=u)\n dom2.add_admin(admin)", "title": "" }, { "docid": "2c04d5ec3dffc2dcda802bf5284869d1", "score": "0.6226869", "text": "def example_data():\n\n user_1 = User(email=\"[email protected]\", password=\"1234\")\n search_1 = Search(search_term=\"testestest\")\n outlet_1 = Outlet(outlet_name='Best News', outlet_popularity=10, outlet_bias='Right-Center')\n user_search_1 = User_Search(user_id=1, search_id=1)\n\n db.session.add_all([user_1, search_1, outlet_1])\n db.session.commit()", "title": "" }, { "docid": "0c41314e33f88a438daf047f9b64236a", "score": "0.6223169", "text": "def __init__(self, database):\n self.database = database", "title": "" }, { "docid": "7d412440f18ffb4fe6a755293360a1a1", "score": "0.62127787", "text": "def init_db():\n global app\n Inventory.init_db(app)", "title": "" }, { "docid": "f08630fdffbb4548a9384d670878c7c8", "score": "0.6210465", "text": "def _populate_data(self):\n\n pass", "title": "" }, { "docid": "b4e83ec014e7a72cfb35bcc9baaa4f77", "score": "0.6210141", "text": "def setUp(self):\n self.database = Database()", "title": "" }, { "docid": "8d99aa9a5b920aa730a6517506b70c69", "score": "0.61997527", "text": "def load_data(self, db_name):\r\n create_schema = CreateSchema(db_name)\r\n offices = create_schema.load_offices()\r\n livingspaces = create_schema.load_living_space()\r\n staffs = create_schema.load_staff()\r\n fellows = create_schema.load_fellow()\r\n office_staff_alloc = create_schema.load_office_staff_allocations()\r\n office_fellow_alloc = create_schema.load_office_fellow_allocations()\r\n livingspaces_alloc = create_schema.load_living_space_allocations()\r\n #Adding offices\r\n for office in offices:\r\n self.create_room(\"OFFICE\", office[0])\r\n #Adding living spaces\r\n for livingspace in livingspaces:\r\n 
self.create_room(\"LIVINGSPACE\", livingspace[0])\r\n #Adding staff\r\n for staff in staffs:\r\n self.add_person(\"STAFF\", staff[0])\r\n #Adding fellows\r\n for fellow in fellows:\r\n self.add_person(\"FELLOW\", fellow[0])\r\n #Creating office staff allocations'\r\n for alloc_office in office_staff_alloc:\r\n value = self.staff_dict[str(alloc_office[1])]\r\n self.allocate_rooms(value, \"OFFICE\")\r\n #Creating office fellow allocations\r\n for alloc_office in office_fellow_alloc:\r\n value = self.fellow_dict[str(alloc_office[2])]\r\n self.allocate_rooms(value, \"OFFICE\")\r\n # Allocating Living space\r\n for alloc_living_space in livingspaces_alloc:\r\n value = self.fellow_dict[str(alloc_living_space[1])]\r\n self.allocate_rooms(value, \"LIVINGSPACE\")", "title": "" }, { "docid": "083997557b55c3c6c5815ab710f0e1e7", "score": "0.61968344", "text": "def create_database(self):\n self._create_tables()\n self._create_functions()\n self._create_triggers()", "title": "" }, { "docid": "04afa680de176ecafcefb3de5841ad9d", "score": "0.6194173", "text": "def init():\n try:\n DataBase.create_table()\n DataBase.create_price_table()\n except Exception:\n print(MainMessage.db_critical_error)\n sys.exit(0)\n print(MainMessage.db_init_success)\n if DataBase.read_record_from_pricedb():\n (Stakes.stake, Stakes.term, Stakes.substake, Stakes.subterm) = DataBase.read_record_from_pricedb()\n else:\n DataBase.insert_record_to_pricedb(Stakes.stake, Stakes.term, Stakes.substake, Stakes.subterm)\n Stakes.change_stakes()", "title": "" }, { "docid": "9355d57b29c60ed9ecb2f2147062f036", "score": "0.61881137", "text": "def init_database(self):\n self._drop_tables()\n self._create_tables()", "title": "" }, { "docid": "51e9772beb911f40e0222101163fe454", "score": "0.6180436", "text": "def update_database():\n pass", "title": "" }, { "docid": "c791e0bc0d4578a3d0bbc0f415eed76f", "score": "0.6174021", "text": "def init_table(self):\n self.create_table()", "title": "" }, { "docid": "b1dcbe9a9c1b48448873415bf2213b59", "score": "0.61739", "text": "def create_db():\n\n # TODO:\n pass", "title": "" }, { "docid": "959a78bb1df3bc568390da2ab88111ec", "score": "0.6173813", "text": "def initialize():\r\n db.connect()\r\n db.create_tables([BaseModel], safe=True)\r\n db.create_tables([Records], safe=True)", "title": "" }, { "docid": "1ff935099c2772641552fbf8e0f244b2", "score": "0.61727023", "text": "def setup_tables(self):", "title": "" }, { "docid": "ff634b9defb5c8b9f625cc35fcecb901", "score": "0.61722714", "text": "def dbcreate(self):\r\n self.db.createtable()", "title": "" }, { "docid": "79c1b2e3b68a6b4c34d605a6188648f3", "score": "0.61717933", "text": "def __init__(self, database):\n self.db = database", "title": "" }, { "docid": "92ab14e391589055ac911fad939895a5", "score": "0.6170633", "text": "def init_db_command():\n from .data.database import init_db\n init_db()\n click.echo('Initialized the database.')", "title": "" }, { "docid": "d492e45f540121a62e6f784eafc01ada", "score": "0.61700296", "text": "def init_db():\r\n\r\n db = Scraper_DB()\r\n try:\r\n db.create_tables()\r\n except Exception as e:\r\n print(\"{0}\".format(e))", "title": "" }, { "docid": "3286660208fee39a3d20c9ecd4a39791", "score": "0.616117", "text": "def initialize():\n DATABASE.connect(reuse_if_open=True)\n DATABASE.create_tables([User, ToDo], safe=True)\n DATABASE.close()", "title": "" }, { "docid": "2e163e477fb3afc401b0661de598cf24", "score": "0.61580926", "text": "def populateInitialDataSet(self):", "title": "" }, { "docid": "96d33114588b0147f80274744c9144f3", 
"score": "0.6151307", "text": "def generate_db(self):\n print('Generating LANL Earthquake Prediction Database...')\n # Unzip the database\n self._unzip_db()\n\n # Get test dataset\n self._get_test_db()\n\n # Get train dataset\n self._get_train_db()\n print('Complete!\\n')", "title": "" }, { "docid": "5c47cf554648cdddb92ff0a42d78e2ce", "score": "0.6147908", "text": "def dbSetUp(self):\n pass", "title": "" }, { "docid": "753de69c4c3126921e551c944d3b7777", "score": "0.6147715", "text": "def creation_database(self):\n heading.main_page_loading()\n print('\\t\\tPlease, write the name of the database\\n')\n self.choice_creation_database = input('[Database Menu] : #dm\\n'\n '[Quit] : #quit\\n\\n\\t=> ')\n\n return self.choice_creation_database", "title": "" }, { "docid": "0f12a34445caf9eb081b7c0332a02764", "score": "0.6136958", "text": "def initdb():\n db.create_all()\n print \":creo las tablas de la base de datos:\"\n poblardb()", "title": "" }, { "docid": "641745680596c09f4ccc1753c64609b6", "score": "0.6131554", "text": "def transfertodb():", "title": "" }, { "docid": "4f32db0d5a6f2dedaeedfadb7aba8090", "score": "0.61295503", "text": "def setUp(self):\n db.create_all()", "title": "" }, { "docid": "c1041a33217543f2273e244a6771a296", "score": "0.61277354", "text": "def build_data_base(self):\n\n self.db.create_table(\"STUDENTS\", [[\"Firstname\", \"TEXT\"], [\"LastName\", \"TEXT\"], [\"Id\", \"TEXT\"],\n [\"Gender\", \"TEXT\"], [\"Country\", \"TEXT\"], [\"Age\", \"TEXT\"],\n [\"Username\", \"TEXT\"], [\"Password\", \"TEXT\"]])\n\n self.db.create_table(\"OnlineLessons\", [[\"GeneralSubject\", \"TEXT\"], [\"MainSubject\", \"TEXT\"],\n [\"TargetAudience\", \"TEXT\"], [\"AuthorName\", \"TEXT\"],\n [\"AuthorAge\", \"TEXT\"], [\"LastName\", \"TEXT\"],\n [\"ExceptedLength\", \"TEXT\"]])", "title": "" }, { "docid": "309e67e55289e682909ea0827a4471ad", "score": "0.6122299", "text": "def init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n with app.open_resource('knowledge.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n with app.open_resource('users.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n \n db.commit()", "title": "" }, { "docid": "38e132347272880dac991ab250774e67", "score": "0.6111997", "text": "def create_database():\n init_db()\n click.echo(\"Initialized the database.\")", "title": "" }, { "docid": "aa5275a31cd425c5d356faeb8958f47a", "score": "0.6109491", "text": "def initial_setup():\n db = _get_db()\n db.execute(SCHEMA)\n db.commit()", "title": "" }, { "docid": "d24efa6d151640a258ecc9cece982fdd", "score": "0.6107269", "text": "def __init__(self):\n # try:\n\n self.filename = os.path.join(CONFIG_DATABASE_PATH,\n CONFIG_DATABASE_NAME)\n self.connection = db.connect(self.filename)\n\n # if not db.Connection.\n # print \"error\"\n \"\"\"creating a cursor\"\"\"\n\n #cursor = self.connection.cursor()\n # cursor.execute(\n # '''INSERT INTO excavation (projectID, projectTitle, projectType, projectSubType, site, studyArea)\n # VALUES (4, 'test', 'field', 'xx', 'ga', 'ta')''')\n #self.connection.commit()\n #\n # cursor = connection.execute('select * from excavation')\n # print cursor.fetchall()\n\n #self.connection.close()", "title": "" }, { "docid": "f4d3fbe5edf0d1582f8979019e09d465", "score": "0.6106553", "text": "def __init__(self, databaseName):\r\n\r\n print(\"\\n------------| DATABASE CREATION |------------\\n\")\r\n self.database_name = databaseName\r\n\r\n # Create DB\r\n self.conn = 
sqlite3.connect(self.database_name)\r\n\r\n # Variable to access DB\r\n self.c = self.conn.cursor()\r\n\r\n print(\"> Database\", self.database_name, \"was created successfully\")\r\n self.createTables()\r\n self.closeDB()", "title": "" }, { "docid": "9132cf670cc0bc8535fada9e5d53fa33", "score": "0.6106523", "text": "def _initializeDatabase(self):\n self.bot = GridBotModel(\n name = self.name,\n symbol = self.symbol,\n exchange = self.exchange.exchange_id,\n starting_balance = self.total_amount,\n current_balance = self.total_amount,\n trade_amount = self.trade_amount,\n trade_step = self.trade_step,\n test_run = self.test_mode)\n self.session.add(self.bot)\n self.session.commit()", "title": "" }, { "docid": "251dc8d2a87a8f12d31b19a5a9953e1f", "score": "0.6104041", "text": "def init_db():\n print 'Initialize DB'\n from yamp.app import db, engine\n from yamp.models import Base, __all__ as model_files\n from importlib import import_module\n for model_file in model_files:\n import_module('yamp.models.' + model_file, __name__)\n print '* Model \"%s\" loaded.' % model_file\n Base.metadata.create_all(bind=engine)\n db.commit()\n print '... All tables created!'\n raise SystemExit", "title": "" }, { "docid": "e3514015410b72f25ec9f2ccb21c09d3", "score": "0.6102784", "text": "def init_users():\n UserModel('thinh', '1234').save_to_db()\n UserModel('long', '1234').save_to_db()", "title": "" }, { "docid": "25bd78f2cf3726b2f9fd8f86e7b7d4e7", "score": "0.60980797", "text": "def setUp(self):\n schema(DBPATH)\n seed(DBPATH)", "title": "" }, { "docid": "6d6902cae012870febfcc1c047d0450c", "score": "0.6095673", "text": "def _instantiate(self):\n print(\"creating database with table(s) at \" + DB_PATH)\n\n with self._connect() as db_cur:\n\n # create disasters table\n db_cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS slack_calls (\n id integer PRIMARY KEY ,\n team_id text UNIQUE NOT NULL ,\n team_name text ,\n channel_id text UNIQUE NOT NULL ,\n access_token text UNIQUE NOT NULL ,\n bot_access_token text UNIQUE NOT NULL ,\n );\n \"\"\")\n\n # indexing columns for faster access\n db_cur.execute(\"\"\"CREATE UNIQUE INDEX IF NOT EXISTS indexed_team_id ON slack_calls (\n team_id\n );\n \"\"\")", "title": "" }, { "docid": "a1deabb1a8aa6863203ab301bd1ab2db", "score": "0.60897547", "text": "def manual_enter(self):\n self._dbconnect = sqlite3.connect(self._db_file)\n\n # Set row_factory to access columns by name\n self._dbconnect.row_factory = sqlite3.Row\n\n # Create a cursor to work with the db\n self._cursor = self._dbconnect.cursor()", "title": "" }, { "docid": "ea3633fce200e85f45e90f30ef071775", "score": "0.60858333", "text": "def initialize():\n db.connect()\n db.create_tables([Task], safe=True)", "title": "" }, { "docid": "a20d295df90cd4681dabefe43ef7696d", "score": "0.60857296", "text": "def createData(self):\n self.data = self.dbroot['data'] = OOBTree()\n self.meta = self.dbroot['meta'] = Meta('PEAT-DB database')\n self.meta.staticfields = { 'name': 'text', 'Mutations':'text',\n 'Structure':'PDB'}\n self.blobs = self.dbroot['blobs'] = OOBTree() \n #use meta.info for any misc settings specific to the DB\n self.meta.info = PersistentMapping(self.defaultinfo)\n self.commit()\n return", "title": "" }, { "docid": "e776e6da8c21cf8b3eb86d0ad5f64480", "score": "0.60816956", "text": "def initalize_user_tables(db):\n \n from shotglass2.users.models import init_db as users_init_db \n users_init_db(db)", "title": "" }, { "docid": "dcc46ccf775d7e2ce6ddb240a0fd1e10", "score": "0.6078222", "text": "def database_access(db):", 
"title": "" } ]
28ecbd2ee477b01b0414a979344fe7a1
Function that cleans the text and handles the superscript and subscript tags in the text
[ { "docid": "bada213dfa474c405bf524914fe10ab8", "score": "0.815164", "text": "def cleaning(text):\n \n ind = 0\n text = text.replace(\":\",\"\")\n n = len(text)\n\n \"\"\"Replacing sup tag with supercript\"\"\"\n while (ind < len(text)):\n if(text[ind]==\"<\" and ind < n-4 and text[ind+1:ind+4]==\"sup\"):\n curr = ind\n curr += 5\n temp = \"\"\n while(True):\n if(text[curr]==\"<\" and text[curr+1:curr+5]==\"/sup\"):\n break\n temp += text[curr]\n curr += 1\n text = text[:ind] + convert_to_superscript(temp)+text[curr + 6:]\n ind = curr + 6\n else:\n ind += 1\n\n\n \"\"\"Replacing sub tag with subcript\"\"\"\n ind = 0\n while (ind < len(text)):\n if(text[ind]==\"<\" and ind < n-4 and text[ind+1:ind+4]==\"sub\"):\n curr = ind\n curr += 5\n temp = \"\"\n while(True):\n if(text[curr]==\"<\" and text[curr+1:curr+5]==\"/sub\"):\n break\n temp += text[curr]\n curr += 1\n text = text[:ind] + convert_to_superscript(temp)+text[curr + 6:]\n ind = curr + 6\n else:\n ind += 1\n\n x = text\n y = re.sub('\\\\n', ' ',x)\n y = re.sub('\\\\r', ' ',y)\n y = re.sub('\\\\t', ' ',y)\n y = re.sub('<[^>]*>', ' ',y)\n y = y.strip()\n return y", "title": "" } ]
[ { "docid": "bcff0b3207f4a1f287474fa3ebeea447", "score": "0.70005953", "text": "def clean_text(text):\n # Replace newlines by space. We want only one doc vector.\n text = text.replace('\\n', ' ').lower()\n # Remove URLs\n #text = re.sub(r\"http\\S+\", \"\", text)\n # Expand contractions: you're to you are and so on.\n text = contractions.fix(text)\n # Remove stop words\n #text = preprocessing.remove_stopwords(text)\n \n #text = preprocessing.strip_tags(text)\n # Remove punctuation -- all special characters\n text = preprocessing.strip_multiple_whitespaces(preprocessing.strip_punctuation(text))\n return text", "title": "" }, { "docid": "2da14da586b935e9a7da3e19d0a684ac", "score": "0.6929299", "text": "def clean_text(text):\n # Replace newlines by space. We want only one doc vector.\n text = text.replace('\\n', ' ').lower()\n # Remove URLs\n text = re.sub(r\"http\\S+\", \"\", text)\n # Expand contractions: you're to you are and so on.\n text = contractions.fix(text)\n # Remove stop words\n text = preprocessing.remove_stopwords(text)\n # Remove punctuation -- all special characters\n text = preprocessing.strip_multiple_whitespaces(preprocessing.strip_non_alphanum(text))\n return text", "title": "" }, { "docid": "3dc1580629da1f55178692a2c74494d2", "score": "0.6823169", "text": "def clean_text(text): #cleantext\n replace_char = [\n \"[\",\n \"]\",\n \"u'\",\n \"None\",\n \"Thank you\",\n \"-\",\n \"(\",\n \")\",\n \"#\",\n \"Done\",\n \">\",\n \"<\",\n \"-\",\n \"|\",\n \"/\",\n \"\\\"\",\n \"Hint\",\n \"\\n\",\n \"'\"]\n for l in replace_char:\n text = text.replace(l, \"\")\n text = re.sub(' +', ' ', text)\n return text", "title": "" }, { "docid": "a73ad21d266a49d35abc6fe7f189fdf3", "score": "0.6713591", "text": "def _clean_text(self, text):\n text = re.sub('[-=+,#/\\?:^$.@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[\\]\\<\\>`…》]', '', text)\n return text", "title": "" }, { "docid": "33cad90681ef72942de02b7463b0bde4", "score": "0.6710349", "text": "def clean_text(text):\n # try:\n # text = text.encode(\"ascii\", errors=\"ignore\").decode()\n # except:\n # print(\"error in parsing text - \"+ text)\n # pass\n\n text = re.sub(r'\\([^)]*\\)', '', text)\n text = re.sub(r' +', ' ', text)\n\n text = remove_titles(text)\n return text", "title": "" }, { "docid": "439d9bbdc1cdff15c6b24bd045f96de1", "score": "0.67070097", "text": "def clean_text(text):\n\n text = re.sub('<[^>]+>', '', text)\n text = re.sub(r'[^A-Za-z0-9\\s]+', '', text)\n return text", "title": "" }, { "docid": "8cd7847dced4e22c557ff2b18816f239", "score": "0.66308665", "text": "def clean_text(self):\n cleanedtext = re.sub(r'\\n\\s*\\n', '', self.text) # extra lines\n cleanedtext = re.sub(r'[^\\w\\s]', '', cleanedtext) # punctuation\n # for numbers\n # cleanedtext = re.sub(r'[^A-Za-z0-9(),!?@\\'\\`\\\"\\_\\n]', ' ', cleanedtext)\n # cleanedtext = re.sub(r'\\w*\\d\\w*','',cleanedtext) # words with number plust letters\n # remove_digits = str.maketrans('', '', digits)\n # cleanedtext = cleanedtext.translate(remove_digits)\n cleanedtext = re.sub(' +', ' ', cleanedtext) # extra white spaces\n cleanedtext = cleanedtext.lower()\n self.cleanedtext = cleanedtext", "title": "" }, { "docid": "be79e585f11ff26f695f9bd4c24a03cc", "score": "0.6597903", "text": "def clean_text(text: str) -> str:\n fixed_text = re.sub('<[^>]+>', ' ', text) # removing everything inside <>\n fixed_text = re.sub('\\n', ' ', fixed_text)\n fixed_text = re.sub('&nbsp;', ' ', fixed_text)\n fixed_text = re.sub('nbsp', ' ', fixed_text)\n fixed_text = re.sub('&mdash;', ' ', fixed_text)\n 
fixed_text = re.sub('\\t', ' ', fixed_text)\n fixed_text = re.sub('\\r', ' ', fixed_text)\n fixed_text = re.sub('\"&hellip;', ' ', fixed_text)\n fixed_text = re.sub('&gt;', ' ', fixed_text)\n fixed_text = re.sub(r'[^\\w\\s]', ' ', fixed_text) # removing the punctuation\n fixed_text = re.sub(' +', ' ', fixed_text) # fix multiple spaces\n fixed_text = fixed_text.strip(' ')\n return fixed_text", "title": "" }, { "docid": "de370f2a8e6ba8efac5bacfa16e14e20", "score": "0.6591571", "text": "def textCleaning(self,text):\n newText = text.replace('\\n',' ').replace('\\r',' ').replace('\\t',' ').replace('<p>',' ').replace('<br>',' ').replace('</p>',' ').replace('</br>',' ').strip()\n return newText", "title": "" }, { "docid": "46dc40ef3429faddc40feea26c90edce", "score": "0.6588975", "text": "def cleanText(text):\n # clean: xml tags and mathtype -> spaces\n cleanText = replaceWithSpaces(xmlTagsRe, text)\n cleanText = replaceWithSpaces(mathTypeRe, text)\n # clean: non-letters -> spaces\n cleanText = nonLetterRe.sub(\" \", cleanText)\n cleanText = digitRe.sub(\" \", cleanText)\n return cleanText", "title": "" }, { "docid": "cf7970fb79abc26376642f89ef03a92b", "score": "0.65831715", "text": "def cleanup_text(text):\n # Remove empty paragraphs\n text = re.sub(r'<p[^>]*?>(\\s|<br>|<br/>|<br />|\\xc2\\xa0)*?</p>', '', text,\n flags=re.UNICODE)\n return text", "title": "" }, { "docid": "6bee9bd22cb054c1e24b2fb6ca4026d5", "score": "0.65464205", "text": "def clean_text(text):\r\n text = text.lower().strip() # lowercase and remove trailing whitespaces\r\n text = re.sub(r\"i'm\", \"i am\", text)\r\n text = re.sub(r\"he's\", \"he is\", text)\r\n text = re.sub(r\"she's\", \"she is\", text)\r\n text = re.sub(r\"it's\", \"it is\", text)\r\n text = re.sub(r\"that's\", \"that is\", text)\r\n text = re.sub(r\"what's\", \"what is\", text)\r\n text = re.sub(r\"where's\", \"where is\", text)\r\n text = re.sub(r\"there's\", \"there is\", text)\r\n text = re.sub(r\"how's\", \"how is\", text)\r\n text = re.sub(r\"\\'ll\", \" will\", text)\r\n text = re.sub(r\"\\'ve\", \" have\", text)\r\n text = re.sub(r\"\\'re\", \" are\", text)\r\n text = re.sub(r\"\\'d\", \" would\", text)\r\n text = re.sub(r\"\\'re\", \" are\", text)\r\n text = re.sub(r\"won't\", \"will not\", text)\r\n text = re.sub(r\"can't\", \"cannot\", text)\r\n text = re.sub(r\"n't\", \" not\", text)\r\n text = re.sub(r\"n'\", \"ng\", text)\r\n text = re.sub(r\"'bout\", \"about\", text)\r\n text = re.sub(r\"'til\", \"until\", text)\r\n text = re.sub(r'[\" \"]+', \" \", text) # remove extra spaces in between\r\n text = re.sub(r\"[-()\\\"#/@;:<>{}`+=~|.!?,]\", \"\", text)\r\n text = '<start> ' + text + ' <end>'\r\n return text", "title": "" }, { "docid": "04edfda390fef2b3f014fc3061a5fceb", "score": "0.6516315", "text": "def _remove_any_numbers_and_non_english_characters_from_text(pos_tagged_text):\n for i in xrange(len(pos_tagged_text)):\n word = pos_tagged_text[i][0]\n # Remove numbers\n for digit in string.digits:\n word = word.replace(digit, '')\n # Remove words with non-Latin characters\n try:\n word.decode('ascii')\n except UnicodeDecodeError:\n word = ''\n # Write the preprocessed word back\n pos_tagged_text[i][0] = word\n return pos_tagged_text", "title": "" }, { "docid": "67bf39279b32d413e779e3dad888c4c9", "score": "0.65065104", "text": "def clean_article_text(text):\r\n text = remove_piece_of_text(text, '<a', '>')\r\n text = text.replace('</a>', '')\r\n return text", "title": "" }, { "docid": "616fcc6dd08b5608264b3d94abfe134c", "score": "0.6477654", "text": 
"def clean_up_text(text):\n # TODO add special characters\n text = re.sub('\\( \\(([^\\)]+)\\)\\)', ' ', text)\n text = re.sub('([a-z])([.!?])([A-Z])', r'\\1\\2 \\3', text)\n text = re.sub('[\\s]+', ' ', text)\n\n return text", "title": "" }, { "docid": "283fc4c63a23b691da69908c290cb32e", "score": "0.6468758", "text": "def cleantext(text):\n # Convert any non-ASCII characters to XML like '&#40960;abcd&#1972;'\n text = text.encode(\"ascii\", \"xmlcharrefreplace\").decode(\"ascii\")\n lines = [x.strip() for x in text.splitlines()]\n text = \" \".join(lines)\n for match, out in REPLACES:\n text = re.sub(match, out, text)\n text = text.strip() # some spaces may have been generated\n\n return text", "title": "" }, { "docid": "6e0e7484b5ba593baa7ca9e6f233b94e", "score": "0.64462006", "text": "def cleanText(self):\n text = self.text().strip()\n if self.__prefix and text.startswith(self.__prefix):\n text = text[len(self.__prefix):]\n if self.__suffix and text.endswith(self.__suffix):\n text = text[:-len(self.__suffix)]\n return text.strip()", "title": "" }, { "docid": "6e0e7484b5ba593baa7ca9e6f233b94e", "score": "0.64462006", "text": "def cleanText(self):\n text = self.text().strip()\n if self.__prefix and text.startswith(self.__prefix):\n text = text[len(self.__prefix):]\n if self.__suffix and text.endswith(self.__suffix):\n text = text[:-len(self.__suffix)]\n return text.strip()", "title": "" }, { "docid": "6476d255266d39d7491fea86957199fc", "score": "0.6420874", "text": "def clean_text(text):\n \n # remove Unicode characters\n text = re.sub(r'[^\\x00-\\x7F]+', '', text)\n\n # taking sequences of characters with alphanumeric characters separated by other characters\n text = re.sub(r\"[-?!&]\",' ',text)\n text = re.sub(r'''[\"#$%()*+,./:;<=>@[\\]^_`{|}~]''','',text)\n \n # remove all numeric charracters\n text = re.sub(r'[0-9]','',text)\n\n # Remove new line characters\n text = re.sub(r'\\s+', ' ', text)\n\n # remove all numeric charracters\n text = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '',text)\n\n Apos_dict={\"'s\":\" is\",\"n't\":\" not\",\"'m\":\" am\",\"'ll\":\" will\",\n \"'d\":\" would\",\"'ve\":\" have\",\"'re\":\" are\"}\n \n #replace the contractions\n for key,value in Apos_dict.items():\n if key in text:\n text=text.replace(key,value)\n\n # convert to lowercase to maintain consistency\n text = text.lower()\n \n return text", "title": "" }, { "docid": "0f49d845d0d6f6ff334cfea8b6766cbf", "score": "0.63663334", "text": "def clean_text(text: str):\n position = text.lower().rfind(\"references\")\n text = text[:position] # removing bibliography\n text = text.replace(\"\\n\", \" \") # removing newlines\n text = re.sub(r\"\\[[^\\[^\\]]*\\]\", \"\", text) # removing references\n return text", "title": "" }, { "docid": "9e29448fa18d208e71d63ed06e30ce1b", "score": "0.63571817", "text": "def _remove_html_tags(self, text):\r\n return re.compile(r'<[^>]+>').sub('', text)", "title": "" }, { "docid": "bc6604b9eac2108048a41f4543e4766b", "score": "0.6338481", "text": "def strip_markup(wikitext):\n if not wikitext:\n return ''\n\n # remove templates\n text = remove_templates(wikitext)\n\n # remove irrelevant spans\n text = re_comments.sub('', text)\n text = re_ignored_html_tags.sub(r'\\2', text)\n text = re_self_closing_html_tags.sub('', text)\n text = re_dropped_elements.sub('', text)\n text = re_categories.sub('', text)\n text = re_files_images.sub('', text) # TODO: keep file/image captions?\n\n # replace external links with just labels or just URLs\n text = replace_external_links(text)\n\n # drop magic 
words behavioral switches\n text = re_magic_words.sub('', text)\n\n # replace internal links with just their labels\n text = replace_internal_links(text)\n # text = replace_internal_links(text) # TODO: is this needed?\n\n # remove table markup\n text = text.replace('||', '\\n|').replace('!!', '\\n!') # put each cell on a separate line\n text = re_table_formatting.sub('\\n', text) # remove formatting lines\n text = re_table_cell_formatting.sub('\\n\\\\3', text) # leave only cell content\n\n # strip out text formatting\n text = re_italic_quote.sub(r'\"\\1\"', text)\n text = re_bold_italic.sub(r'\\1', text)\n text = re_quote_quote.sub(r'\"\\1\"', text)\n\n # unescape html entities\n text = ftfy.fixes.unescape_html(text)\n\n # final cleanup\n text = re_headings.sub(r'\\n\\n\\2\\n\\n', text)\n text = re_dots.sub('...', text)\n text = re_brackets.sub(r'', text)\n text = text.replace('[[', '').replace(']]', '')\n text = text.replace('<<', '«').replace('>>', '»')\n text = re_random_cruft.sub(r'\\1', text)\n text = re.sub(r'\\n\\W+?\\n', r'\\n', text, flags=re.UNICODE)\n text = text.replace(',,', ',').replace(',.', '.')\n text = re_spaces.sub(' ', text)\n text = re_linebreaks.sub(r'\\n\\n', text)\n\n return text.strip()", "title": "" }, { "docid": "dd990b81f16b5ea23281fcb150630477", "score": "0.63193804", "text": "def cleanse_sentence(text):\n \n text = re.sub(hash_regex, hash_repl, text)\n text = re.sub(hndl_regex, hndl_repl, text)\n text = re.sub(url_regex, ' __URL ', text)\n \n for (repl, regx) in emoticons_regex :\n text = re.sub(regx, ' '+repl+' ', text)\n\n text = text.replace('\\'','')\n text = re.sub( word_bound_regex , punctuations_repl, text )\n text = re.sub( rpt_regex, rpt_repl, text )\n \n return text", "title": "" }, { "docid": "43f85bc2347365b9c12ca2580d44a348", "score": "0.6308067", "text": "def clean_text(text):\n text = re.sub('[-=+,#/\\?:^$.@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[\\]\\<\\>`…》]', '', text)\n return text", "title": "" }, { "docid": "5c7d913e073cac757804fda010b6c80c", "score": "0.6296954", "text": "def clean_text_1(text, stpwds, valid_tags, stemming=False):\n\tcleaned_text = clean_basic(text)\n\ttokens = cleaned_text.split()\n\ttokens = postagging(tokens, valid_tags)\n\ttokens = remove_stopwords(tokens, stpwds)\n\ttokens = remove_short_tokens(tokens)\n\tif stemming:\n\t\ttokens = stem(tokens)\n\tcleaned_text = remove_hyphen(' '.join(tokens))\n\treturn cleaned_text", "title": "" }, { "docid": "9247a1dbe906adbe476252f075860812", "score": "0.6290065", "text": "def clean_text(text):\n # remove html tags, lowercase, remove nonsense, remove non-letter\n aff = BeautifulSoup(text, \"lxml\").text \n aff = aff.lower()\n aff = re.sub(r'xa0|#n#‡#n#|#tab#|#r#|\\[|\\]', \"\", aff)\n aff = re.sub(r'[^a-z]+', ' ', aff)\n return aff", "title": "" }, { "docid": "4549a815eef93b30dc8e49e520ea9d4c", "score": "0.6278033", "text": "def demarkup_text(text):\n\n # keep the opening tag name of certain tags that contains these strings\n # note: <s> are from debian copyright files\n kept_tags = (\n 'lic', 'copy', 'www', 'http', 'auth', 'contr', 'leg', 'inc', '@', \n '<s>', '</s>', '169', 'a9'\n )\n\n # find start and closing tags or the first white space whichever comes first\n # or entities\n # this regex is such that ' '.join(tags.split(a))==a\n\n tags_ents = re.compile(r'(</?[^\\s></]+(?:>|\\s)?|&[^\\s&]+;|href|[\\'\"]?\\/\\>)', re.IGNORECASE).split\n\n cleaned = []\n for token in tags_ents(text):\n if token.lower().startswith(('<', '&', 'href')) and not any(k in token.lower() for k in kept_tags):\n 
continue\n else:\n cleaned.append(token)\n return u' '.join(cleaned)", "title": "" }, { "docid": "47cfd092920700f1c2f23d0fba4d528b", "score": "0.6265034", "text": "def remove_tashkeel(self, input_text):\n\n p_tashkeel = re.compile(r'[\\u0617-\\u061A\\u064B-\\u0652]')\n return re.sub(p_tashkeel,\"\", input_text)", "title": "" }, { "docid": "3afa4198b471da03ad24ac8551e73fa3", "score": "0.62647754", "text": "def clean(self):\n # Replace double \\n's in case this ever happens on windows for some reason.\n if self.text.__contains__('\\n\\n'):\n self.text = self.text.replace('\\n\\n', '\\n')", "title": "" }, { "docid": "b33f5fa7f977b65ed4d9a97b616ecbba", "score": "0.6247174", "text": "def preprocess(self):\n self.text = remove_control_characters(self.text)", "title": "" }, { "docid": "3503132af39ea4b05ddf42ac5ed12ffb", "score": "0.6229983", "text": "def strip_markup(text):\n return (text\n .replace(\"<strong class=\\\"label\\\">\", \"\")\n .replace(\"</strong>\", \"\")\n .replace(\"<span class=\\\"hotel-type\\\">\", \"\")\n .replace(\"</span>\", \"\")\n .replace(\"<strong>\", \"\")\n )", "title": "" }, { "docid": "f27009c1809229c97e68cb97aa67e615", "score": "0.62280345", "text": "def _del_text(self):\r\n # check whether element already has text node\r\n if self._root.HasChildNodes and \\\r\n self._root.FirstChild and \\\r\n isinstance(self._root.FirstChild, XmlText):\r\n self._root.RemoveChild(self._root.FirstChild)", "title": "" }, { "docid": "16f35e7f296801a447055b16f013b54b", "score": "0.6215954", "text": "def clean_text(text):\n # lowercase text\n text = text.lower()\n\n # substitute contractions\n # text = re.sub(r\"i'm\", \"i am\", text)\n # text = re.sub(r\"he's\", \"he is\", text)\n # text = re.sub(r\"she's\", \"she is\", text)\n # text = re.sub(r\"it's\", \"it is\", text)\n # text = re.sub(r\"that's\", \"that is\", text)\n # text = re.sub(r\"what's\", \"what is\", text)\n # text = re.sub(r\"where's\", \"where is\", text)\n # text = re.sub(r\"how's\", \"how is\", text)\n # text = re.sub(r\"\\'ll\", \" will\", text)\n # text = re.sub(r\"\\'ve\", \" have\", text)\n # text = re.sub(r\"\\'re\", \" are\", text)\n # text = re.sub(r\"\\'d\", \" would\", text)\n # text = re.sub(r\"\\'re\", \" are\", text)\n # text = re.sub(r\"won't\", \"will not\", text)\n # text = re.sub(r\"can't\", \"cannot\", text)\n # text = re.sub(r\"n't\", \" not\", text)\n # text = re.sub(r\"n'\", \"ng\", text)\n # text = re.sub(r\"'bout\", \"about\", text)\n # text = re.sub(r\"'til\", \"until\", text)\n\n # get rid of username handles\n text = re.sub(r\"(?:@)(\\S+|$)|\", \"\", text)\n\n # get rid of special characters\n text = re.sub(r\"[-()\\\"#/@;:<>{}`+=~|.!?,]\", \"\", text)\n\n return text", "title": "" }, { "docid": "88c982c1d7c70e2702c8b9fafed7d4d5", "score": "0.6204147", "text": "def strip_tags(text):\n\n return re.sub('<[^<]+?>', '', text)", "title": "" }, { "docid": "0fd6daf8f52ba6aeedce909badebd364", "score": "0.6194485", "text": "def clean_text(raw_text):\n # Must strip HTML tags out first!\n text = re.sub('<[^<]+?>', ' ', raw_text)\n text = handle_unicode(text)\n text = handle_html_unquote(text)\n text = handle_mac_quotes(text)\n text = handle_text_break_dash(text)\n text = text.lower()\n\n regex_subs = ['\\t', '\\n', '\\r', '\\s+', '&']\n for regex_sub in regex_subs:\n text = re.sub(regex_sub, ' ', text)\n return text", "title": "" }, { "docid": "14b2ba621e89303b47c25fd967a686e4", "score": "0.6182298", "text": "def _clean_text(source):\n source = str(source)\n source = source.upper()\n source = 
re.sub('[?|?|!|!|,|,|.|。|#|《|》|<|>|(|)|(|)|〉|、|/|…|-]', '', source)\n source = re.sub('[哈|嘿|呵]', '', source)\n source = re.sub('NAN', '', source)\n source = re.sub('\\xa0', '', source)\n return source", "title": "" }, { "docid": "30006085bad7c937637e584674a6cc02", "score": "0.6176192", "text": "def _clean_bad_chars(self, text):\n return text.replace(\"\\u200b\", \"\")", "title": "" }, { "docid": "1c3428df1fe332453e101d924bcc714b", "score": "0.6171018", "text": "def text2cleanish(text):\n logger.info(\"Cleaning text ...\")\n text = CLEANER1.sub(r\"\\1 \\2 \\3\", text)\n text = CLEANER2.sub(r\"\\1 ... \\3\", text)\n text = CLEANER3.sub(r\"\\1 ... \", text)\n # text = CLEANER4.sub(r\"\\1 X... \\3\", text)\n return text", "title": "" }, { "docid": "928336714d22728e7f69363c3345b4c8", "score": "0.6169784", "text": "def clean(self, text = None) -> str:\n return self.normalize(\n self.clean_double_whitespaces(\n self.clean_edge_punctuation(\n self.clean_numeric(\n self.clean_web_tags(\n self.clean_html_tags(\n self.clean_non_ascii(\n text\n )\n )\n )\n )\n )\n )\n )", "title": "" }, { "docid": "cff7fa0a0cef7e0a4aa8555fc2fa3982", "score": "0.61514693", "text": "def clean_text_remove_puncs(cls, text):\n if isinstance(text, str):\n try:\n text = cls.clean_text(text)\n text = cls._remove_punctuations(text)\n\n except (TypeError, AttributeError):\n logger.warning(\"Error occured in preprocessing text: {}\".format(text))\n sys.exit(0)\n\n return text.strip()\n\n return \"\"", "title": "" }, { "docid": "6400609a86643ee6fd2cb125e5b5aae5", "score": "0.61486775", "text": "def safe_clean_text(text):\n # strip\n text = text.strip()\n\n # weird words\n text = delete_words(delete_list, text)\n\n # handle \\\\\\'t \\\\\\'ve\n text = text.replace(r\"\\\\\", \"\")\n\n # remove extra spaces\n text = re.sub(r'\\s+', ' ', text)\n\n # fix puncutations\n for p in punctuations:\n text = text.replace(p, p[1:])\n text = re.sub(\" (\\u2018|\\u2019|') \", r\"\\1\", text)\n\n # replace to EN dash\n text = re.sub(u\"\\u2014\", \"-\", text)\n text = re.sub(u\"\\u2013\", \"-\", text)\n\n return text", "title": "" }, { "docid": "6857e424290706ed34fd0100320e8694", "score": "0.61465544", "text": "def remove_tags(text):\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "title": "" }, { "docid": "5086afb5e199b8a13a21c4f58efbca2f", "score": "0.61259377", "text": "def remove_smart_quotes(self, text):\r\n return text.replace(u\"\\u201c\",\"\").replace(u\"\\u201d\", \"\")", "title": "" }, { "docid": "49b44d02ed0cc749e2cc7f1d32af1451", "score": "0.6104445", "text": "def clean_txt(self, text):\n\n text = re.sub(\"'\", \"\", text)\n text = re.sub(\"(\\\\W)+\", \" \", text)\n return text", "title": "" }, { "docid": "faaffdfe0444eca15482157446d68b10", "score": "0.6088906", "text": "def clean_text(text):\n translation_dict = {\n '’':\"'\",\n '‘':\"'\",\n # \"œ\":'oe',\n }\n # remove control characters not supported by XML.\n text = strip_chars(text)\n text = unicodedata.normalize('NFC', text.strip())\n text = re.sub(r'(\\n\\n )+', ' ', text)\n text = re.sub(r'([a-zA-Zàâçéèêëîïôûùüÿñæœ,;-])(\\n)+([^\\s])', r'\\1 \\3', text)\n return text.translate(str.maketrans(translation_dict))", "title": "" }, { "docid": "ab72be9d87c9f54053505d1b1eaf6b2f", "score": "0.6088736", "text": "def clearMyText(self):\n self.clearText(self.textName)\n self.clearText(self.textCR)\n self.clearText(self.textAL)\n self.clearText(self.textEC)\n self.clearText(self.textIA)", "title": "" }, { "docid": "b81ac2e011b854386493cc068a9e46dd", "score": 
"0.60839474", "text": "def clear_texts(self):\n\n for t in self.texts[:]:\n if not t[4]:\n self.texts.remove(t)", "title": "" }, { "docid": "b3eb01e6647165fa5f3f6cc66a802663", "score": "0.6076601", "text": "def cleanText(self, s):\n\n\t\tdef _sent_to_lines(sent):\n\t\t\t\"\"\"Converts a string of text to one sentence each line.\"\"\"\n\t\t\tsent = re.sub(r\"\\n\", \" \", sent)\n\t\t\treturn re.sub(r\"(\\.|\\!|\\?|--)+\\s*\", \"\\n\", sent)\n\n\t\tdef _strip_punct(sent):\n\t\t\t\"\"\"Strips away all punctuations from the text.\"\"\"\n\t\t\treturn sent.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\n\t\treturn _strip_punct(_sent_to_lines(s)).lower()", "title": "" }, { "docid": "e1727f2673de7c749d5ef2a767231eba", "score": "0.60653853", "text": "def clean_message(self, text):\n text = strip_tags(text)\n return text", "title": "" }, { "docid": "a8d8abba63fc621d1b31a7dad4026d4e", "score": "0.60498005", "text": "def __remove_tags(self, html_text):\n tags_regex = re.compile(\n '<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n tag_free_text = re.sub(tags_regex, ' ', html_text)\n\n return tag_free_text", "title": "" }, { "docid": "9ba3be36a740be33aa131f168e63ba4e", "score": "0.6047604", "text": "def clean_text(text):\n if text:\n text = html2text.html2text(clean_markdown(text))\n return re.sub(r'\\s+', ' ', text).strip()", "title": "" }, { "docid": "9a1fbdfc06773a8ebff8e45493c51bd1", "score": "0.603565", "text": "def remove_html_tags(text):\r\n clean = re.compile('\\{<.*?>\\}')\r\n return re.sub(clean, '', text)", "title": "" }, { "docid": "4c1f72d624a2444e1814a702ea0cf7e8", "score": "0.6031818", "text": "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "4c1f72d624a2444e1814a702ea0cf7e8", "score": "0.6031818", "text": "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "e4682d3a218fa9f846931042e24d0607", "score": "0.60222965", "text": "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or self._is_control(char):\n continue\n if self._is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "e4f3d2d2e47513b04b3d7b686b294cdd", "score": "0.6021114", "text": "def _clean_text(self, listing_text):\n if len(listing_text)>0:\n listing_text_clean = ' '.join(listing_text[0].findAll(text=True))\n listing_text_clean = re.sub('\\n|\\t', ' ', listing_text_clean)\n listing_text_clean = re.sub('\\s+', ' ', listing_text_clean)\n listing_text_clean = re.sub(\"'\", '', listing_text_clean)\n listing_text_clean = re.sub('\"', '', listing_text_clean)\n else:\n listing_text_clean = ' ' \n return listing_text_clean", "title": "" }, { "docid": "df88aa6338c651f311f8313abd30f022", "score": "0.6015763", "text": "def clean(text, tokenizer, stemmer):\n doc = ''.join(text).lower()\n doc = re.sub(r'[<>\\{}/;|\\[\\]-]', ' ', doc)\n doc = re.sub(r'[0-9]', ' ', doc)\n doc = re.sub(r'\\'', ' ', doc)\n doc = re.sub(r'=', ' ', doc)\n doc = re.sub(r':', ' ', doc)\n doc = re.sub(r'\"', ' ', doc)\n doc = re.sub(r'\\s+', ' ', doc)\n doc = 
re.sub(r'\\(', ' ', doc)\n doc = re.sub(r'\\)', ' ', doc)\n doc = re.sub(r'\\s{2,}', ' ', doc)\n doc = re.sub(r'\\.', '', doc)\n doc = re.sub(r',', '', doc)\n\n return doc", "title": "" }, { "docid": "6d531dc571d409ba0f71d86b949d095b", "score": "0.6010883", "text": "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "2c801ea1e26a1508d732d0d546871740", "score": "0.60044676", "text": "def ocr_cleaner(text):\n\n text = text.lower().replace('\\n', ' ').replace('\\r', ' ').replace('\\t', ' ').strip() # replace all whitespace, lower\n text = ' '.join(text.split()) # remove consecutive spaces\n text = re.sub(r'(?<=[a-z]{3})[.]', '.\\n', text)\n text = text.encode('ascii', errors='ignore').decode(encoding='utf8').replace(r'\\\\', '').replace('\"', '')\n text = re.sub(r'(?<=nae)', '\\n', text)\n text = re.sub(r'(?<=dae)', '\\n', text)\n text = text.replace('life histories of cascadia butterflies', '').replace('\\\\', '').replace('|', '')\n text = text.replace('adult biology', '').replace('immature stage biology', '')\n text = text.replace('description of immature stages', '').replace('discussion', '')\n text = re.sub(r'([0-9]){3}', '', text)\n text = re.sub(r'([i])\\b', '', text)\n text = re.sub(r'(family|subfamily).*', '', text)\n text = text.replace('-', ' ').replace('tlie', 'the').replace('ihis', 'this').replace('ditilicult', 'difficult')\n text = re.sub(r'^\\s*$', '', text)\n text = ' '.join(text.split()) # remove consecutive spaces\n text = re.sub(r'(?<=[a-z]{4})[.]', '.\\n', text)\n text = text.strip()\n text = re.sub(r'[.,();\\']', '', text)\n\n return text", "title": "" }, { "docid": "9bbe6034a0e46963cb33decf1d35dfa4", "score": "0.59949124", "text": "def remove_punc(data):\n \n #All text to lower case\n data = data.lower()\n #Punctuation subbed out for ''\n data = punc.sub( '', data)\n \n return data", "title": "" }, { "docid": "cc5773a680ca43dad9ee13f3609035f2", "score": "0.5977697", "text": "def _remove_all_other_punctuation(pos_tagged_text):\n for j in xrange(len(pos_tagged_text)):\n word = pos_tagged_text[j][0]\n for symbol in (',', '.', ';', '-'):\n word = word.replace(symbol, ' ')\n # Write the preprocessed word back\n pos_tagged_text[j][0] = word\n return pos_tagged_text", "title": "" }, { "docid": "64bca5555bdd1a051e9978c44b240c42", "score": "0.597209", "text": "def remove_html(text):\n return html.unescape(re.sub(doubleSpacingRe, ' ', re.sub(nbspRe, ' ', text)))", "title": "" }, { "docid": "f8f082dba8399241d49d19b16ec80984", "score": "0.5968257", "text": "def get_cleaned_text(self, text):\n cleaned_text = text.replace('\\\"','').replace('\\'','').replace('-',' ')\n cleaned_text = self.remove_non_ascii_chars(cleaned_text)\n \n # retweet\n if re.match(r'RT @[_A-Za-z0-9]+:',cleaned_text): # retweet\n if self.remove_retweets: return ''\n retweet_info = cleaned_text[:cleaned_text.index(':')+2] # 'RT @name: ' will be again added in the text after cleaning\n cleaned_text = cleaned_text[cleaned_text.index(':')+2:]\n else:\n retweet_info = ''\n cleaned_text = self.remove_hyperlinks(cleaned_text)\n cleaned_text = cleaned_text.replace('#','HASHTAGSYMBOL').replace('@','ATSYMBOL') # to avoid being removed while removing punctuations\n \n tokens = [w.translate(self.punc_table) for w in word_tokenize(cleaned_text)] # remove punctuations and tokenize\n tokens = [w for w in 
tokens if not w.lower() in self.stop_words and len(w)>1] # remove stopwords and single length words\n cleaned_text = ' '.join(tokens)\n \n cleaned_text = cleaned_text.replace('HASHTAGSYMBOL','#').replace('ATSYMBOL','@')\n cleaned_text = retweet_info + cleaned_text\n \n return cleaned_text", "title": "" }, { "docid": "5e0c7d2d36d3f9e03b06bb850b936efa", "score": "0.5966438", "text": "def remove_char(stringedText):\n\ttext = stringedText\n\t#cleanedText = '' # need a place to store the chars that actually belong\n\t\n\t# new method that skips all the if statement branches\n\t# 1. Put all the special chars in a single list\n\t# 2. Iterate through the list when analyzing each char\n\t# 3. recurs as before\n\n\t# if it's Alice.txt, then use the full list. otherwise use shorter list\n\tif sys.argv[2] == '-2':\n\t\tlistOfSpecialChars = [',', '.', '?', '!', '`', '<del>', '<esc>', '<cntrl>', '<tab>', '<cmd>', '<right>', '<left>', '<down>', '<up>', '<shift>']\n\telse:\n\t\tlistOfSpecialChars = ['<del>', '<esc>', '<cntrl>', '<tab>', '<cmd>', '<right>', '<left>', '<down>', '<up>', '<shift>']\n\n\n\tfor item in listOfSpecialChars:\n\t\tif item in text:\n\t\t\ttext = text.replace(item, '')\n\t\t\tremove_char(text) # recursively call until item no longer in text, then fail condition\n\treturn text", "title": "" }, { "docid": "6ce6cdf443d7400675c4686b8fff22d3", "score": "0.59407234", "text": "def remove_html(self, text):\n soup = BeautifulSoup(text, 'html.parser')\n return soup.get_text(separator=\" \")", "title": "" }, { "docid": "6fadf8ad9b6688d84f136701aeff7897", "score": "0.59321564", "text": "def clean_text(doc):\n \n words = re.sub(\"< ?/?[a-z]+ ?>|\\n\", \"\", doc)\n words = tokenize_text(words)\n words = lemmatize_text(words)\n words = remove_stopwords(words)\n doc = [w for w in words if w.isalnum()]\n doc = ' '.join(doc)\n \n return doc", "title": "" }, { "docid": "83730e0c1709a337afd4043992bf3a87", "score": "0.5919678", "text": "def preprocess(self, text):\r\n return text", "title": "" }, { "docid": "895767f9567130d8209cec47ed740cd6", "score": "0.5917767", "text": "def basic_cleaners(self, text):\n text = self.lowercase(text)\n text = self.collapse_whitespace(text)\n return text", "title": "" }, { "docid": "acefa458000e3f328bffb60228142df1", "score": "0.5913148", "text": "def clear_text(self):\n self.ax.texts.clear()", "title": "" }, { "docid": "15b60b67b4c52b195697d002d8f248cc", "score": "0.59017366", "text": "def _do_smart_punctuation(self, text):\r\n if \"'\" in text: # guard for perf\r\n text = self._do_smart_contractions(text)\r\n text = self._opening_single_quote_re.sub(\"&#8216;\", text)\r\n text = self._closing_single_quote_re.sub(\"&#8217;\", text)\r\n\r\n if '\"' in text: # guard for perf\r\n text = self._opening_double_quote_re.sub(\"&#8220;\", text)\r\n text = self._closing_double_quote_re.sub(\"&#8221;\", text)\r\n\r\n text = text.replace(\"---\", \"&#8212;\")\r\n text = text.replace(\"--\", \"&#8211;\")\r\n text = text.replace(\"...\", \"&#8230;\")\r\n text = text.replace(\" . . . \", \"&#8230;\")\r\n text = text.replace(\". . 
.\", \"&#8230;\")\r\n return text", "title": "" }, { "docid": "d69ee53fb8109b8eac653e744ce6cf16", "score": "0.5900395", "text": "def preprocess_text(data):\n if 'Body' in data:\n data['Body'] = sanitise_xml(data['Body'])\n \n if 'Tags' in data:\n data['Tags'] = re.findall('\\<([^>]+)', data['Tags'])\n \n return data", "title": "" }, { "docid": "ee91042b95d6239ece6d61938a4c2c0c", "score": "0.589461", "text": "def cleanupText(path):\n \n text_cleaned = ''\n try:\n f = open(path)\n raw = f.read().lower()\n text = raw \n text_cleaned = re.sub(r'[-.?!,\":;()/|0-9]','', text)\n \n# splitword = text_cleaned.split(\" \")\n# \n# for word in splitword:\n# if word in stop_word:\n# splitword.remove(word)\n# text_cleaned = \" \".join(splitword)\n# # print \"\\n Word count after:\" + str(len(text_translated.split())) + \"\\n\"\n# text_cleaned = ' '.join([word for word in text_cleaned.split(' ') if (word and len(word) > 1)])\n finally:\n f.close()\n return text_cleaned", "title": "" }, { "docid": "56e9121c9e15b11560fa8798e6959444", "score": "0.58939534", "text": "def normalize_text(self):\n self.normalized_text = normalize_text(remove_nonwords(self.raw_text))\n return True", "title": "" }, { "docid": "ae4d9363dc24767294beb89a6c113215", "score": "0.5891619", "text": "def delete_text(self):\n self.in_title.delete(0, \"end\")\n self.in_author.delete(0, \"end\")\n self.answer.configure(text=\"\")\n self.answer.configure(text=\"\")", "title": "" }, { "docid": "9777cfd2650717c6e9d64f74f792a8dc", "score": "0.58881503", "text": "def clean_text(s: str) -> str:\n s = unicodedata.normalize(\"NFKD\", s) # simplify unicode of string\n s = html.unescape(s) # convert html entities to unicode characters\n s = re.sub(r\"(</p>|</li>)\", \"\\n\", s) # replace p or li close with newline \n s = re.sub(r\"<li>\", \"• \", s) # replace list item start with bullets\n s = re.sub(r\"<[^<]+?>\", \"\", s) # otherwise strip HTML tags\n return s", "title": "" }, { "docid": "c27e37a99ab23a9aadfc8937ecf3c815", "score": "0.5877997", "text": "def remove_html_tags(text):\n return HTMLTagsStripper.strip_tags_(text)", "title": "" }, { "docid": "d22683fdf5d6c00468b6eac0349e1afe", "score": "0.587424", "text": "def remove_meta(text):\n text = re.sub(r'(\\[.*?\\]+)', '', text) # e.g. [one] or [[ two ]]\n text = re.sub(r'(\\{.*?\\}+)', '', text) # e.g. {one} or {{ two }}\n text = re.sub(r'(<.*?>+)', '', text) # e.g. 
<one> or <<two>>\n return text", "title": "" }, { "docid": "39cb9610b3068c092810921d8c94fb7b", "score": "0.5866584", "text": "def clean_latex(text):\n # edge case\n text = re.sub(r'\\[math\\]', ' LaTex math ', text)\n text = re.sub(r'\\[\\/math\\]', ' LaTex math ', text)\n text = re.sub(r'\\\\', ' LaTex ', text)\n\n pattern_to_sub = {\n r'\\\\mathrm': ' LaTex math mode ',\n r'\\\\mathbb': ' LaTex math mode ',\n r'\\\\boxed': ' LaTex equation ',\n r'\\\\begin': ' LaTex equation ',\n r'\\\\end': ' LaTex equation ',\n r'\\\\left': ' LaTex equation ',\n r'\\\\right': ' LaTex equation ',\n r'\\\\(over|under)brace': ' LaTex equation ',\n r'\\\\text': ' LaTex equation ',\n r'\\\\vec': ' vector ',\n r'\\\\var': ' variable ',\n r'\\\\theta': ' theta ',\n r'\\\\mu': ' average ',\n r'\\\\min': ' minimum ',\n r'\\\\max': ' maximum ',\n r'\\\\sum': ' + ',\n r'\\\\times': ' * ',\n r'\\\\cdot': ' * ',\n r'\\\\hat': ' ^ ',\n r'\\\\frac': ' / ',\n r'\\\\div': ' / ',\n r'\\\\sin': ' Sine ',\n r'\\\\cos': ' Cosine ',\n r'\\\\tan': ' Tangent ',\n r'\\\\infty': ' infinity ',\n r'\\\\int': ' integer ',\n r'\\\\in': ' in ',\n }\n # post process for look up\n pattern_dict = {k.strip('\\\\'): v for k, v in pattern_to_sub.items()}\n # init re\n patterns = pattern_to_sub.keys()\n pattern_re = re.compile('(%s)' % '|'.join(patterns))\n\n def _replace(match):\n \"\"\"\n reference: https://www.kaggle.com/hengzheng/attention-capsule-why-not-both-lb-0-694 # noqa\n \"\"\"\n try:\n word = pattern_dict.get(match.group(0).strip('\\\\'))\n except KeyError:\n word = match.group(0)\n print('!!Error: Could Not Find Key: {}'.format(word))\n return word\n return pattern_re.sub(_replace, text)", "title": "" }, { "docid": "39cb9610b3068c092810921d8c94fb7b", "score": "0.5866584", "text": "def clean_latex(text):\n # edge case\n text = re.sub(r'\\[math\\]', ' LaTex math ', text)\n text = re.sub(r'\\[\\/math\\]', ' LaTex math ', text)\n text = re.sub(r'\\\\', ' LaTex ', text)\n\n pattern_to_sub = {\n r'\\\\mathrm': ' LaTex math mode ',\n r'\\\\mathbb': ' LaTex math mode ',\n r'\\\\boxed': ' LaTex equation ',\n r'\\\\begin': ' LaTex equation ',\n r'\\\\end': ' LaTex equation ',\n r'\\\\left': ' LaTex equation ',\n r'\\\\right': ' LaTex equation ',\n r'\\\\(over|under)brace': ' LaTex equation ',\n r'\\\\text': ' LaTex equation ',\n r'\\\\vec': ' vector ',\n r'\\\\var': ' variable ',\n r'\\\\theta': ' theta ',\n r'\\\\mu': ' average ',\n r'\\\\min': ' minimum ',\n r'\\\\max': ' maximum ',\n r'\\\\sum': ' + ',\n r'\\\\times': ' * ',\n r'\\\\cdot': ' * ',\n r'\\\\hat': ' ^ ',\n r'\\\\frac': ' / ',\n r'\\\\div': ' / ',\n r'\\\\sin': ' Sine ',\n r'\\\\cos': ' Cosine ',\n r'\\\\tan': ' Tangent ',\n r'\\\\infty': ' infinity ',\n r'\\\\int': ' integer ',\n r'\\\\in': ' in ',\n }\n # post process for look up\n pattern_dict = {k.strip('\\\\'): v for k, v in pattern_to_sub.items()}\n # init re\n patterns = pattern_to_sub.keys()\n pattern_re = re.compile('(%s)' % '|'.join(patterns))\n\n def _replace(match):\n \"\"\"\n reference: https://www.kaggle.com/hengzheng/attention-capsule-why-not-both-lb-0-694 # noqa\n \"\"\"\n try:\n word = pattern_dict.get(match.group(0).strip('\\\\'))\n except KeyError:\n word = match.group(0)\n print('!!Error: Could Not Find Key: {}'.format(word))\n return word\n return pattern_re.sub(_replace, text)", "title": "" }, { "docid": "8c3df43473fa1625aa47e63ad2cfc900", "score": "0.58606964", "text": "def _clear_tag_string(self, tag_string):\n #clean = re.compile(\".?>\")\n clean = re.compile('.?>|\\\"|.?/>|</.+')\n text = 
re.sub(clean, \"\", tag_string)\n\n return text", "title": "" }, { "docid": "0f836543dc5442e2680791c89d334a8b", "score": "0.58566207", "text": "def remove_html_tags(text):\n tag_free = re.sub(r\"<.*?>\", \" \", text)\n return re.sub(r\" +\", \" \", tag_free).strip() # make it nice", "title": "" }, { "docid": "bb4c313f87e01e348ee6179acdfb5eaa", "score": "0.58543164", "text": "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text", "title": "" }, { "docid": "1d664d3566821921a5d68e2c86a08931", "score": "0.5848827", "text": "def remove_html_tags(text):\r\n import re\r\n clean = re.compile('<.*?>')\r\n return re.sub(clean, '', text)", "title": "" }, { "docid": "ddd0c2368ecf797bcb9dfcd574fc6ff7", "score": "0.584826", "text": "def clean_text(self):\n return self.sanitize_html(self.cleaned_data['text'])", "title": "" }, { "docid": "e482c773dc5fd21e1ebd09d9eb3ef6ad", "score": "0.58404464", "text": "def _process_text(self,\n index: int,\n lower=True,\n delete_whitespace=True,\n delete_urls=True) -> str:\n text = self._texts[index]\n if not text:\n return None\n\n if not isinstance(text, str):\n raise TypeError(\"Text value is not string\")\n\n if lower:\n text = text.lower()\n if delete_whitespace:\n text = re.sub(r'[\\s]+', r' ', text).strip()\n if delete_urls:\n text = URLS.sub('', text)\n return text", "title": "" }, { "docid": "7e4f38466182c733dd2f112000f2c369", "score": "0.5832301", "text": "def clean(self, text):\n\n return text.replace(\"$=\", \"<=\")", "title": "" }, { "docid": "7db598df2c8505b0d3bd9bcdd77e327c", "score": "0.5831346", "text": "def textilize(s):\n s = s.replace(\"<p>\", \" \").replace('&nbsp;', ' ')\n return _re_html.sub(\"\", s)", "title": "" }, { "docid": "595bc2579550e46f323aeeb024cfdaea", "score": "0.5830553", "text": "def character_cleanup(self, char):\n if self.tagcategory:\n char.tags.remove(category=self.tagcategory)\n char.attributes.remove(category=self.tagcategory)", "title": "" }, { "docid": "523d6b90dc8d3f1d665155c824d38e3d", "score": "0.5829617", "text": "def epub_to_clean_text(ebook):\n out = epub2text(ebook)\n text=\"\"\n for chapter in out : \n text += filter_nonprintable(chapter) \n return text", "title": "" }, { "docid": "15535fe57d25f9aa6661008b09fe0ecc", "score": "0.5823075", "text": "def remove_html_tags(text):\n\t#print(text)\n\t#First replace linebreaks to something NON-HTML so that it wont be cleared in the next step.\n\ttext = text.replace(\"<br>\", \"BRBRBR\")\n\t#print(text)\n\n\timport re\n\tclean = re.compile('<.*?>')\n\tclean = re.sub(clean, '', text)\n\t#print(clean)\n\t#Again replace back our linebreaks to HTML format\n\tclean = clean.replace(\"BRBRBR\", \"<br>\")\n\t#print(clean)\n\t#print(\"---end---\")\n\treturn Markup(clean)", "title": "" }, { "docid": "40d8dc5dfdcf10201dd1eb69952be726", "score": "0.5818737", "text": "def text_standardize(text):\n text = text.replace('—', '-')\n text = text.replace('–', '-')\n text = text.replace('―', '-')\n text = text.replace('…', '...')\n text = text.replace('´', \"'\")\n text = re.sub(r'''(-+|~+|!+|\"+|;+|\\?+|\\++|,+|\\)+|\\(+|\\\\+|\\/+|\\*+|\\[+|\\]+|}+|{+|\\|+|_+)''', r' \\1 ', text)\n text = re.sub(r'\\s*\\n\\s*', ' \\n ', text)\n text = re.sub(r'[^\\S\\n]+', ' ', text)\n return text.strip()", "title": "" }, { "docid": "5e09095356905e64ce0cbd9278becd9a", "score": "0.581436", "text": "def clean_text(text):\n digi_punct = \"[^a-zA-Z.1234567890'# ]\" # used by clean_text(text_corpus)\n text = text.replace(\"´\", \"'\")\n text = re.sub(digi_punct, \" \", 
text)\n\n # digi_punct = \"[^a-zA-Z.1234567890' ]\"\n text = \" \".join(text.split())\n text = text.lower()\n return text", "title": "" }, { "docid": "6dc88ebeb02bbc3589d10ad07550933f", "score": "0.58119494", "text": "def remove_speech_meta(text):\n text = re.sub(r'^(\\s*\\([^)]*\\)\\s*)', '', text) # e.g. (to Ray) Hello\n text = re.sub(r'(\\s*\\([^)]*\\)\\s*)$', '', text) # e.g. Hello (motions at Ray)\n return text", "title": "" }, { "docid": "95693e0352cd0d901a0f5a44a8f4cfe8", "score": "0.58030546", "text": "def clean_tweet_text(tweet):\n tweet = tweet.strip(\"\\n\")\n tweet = tweet.strip(\"\\t\")\n tweet = remove_emojis(tweet)\n tweet = remove_hashtag_symbol(tweet)\n return tweet", "title": "" }, { "docid": "09587b5540da31a7dfd6b6bbd3d7785d", "score": "0.5796727", "text": "def clean(text: str) -> str:\n\n clean_str = ''\n for char in text:\n if char.isalnum() or char == UNDERSCORE:\n clean_str = clean_str + char\n elif char == HASHTAG_SYMBOL or char == MENTION_SYMBOL:\n clean_str = clean_str + SPACE + char\n else:\n clean_str = clean_str + SPACE\n return clean_str", "title": "" }, { "docid": "4136548aa14d7139072f51ccbad881f9", "score": "0.57962835", "text": "def _remove_tags(html):\n html = _replace_ampersands(html)\n html = re.sub('\\<sup\\>|\\</sup\\>', '', html)\n html = re.sub('\\<sub\\>|\\</sub\\>', '', html)\n tagrx = re.compile('\\<.+?\\>', flags=re.DOTALL)\n html = tagrx.sub(' ', html)\n return html", "title": "" }, { "docid": "cdd98a8e4b041ee8fc317d9868467d75", "score": "0.57946014", "text": "def clean_paragraph(p):\n\n\tif p.find_all(\"span\") == None:\n\t\tp_text = p\n\telse:\n\t\tfor span_tag in p.find_all(\"span\"):\n\t\t\tspan_tag.replace_with(\"\")\n\t\tp_text = str(p)\n\tcorrected_p = re.sub(r' \\(.*?\\)', \"\", p_text)\n\treturn corrected_p", "title": "" }, { "docid": "f60bbf591aad17c3da73f1146cab065d", "score": "0.5793579", "text": "def compact_text(data):\n interim_codes = compact_text_interim(data)\n return (_compact_chunk(chunk) for chunk in chunks(interim_codes, 2))", "title": "" }, { "docid": "7546c60a27ce53548e27a01df5be84cc", "score": "0.57929564", "text": "def normalize_text(text):\r\n text = text.replace('\\r\\n', '\\n').replace('\\r', '\\n').expandtabs()\r\n if not text.endswith('\\n'):\r\n text += '\\n'\r\n\r\n # ignore BOM chars at the begining of template\r\n BOM = '\\xef\\xbb\\xbf'\r\n if isinstance(text, str) and text.startswith(BOM):\r\n text = text[len(BOM):]\r\n \r\n # support fort \\$ for backward-compatibility \r\n text = text.replace(r'\\$', '$$')\r\n return text", "title": "" }, { "docid": "b56abfb553c3d5584260c5b0cf4c18d3", "score": "0.57876676", "text": "def delete_chars(text, unwanted_chars):\n for char in unwanted_chars:\n text = text.replace(char, '')\n return text", "title": "" } ]
ffb57bc6ba104f32dc98f65948a63fa8
Returns best solution and energy after all runs
[ { "docid": "4e583b5900abb5f15a91f16db7969b24", "score": "0.0", "text": "def mws(model):\n \n def max_score_local(x_val, k = 0):\n \"\"\"\n Generates best neighbor upon mutating in one random direction\n Input: x_val to be mutated\n Output: neighbor with best score along index k in x_val\n \"\"\"\n steps = 10\n best_neigh = x_val[:]\n mutated_neigh = x_val\n if isinstance(model.max_x, list):\n step_max = model.max_x[k]\n step_min = model.min_x[k]\n else:\n step_max = model.max_x\n step_min = model.min_x\n increment = (step_max - step_min)/steps\n for i in xrange(steps):\n mutated_neigh[k] = step_min + increment*i\n mutated_neigh = model.ok(mutated_neigh)\n mutant_e = model.aggregator(mutated_neigh)\n best_e = model.aggregator(best_neigh)\n if mutant_e > best_e:\n best_neigh = mutated_neigh\n return best_neigh\n \n \n max_tries = 100\n max_changes = 50\n threshold = 100\n p = 0.5\n k = 0\n output = \" \"\n better_count = 0\n confused_count = 0\n best_count = 0\n init_solution, init_score = model.solution()\n #print init_solution, init_score\n print \"#\"*120\n print \"Running MWS for \", type(model).__name__\n print \"#\"*120\n print \"Constraints: \"\n print \"Lower Bound for x= \", model.min_x\n print \"Upper Bound for x= \", model.max_x\n print \"Max trials = %d, Max changes = %d, p = %0.2f, threshold = %d\" % (max_tries, max_changes, p, threshold)\n print \"-\"*120\n for i in range(max_tries):\n \n x_vec, x_score = model.solution()\n for j in range(max_changes):\n c = randrange(0, len(init_solution))\n if x_score > threshold:\n return x_vec\n\n if p < random():\n x_vec, x_score = model.mutate_solution(x_vec, c)\n output += \"?\"\n confused_count += 1\n \n else:\n out_x = max_score_local(x_vec, c)\n if out_x == x_vec:\n output += \".\"\n better_count += 1\n else:\n output += \"+\"\n x_vec = out_x\n best_count += 1\n \n if x_score > init_score:\n init_solution = x_vec\n init_score = x_score\n\n if k % 50 == 0: \n print \"eb = %6f | ? = %d | + = %d | . = %d | %s\" % (init_score, confused_count, best_count, better_count, output)\n output = \" \"\n better_count = 0\n confused_count = 0\n best_count = 0\n k = k + 1\n print \"-\"*120\n print \"Best solution: \", init_solution\n print \"Best energy: \", init_score", "title": "" } ]
[ { "docid": "dc23f9d302e3c5b94328c54bbe84fee4", "score": "0.7375325", "text": "def solution(self):\n return self.Mbest", "title": "" }, { "docid": "a7a458447c66af157f126dfa185c1878", "score": "0.6866638", "text": "def optimize(self):\n\n # Initiate particles\n self.__initPart()\n self.listOfPos = []\n\n NFC = 0\n while(NFC < self.maxGen):\n #print \"Run: \" + str(NFC) + \" Best: \" + str(self.globBestFit)\n\n # Perform search\n self.update()\n\n # Acceptably close to solution\n #if self.globBestFit < self.epsError:\n #return self.globBestPos, self.globBestFit\n\n # next gen\n NFC += 1\n self.listOfPos.append(self.globBestFit)\n # Search finished\n return self.globBestPos, self.globBestFit, self.listOfPos", "title": "" }, { "docid": "8f7c273cb7bfaf99e55d77e2de198e94", "score": "0.68366396", "text": "def solve(self):\r\n\r\n nfev, nit, warning_flag = 0, 0, False\r\n status_message = _status_message['success']\r\n\r\n # calculate energies to start with\r\n for index, candidate in enumerate(self.population):\r\n parameters = self._scale_parameters(candidate)\r\n self.population_energies[index] = self.func(parameters,\r\n *self.args)\r\n nfev += 1\r\n\r\n if nfev > self.maxfun:\r\n warning_flag = True\r\n status_message = _status_message['maxfev']\r\n break\r\n\r\n minval = np.argmin(self.population_energies)\r\n\r\n # put the lowest energy into the best solution position.\r\n lowest_energy = self.population_energies[minval]\r\n self.population_energies[minval] = self.population_energies[0]\r\n self.population_energies[0] = lowest_energy\r\n\r\n self.population[[0, minval], :] = self.population[[minval, 0], :]\r\n\r\n if warning_flag:\r\n return OptimizeResult(\r\n x=self.x,\r\n fun=self.population_energies[0],\r\n nfev=nfev,\r\n nit=nit,\r\n message=status_message,\r\n success=(warning_flag != True))\r\n\r\n # do the optimisation.\r\n for nit in range(1, self.maxiter + 1):\r\n if self.dither is not None:\r\n self.scale = self.random_number_generator.rand(\r\n ) * (self.dither[1] - self.dither[0]) + self.dither[0]\r\n for candidate in range(np.size(self.population, 0)):\r\n if nfev > self.maxfun:\r\n warning_flag = True\r\n status_message = _status_message['maxfev']\r\n break\r\n\r\n trial = self._mutate(candidate)\r\n self._ensure_constraint(trial)\r\n parameters = self._scale_parameters(trial)\r\n\r\n energy = self.func(parameters, *self.args)\r\n nfev += 1\r\n\r\n if energy < self.population_energies[candidate]:\r\n self.population[candidate] = trial\r\n self.population_energies[candidate] = energy\r\n\r\n if energy < self.population_energies[0]:\r\n self.population_energies[0] = energy\r\n self.population[0] = trial\r\n\r\n # stop when the fractional s.d. 
of the population is less than tol\r\n # of the mean energy\r\n convergence = (np.std(self.population_energies) /\r\n np.abs(np.mean(self.population_energies) +\r\n _MACHEPS))\r\n\r\n if self.disp:\r\n print(\"differential_evolution step %d: f(x)= %g\"\r\n % (nit,\r\n self.population_energies[0]))\r\n\r\n if (self.callback and\r\n self.callback(self._scale_parameters(self.population[0]),\r\n convergence=self.tol / convergence) is True):\r\n\r\n warning_flag = True\r\n status_message = ('callback function requested stop early '\r\n 'by returning True')\r\n break\r\n\r\n if convergence < self.tol or warning_flag:\r\n break\r\n\r\n else:\r\n status_message = _status_message['maxiter']\r\n warning_flag = True\r\n\r\n DE_result = OptimizeResult(\r\n x=self.x,\r\n fun=self.population_energies[0],\r\n nfev=nfev,\r\n nit=nit,\r\n message=status_message,\r\n success=(warning_flag != True))\r\n\r\n if self.polish:\r\n result = minimize(self.func,\r\n np.copy(DE_result.x),\r\n method='L-BFGS-B',\r\n bounds=self.limits.T,\r\n args=self.args)\r\n\r\n nfev += result.nfev\r\n DE_result.nfev = nfev\r\n\r\n if result.fun < DE_result.fun:\r\n DE_result.fun = result.fun\r\n DE_result.x = result.x\r\n DE_result.jac = result.jac\r\n # to keep internal state consistent\r\n self.population_energies[0] = result.fun\r\n self.population[0] = self._unscale_parameters(result.x)\r\n\r\n return DE_result", "title": "" }, { "docid": "a0117546e96810312ac163daf208b031", "score": "0.6755187", "text": "def search_optimal(self):\n\n for i in range(self.get_nsource()):\n ith_bee_fitness = self.bee[i].get_fitness()\n best_fitness = self.optimal.get_fitness()\n if best_fitness < ith_bee_fitness:\n self.suboptimal.greedy_choice(self.optimal)\n self.optimal.greedy_choice(self.bee[i])\n elif best_fitness == ith_bee_fitness:\n pass # protect updating suboptimal in this case\n elif self.suboptimal.get_fitness() < ith_bee_fitness:\n self.suboptimal.greedy_choice(self.bee[i])", "title": "" }, { "docid": "f474c1ee4cea7c9306c18d7443490471", "score": "0.6726404", "text": "def _getBestSolution(obj):\n filename = _getOutputFilename(obj.folder_output + BEST_SOLUTION_FILENAME)\n obj.bestSolution = _loadCSV(filename)", "title": "" }, { "docid": "6112d959c74a77caaeb6a8ef2f3e1704", "score": "0.6724941", "text": "def getstart_temp(self):\n\n assert(not self.dims is None)\n lrange = self.lower\n urange = self.upper\n fmax = _double_min\n fmin = _double_max\n for _ in range(self.Ninit):\n x0 = random.uniform(size=self.dims)*(urange-lrange) + lrange\n fval = self.func(x0, *self.args)\n self.feval += 1\n if fval > fmax:\n fmax = fval\n if fval < fmin:\n fmin = fval\n bestEnergy = fval\n bestSolution = array(x0)\n\n self.T0 = (fmax-fmin)*1.5\n return bestSolution, bestEnergy", "title": "" }, { "docid": "57f818ca9f877589cbc25e77d1157ae1", "score": "0.6616518", "text": "def get_best():\n\n best_h = 3\n best_not = 3\n runs = 7\n\n np.save(\"H_START_S_BEST.npy\",0)\n np.save(\"NOT_START_S_BEST.npy\",0)\n np.save(\"H_START_S.npy\",0)\n np.save(\"NOT_START_S.npy\",0)\n\n for i in range(runs):\n \n # Run Sim-H\n sim_h.run_main_h()\n # Run Sim-NOT\n sim_not.run_main_not()\n\n hloss = run_waves(\"H\")\n print(\"Loss of H Gate\")\n print(hloss)\n nloss = run_waves(\"N\")\n print(\"Loss of NOT Gate\")\n print(nloss)\n\n # If this is a better H value, update\n if (hloss < best_h):\n best_h = hloss\n overwriteH()\n\n # If this is a better NOT value, update\n if (nloss < best_not):\n best_not = nloss\n overwriteNOT()\n\n\n h_waves_list = 
np.load(\"H_START_S_BEST.npy\")\n not_waves_list = np.load(\"NOT_START_S_BEST.npy\")\n\n return (h_waves_list, not_waves_list)", "title": "" }, { "docid": "90862e6480a983a07b72ffac8f4a63f1", "score": "0.6562416", "text": "def solve(self):\n self.runtime = Timer() # Start the timer to measure the runtime\n self.model.min_to_max() # Convert minimization problems to maximization problems\n self.model.construct_payoff() # Construct a payoff table from the objective function values\n self.model.find_obj_range() # Find the range of each objective function\n self.model.convert_prob() # Convert the payoff table\n\n self._find_solutions() # Find all solutions to the optimization problem\n self._process_solutions() # Identify the unique solutions\n self._get_hv_indicator() # Compute the HV indicator\n\n # Save the results to an Excel file if requested\n if self.opts.output_excel:\n self._output_excel()\n\n # Compute the total runtime and print a summary of the results\n self.runtime = round(self.runtime.get(), 2)\n Helper.clear_line()\n print(\n f\"Solved {self.model.models_solved.value()} models for \"\n f\"{self.num_unique_pareto_sols} unique Pareto solutions in \"\n f\"{self.runtime} seconds\"\n )\n\n # Log a summary of the results\n self.logger.info(Helper.separator())\n self.logger.info(f\"Runtime: {self.runtime} seconds\")\n self.logger.info(f\"Models solved: {self.model.models_solved.value()}\")\n self.logger.info(f\"Infeasibilities: {self.model.infeasibilities.value()}\")\n self.logger.info(f\"Solutions: {self.num_sols}\")\n self.logger.info(f\"Unique solutions: {self.num_unique_sols}\")\n self.logger.info(f\"Unique Pareto solutions: {self.num_unique_pareto_sols}\")\n self.logger.info(f\"Hypervolume indicator: {self.hv_indicator}\")", "title": "" }, { "docid": "7f57fe14d76194d4919d36fb73dcce81", "score": "0.6554548", "text": "def get_best_result(self):\n # self.result_data\n # raise NotImplementedError\n param_names = ['r', 'z', 'phi', 'scale', 'align', 'cloud']\n \n\n for chan in self.chan_list:\n for i in self.index:\n self.best_params[chan][i] = []\n #loops over all the fit waveforms\n # for model_num,single_wf_model in enumerate(self.wf_model):\n model_num = i\n single_wf_model = self.wf_model[i]\n print(\"Calculating best values for {}\".format(model_num))\n\n for j in range(single_wf_model.num_params):\n # loops over all the parameters\n param_values = self.result_data[i][j]\n\n param_avg = np.mean(param_values)\n param_std = np.std(param_values)\n\n self.best_params[model_num].append(param_avg)\n\n print(\"{name}: {avg:4.4f} +/- {std:4.4f}\".format(\n name=param_names[j], \n avg=param_avg, \n std=param_std)\n )", "title": "" }, { "docid": "c22ab22848e4af7f6852a38c863bd374", "score": "0.6523312", "text": "def solve(self):\n fit_partial = self.fit_partial\n n_jobs = self.n_jobs\n gen = self.gen\n\n # get results in parallel\n all_res = Parallel(n_jobs=n_jobs)(\n delayed(fit_partial)(\n order=order,\n seasonal_order=seasonal_order,\n )\n for order, seasonal_order in gen\n )\n\n sorted_fits = _sort_and_filter_fits(all_res)\n if self.trace and sorted_fits:\n print(f\"\\nBest model: {str(sorted_fits[0])}\")\n\n return sorted_fits", "title": "" }, { "docid": "390db28702298605d2d7b3a6236568e8", "score": "0.6472718", "text": "def run(self):\n self.time_start = time.time()\n self.step = 0\n self.step_idle = 0\n\n E = self.energy()\n\n prev_state = self.copy_state(self.state)\n prev_energy = E\n\n self.best_state = self.copy_state(self.state)\n self.best_energy = E\n self.best_step = 0\n 
self.energy_history = [E] * self.history_length\n Ehmean = E\n Ehvar = 0.\n Nvar = float(max(self.history_length - 1, 1))\n\n if self.updates_every > 0:\n self.update(self.step, self.step_idle, E, Ehmean, Ehvar)\n steps_since_update = 0\n\n while not self.terminate_search() and not self.user_exit:\n self.move()\n E = self.energy()\n\n if E >= prev_energy:\n self.step_idle += 1\n else:\n self.step_idle = 0\n\n v = self.step % self.history_length\n Ev = self.energy_history[v]\n if E < Ev or E <= prev_energy:\n # accept candidate state\n prev_state = self.copy_state(self.state)\n prev_energy = E\n\n if E < self.best_energy:\n self.best_state = self.copy_state(self.state)\n self.best_energy = E\n self.best_step = self.step\n else:\n # restore previous state\n self.state = self.copy_state(prev_state)\n E = prev_energy\n\n if E < Ev:\n # Update energy history\n self.energy_history[v] = E\n\n # and its mean and variance\n dE = E - Ev\n Ehmean_old = Ehmean\n\n Ehmean += dE / self.history_length\n Ehvar += dE * (E-Ehmean+Ev-Ehmean_old) / Nvar\n\n self.step += 1\n steps_since_update += 1\n\n if steps_since_update == self.updates_every:\n self.update(self.step, self.step_idle, E, Ehmean, Ehvar)\n steps_since_update = 0\n\n self.state = self.copy_state(self.best_state)\n if self.save_state_on_exit:\n self.save_state()\n\n self.time_end = time.time()\n return self.best_state, self.best_energy", "title": "" }, { "docid": "7ec8c15419ad8bc00193cb7e262615f0", "score": "0.646268", "text": "def optimal(self):\n optimal_check = self.checker(True)\n return optimal_check", "title": "" }, { "docid": "e8707ac8ab5a432499cd686262931a91", "score": "0.6461675", "text": "def get_best_solution(self):\n print \"getting best solution\"\n greatest_number_of_words = 0\n prelim_best_solutions = []\n for psol in self.potential_solutions:\n l = len(\" \".join(map(lambda x:x['title'],psol)))\n if l == greatest_number_of_words:\n prelim_best_solutions.append(psol)\n elif l > greatest_number_of_words:\n greatest_number_of_words = l\n prelim_best_solutions = [psol]\n best_solution = None\n least_number_of_tracks = 999999\n for bsol in prelim_best_solutions:\n if len(bsol) < least_number_of_tracks:\n least_number_of_tracks = len(bsol)\n best_solution = bsol\n return best_solution", "title": "" }, { "docid": "dbbafe1f015464da4a494c735c33ee07", "score": "0.6442411", "text": "def getSolution(self):\n self.var_constr_dict = self.mapVarToConstraints()\n\n start = time.time()\n solution = self.solver.getSolution(self)\n self.runtime = time.time() - start\n # solution is an assignment of problem, where the assignments for all variables are filled in. 
\n return solution, self.getStatistics()", "title": "" }, { "docid": "ede2c84a0104dd19a0e4737190f698ad", "score": "0.63996756", "text": "def save_best_sol(self):\n\n self.save(result=self.best_solutions_list)", "title": "" }, { "docid": "b4ba86d89bccfe98a869ed480491d257", "score": "0.6390985", "text": "def optimize(self):\n rankedSol = self._rankSolutions()\n bestSol = rankedSol[0]\n # include best solution in next generation solutions\n newSols = [bestSol]\n\n for s in rankedSol[1:]:\n if np.random.uniform(0.0, 1.0) > 0.5:\n a = self._computeA()\n normA = np.linalg.norm(a)\n if normA < 1.0:\n newS = self._encircle(s, bestSol, a)\n else:\n # ##select random sol\n randomSol = self._sols[np.random.randint(self._sols.shape[0])]\n newS = self._search(s, randomSol, a)\n else:\n newS = self._attack(s, bestSol)\n newSols.append(self._constrain_solution(newS))\n\n self._sols = np.stack(newSols)\n self._a -= self._aStep", "title": "" }, { "docid": "75c1eca33dce422738cb450fdee65c6e", "score": "0.63894033", "text": "def solver_optimize(self):\n parts = self.feats_obj.feat_partitions\n self.N_s = [None for i in parts]\n self.opt_sol = [None for i in parts]\n self.norm_z = [None for i in parts]\n self.compute_zero_stats()\n\n for i, partition in enumerate(parts):\n self.compute_data_stats(partition, i)\n self.compute_zc(partition, i)\n self.compute_theta(partition, i)\n\n # print('N_s is: ', self.N_s)\n # print('Z_c is: ', self.norm_z)\n # print('Theta is: ', self.opt_sol)", "title": "" }, { "docid": "8d4b156c2e290526811e8a8e8d75a345", "score": "0.6357096", "text": "def _solve(self):\n #algorithm = self.config['algorithm'](self.config)\n \n self.output = self.algorithm.run(self.config['u_0'],self.config['TOL'],self.config['MAXIT'])\n print('Iterations:' + str(self.output['iter']))", "title": "" }, { "docid": "98e45c5f527db22d70313ad80ec8673d", "score": "0.6339497", "text": "def solve(self):\n\n while True:\n self.iteration += 1\n\n logger.debug('iteration: n=%s', self.iteration)\n\n self._iterate()\n\n if self._converged():\n break\n\n return self._find_strongest_solution()", "title": "" }, { "docid": "149015545ba83e85aff88252b4aa13f7", "score": "0.63234216", "text": "def run(self, iterations, run_from_start=True):\n if run_from_start:\n self.run_init()\n for _ in range(iterations):\n self._build_ant_solutions()\n self._update_iteration_best()\n self._update_global_best()\n self.global_pheromone_update(self.global_best)\n self.iteration += 1\n return self.global_best", "title": "" }, { "docid": "7e2f056ef0ae1d598ecbc81e870cab69", "score": "0.62895966", "text": "def best_fitness(self):\n return self.fitness[self.order[-1]]", "title": "" }, { "docid": "c0f291c79ddcfe9833db29d6e0a3b0f6", "score": "0.62888795", "text": "def _solve_impl(self):\n\n self.start_time = clock() # Used to show the total time took the process\n bellman_updates = 0 # used to track the performance improvement.\n converged = False\n iteration = 0\n\n num_states = self.representation.num_states_total\n\n while self.has_time() and not converged:\n iteration += 1\n\n # Store the weight vector for comparison\n prev_weight = self.representation.weight.copy()\n\n # Sweep The State Space\n for i in range(num_states):\n\n s = self.representation.stateID2state(i)\n # Sweep through possible actions\n if self.domain.is_terminal(s):\n continue\n for a in self.domain.possible_actions(s):\n\n self.bellman_backup(s, a, ns_samples=self.ns_samples)\n bellman_updates += 1\n\n # Create Log\n if bellman_updates % self.log_interval == 0:\n 
performance_return, _, _, _ = self.performance_run()\n self._log_updates(performance_return, bellman_updates)\n\n # check for convergence\n weight_diff = l_norm(prev_weight - self.representation.weight)\n converged = weight_diff < self.convergence_threshold\n\n # log the stats\n (\n perf_return,\n perf_steps,\n perf_term,\n perf_disc_return,\n ) = self.performance_run()\n self.logger.info(\n \"PI #%d [%s]: BellmanUpdates=%d, ||delta-weight_vec||=%0.4f, \"\n \"Return=%0.4f, Steps=%d\"\n % (\n iteration,\n hhmmss(deltaT(self.start_time)),\n bellman_updates,\n weight_diff,\n perf_return,\n perf_steps,\n )\n )\n\n # Show the domain and value function\n if self._visualize_mode:\n self.domain.show_learning(self.representation)\n\n # store stats\n self.result[\"bellman_updates\"].append(bellman_updates)\n self.result[\"return\"].append(perf_return)\n self.result[\"planning_time\"].append(deltaT(self.start_time))\n self.result[\"num_features\"].append(self.representation.features_num)\n self.result[\"steps\"].append(perf_steps)\n self.result[\"terminated\"].append(perf_term)\n self.result[\"discounted_return\"].append(perf_disc_return)\n self.result[\"iteration\"].append(iteration)\n\n if converged:\n self.logger.info(\"Converged!\")\n self.log_value()", "title": "" }, { "docid": "d2b1c534c039f242d2a7c458ea6fb951", "score": "0.6226838", "text": "def calculateBest():\r\n\r\n if var.score>var.best:\r\n setNewBest()\r\n return var.score\r\n else: return var.best\r\n #if the actual score is bigger than the best score\r\n #set the best score to the actual score\r", "title": "" }, { "docid": "c7fe40017c2980b624d976cb8e5d1632", "score": "0.6216328", "text": "def search(self, config):\n self.iteration = 0\n while self.iteration < self.maxIterations:\n self.GAStep(config)\n self.iteration += 1\n\n print (\"Total iterations: \",self.iteration)\n print (\"Best Solution: \", self.best.getFitness())", "title": "" }, { "docid": "b0ba52f8f0040d777dcae8c696c03ef8", "score": "0.6213454", "text": "def _solution(self):\n return self.__solution", "title": "" }, { "docid": "07d78b5b7da1a48b218ae692e26ffa46", "score": "0.62130654", "text": "def solve(self):\n \n # Do the actual run to find the fit and trendline\n self.fitModel = WarmingFitModel(self.dates, self.temps)\n self.constants = self.fitModel.solve()\n self.devs = self.fitModel.deviations()\n \n # Verify the optimality of solutions\n xvariabilities = { \"min\" : [], \"max\" : [] }\n if not self.DISABLE_VARIABILITY_CHECK:\n for op in xvariabilities.keys():\n for xsub in [0, 1]:\n variabilityModel = WarmingVariabilityModel(self.dates, self.temps, xsub, op, self.constants, sum(list(map(abs, self.devs))))\n xvariabilities[op].append(variabilityModel.solve())\n pprint.pprint(xvariabilities)\n \n # Verify the chosen value for solar cycle length\n cycleLengths = [9.7 + 0.2 * i for i in range(11)]\n cycleDevs = []\n if not self.DISABLE_CYCLE_LENGTH_CHECK:\n for cycleLength in cycleLengths:\n solarFitModel = WarmingFitModel(self.dates, self.temps, solarCycle=cycleLength)\n cycleDevs.append(sum([abs(dev) for dev in solarFitModel.deviations()]))\n pprint.pprint(cycleDevs)\n \n stdevs = [0 for x in self.constants]\n if not self.DISABLE_CONFIDENCE_INTERVALS:\n # Do a bunch more iterations to find a confidence interval for the original fit\n ITER_COUNT = 50\n random.seed()\n xstar = []\n for fuzzIter in range(ITER_COUNT):\n fuzzedTemps = [self.temps[i] + random.choice(self.devs) for i in list(range(len(self.dates)))]\n fuzzedModel = WarmingFitModel(self.dates, fuzzedTemps)\n 
fuzzedConstants = fuzzedModel.solve()\n print(\"Fuzzy constants: \" + str(fuzzedConstants))\n xstar.append(fuzzedConstants)\n \n # Actually compute the confidence interval\n if ITER_COUNT > 0:\n cMeans = [sum(fuzzedX[xpos] for fuzzedX in xstar) / float(ITER_COUNT) for xpos in range(len(self.constants))]\n for pos in range(len(self.constants)):\n cDevs = [(xstar[i][pos] - cMeans[pos]) ** 2 for i in range(ITER_COUNT)]\n cStdDev = math.sqrt(sum(cDevs) / float(len(cDevs)))\n stdevs[pos] = cStdDev\n \n return {\"fit\" : [(self.constants[i], 2 * stdevs[i]) for i in range(len(self.constants))], \\\n \"xvariabilities\" : xvariabilities, \\\n \"cycle\" : (cycleLengths, cycleDevs) }", "title": "" }, { "docid": "21b1caf17582b8c1975b073d829c4e34", "score": "0.62079585", "text": "def find_solution(self):\n start_time = timer.time()\n result = []\n constraints = []\n \n ##############################\n # Task 2.4 : Addressing Failure\n #reordering agent priority using heuristic costs\n agent_max_cost = []\n for agent in range(self.num_of_agents):\n max_cost = -1\n for loc, cost in self.heuristics[agent].items():\n print(loc, cost)\n if cost > max_cost:\n max_cost = cost\n agent_max_cost.append([cost, agent])\n agent_max_cost.sort()\n agent_max_cost.reverse()\n \n for _, i in agent_max_cost:\n ##############################\n\n # original\n # for i in range(self.num_of_agents): # Find path for each agent\n \n path = a_star(self.my_map, self.starts[i], self.goals[i], self.heuristics[i],\n i, constraints)\n if path is None:\n raise BaseException('No solutions')\n result.append(path)\n \n print('Agent[{}]'.format(i), 'path = ', path)\n \n ##############################\n # Task 2: Add constraints here\n # Useful variables:\n # * path contains the solution path of the current (i'th) agent, e.g., [(1,1),(1,2),(1,3)]\n # * self.num_of_agents has the number of total agents\n # * constraints: array of constraints to consider for future A* searches\n\n # Task 2.1 : adding vertex constraints\n # for node in path:\n # constraint = dict()\n # constraint['agent'] = i\n # constraint['loc'] = [node[0], None]\n # constraint['timestep'] = node[1]\n # constraints.append(constraint)\n \n # build constraint from a path\n for p in range(len(path)):\n curr_loc = path[p]\n next_loc = None\n curr_timestep = p\n \n # vertex constraint\n constraint = dict()\n constraint['agent'] = i\n constraint['loc'] = [curr_loc, next_loc]\n constraint['timestep'] = curr_timestep\n constraints.append(constraint)\n \n # edge constraint except start cell\n if p > 0:\n prev_loc = path[p - 1]\n next_timestep = p\n \n constraint = dict()\n constraint['agent'] = i\n constraint['loc'] = [curr_loc, prev_loc]\n constraint['timestep'] = next_timestep\n constraints.append(constraint)\n \n ##############################\n \n # Task 2.3: additional constraints\n # agent_0 reached its goal cell at timestep 3,\n # but, agent_1 will reach to the goal cell at timestep 7\n # then, agent_0 will be wait \n \n # but, we don't know the value 7\n # so, just estimate this value with the prior knowledge\n # for example, # of a valid cell\n \n ##############################\n \n last_loc = path[-1]\n last_timestep = len(path) -1\n \n max_cells = 0\n \n for x in range(len(self.my_map)):\n for y in range(len(self.my_map[0])):\n if not self.my_map[x][y]:\n max_cells += 1\n \n for timestep in range(last_timestep + 1, max_cells):\n constraint = dict()\n constraint['agent'] = i\n constraint['loc'] = [last_loc, None]\n constraint['timestep'] = timestep\n 
constraints.append(constraint) \n\n self.CPU_time = timer.time() - start_time\n\n print(\"\\n Found a solution! \\n\")\n print(\"CPU time (s): {:.2f}\".format(self.CPU_time))\n print(\"Sum of costs: {}\".format(get_sum_of_cost(result)))\n print(result)\n return result", "title": "" }, { "docid": "b65a27ab5b4a47ccc18024490760dc46", "score": "0.61903894", "text": "def result(self):\n return self.best.x", "title": "" }, { "docid": "2e2e8b4ec0309484e14adeec64980705", "score": "0.6185776", "text": "def getBestSol(sqe, Res, c_ms):\n #---docstring------------------------------------------------------------------\n #---end-docstring--------------------------------------------------------------\n res = dc(Res)\n C_ms = dc(c_ms)\n lsqSc = numpy.zeros(len(res),'d') + numpy.inf \n lsqMu = numpy.zeros(len(res),'d') + numpy.inf \n lsqSl = numpy.zeros(len(res),'d') + numpy.inf\n for i in range(len(res)):\n try:\n zeroCut = sqe.zeroInd + 1\n sqeCut = sqe.zeroInd+res[i][1].cutoffInd\n slopeCut = sqe.slopeCut()\n sqeLin = sqe.se[sqeCut:slopeCut]\n mulSqeLin = nar(nar(res[i][0][1:]))[sqeCut:slopeCut]\n eLin = sqe.e[sqeCut:slopeCut]\n sqeSlope,inter = mfit.poly(eLin,sqeLin ,1)\n mulSlope,inter = mfit.poly(eLin,mulSqeLin,1)\n line = mfit.poly_of_x(eLin,mfit.poly(eLin,sqeLin ,1) )\n lsqSl[i] = (sqeSlope-mulSlope)**2.0\n lsqMu[i] = nar( (mulSqeLin - line )**2.0 /float(len(line)) )\n lsqSc[i] = nar( nar( (sqe.sqe - nar(res[i][0]) )**2.0 ) )\n except:\n pass\n \n start = 0\n while start<len(lsqSc) and \\\n (numpy.isnan(lsqSc[start]) or numpy.isinf(lsqSc[start]) \\\n or numpy.isnan(lsqMu[start]) or numpy.isinf(lsqMu[start]) \\\n or numpy.isnan(lsqSl[start]) or numpy.isinf(lsqSl[start])):\n start += 1\n \n C_ms = C_ms[start:]\n res = res[start:]\n lsqSc = lsqSc[start:]\n lsqMu = lsqMu[start:]\n lsqSl = lsqSl[start:]\n lsqSc_copy = numpy.array(lsqSc)\n lsqMu_copy = numpy.array(lsqMu)\n lsqSl_copy = numpy.array(lsqSl)\n numBetter = lambda x,c : len(filter(lambda y : y < 0.98*x,c) )\n for i in range(len(C_ms)):\n lsqSc_copy[i] = numBetter(lsqSc[i],lsqSc)\n lsqMu_copy[i] = numBetter(lsqMu[i],lsqMu)\n lsqSl_copy[i] = numBetter(lsqSl[i],lsqSl)\n lsqSc= numpy.array(lsqSc_copy )\n lsqMu= numpy.array(lsqMu_copy )\n lsqSl= numpy.array(lsqSl_copy )\n \n LSQ = (lsqSc + lsqMu + lsqSl)/3.0\n \n best = LSQ.argmin()\n sqeCalc = res[best][0]\n dosCalc = res[best][1]\n cmsCalc = res[best][2]\n return sqeCalc,dosCalc,cmsCalc,res,C_ms,lsqSc,lsqMu,lsqSl,LSQ", "title": "" }, { "docid": "144adfb5462ee5a69aaeb4fed0faad88", "score": "0.61844385", "text": "def optimize(self):\n\n if self.scheme == \"linear\":\n iterations = floor(self.temp/self.cooling_rate)\n\n elif self.scheme == \"exp\":\n iterations = floor(log(1/self.temp)/log(1 - self.cooling_rate))\n\n for i in range(iterations):\n progress = round((i/iterations)*100, 2)\n if i % 20 == 0:\n print(f\"Progress: {progress}%\")\n if i == 0:\n self.get_costs()\n best_costs = copy.deepcopy(self.all_costs[0])\n best_option = copy.deepcopy(self.batteries)\n\n self.anneal()\n self.get_costs()\n\n if i > 0 and self.all_costs[-1] < best_costs:\n best_option = copy.deepcopy(self.batteries)\n best_costs = self.all_costs[-1]\n\n if self.scheme == \"linear\":\n self.temp = self.temp - self.cooling_rate\n elif self.scheme == \"exp\":\n self.temp *= 1 - self.cooling_rate\n\n self.batteries = best_option", "title": "" }, { "docid": "8e51584fbf426cc78699e1fa846fb810", "score": "0.61729866", "text": "def post_optimization(pdb):\n print('Optimizing...')\n energies = optimize(pdb, 
os.path.abspath(os.getcwd()))\n print('Energy before optimizing: %s' % str(energies[0][0]))\n print('Energy after optimizing: %s' % str(energies[1][0]))\n print('Model completed')", "title": "" }, { "docid": "5cbbdf5b657c88ce44e3b8c8d148fa37", "score": "0.6171333", "text": "def guessBest(self):\n #Get the true(past tries excluded) possible values for each value\n search_field = {i:(self.possibleValues[i] - self.impossibleValues[i]) for i in self.possibleValues}\n #Get the counts of times a number is a possible value @deprecated\n #counts = {i:sum(i in search_field[coord_values] for coord_values in search_field if len(search_field[coord_values])) for i in range(10)} \n #Get the coord with the least number of possible values\n bestCoord = min(search_field, key=lambda k: len(search_field[k]) if len(search_field[k]) > 1 else 10)\n #Apply the additional heuristic, sort the chosen coordinates possible values by how often those values could be other solutions @ deprecated\n #vals = sorted(search_field[bestCoord], key = lambda k: counts[k], reverse=True)\n vals = list(search_field[bestCoord])\n #print \"Guessing\", bestCoord, vals[0]\n #self.printData()\n self.setValue(bestCoord, guess=True, guessVal = vals[0])\n self.root = (bestCoord, self.getCoordValue(bestCoord))\n self.possibleValues[bestCoord] = {self.getCoordValue(bestCoord)}", "title": "" }, { "docid": "379f6fcd7411622d26dc90b601fbb81e", "score": "0.61595905", "text": "def extra_convergence_criteria(self, results):\n # type: (List[Result]) -> bool\n self.iter_count += len(results)\n\n # Skip the setup period\n if not results:\n return False\n\n # Update the best local\n crtl_elapsed_time = 0\n self.best_unchange += len(results)\n valid_result = 0\n iter_best_result = None\n for result in results:\n latency = result.time\n resource = result.size\n crtl_elapsed_time = max(crtl_elapsed_time, result.elapsed)\n if latency == float('inf'):\n continue\n elif resource >= 1.0:\n continue\n\n valid_result += 1\n if (not iter_best_result) or latency < iter_best_result.time:\n iter_best_result = result\n\n self.elapsed_time += crtl_elapsed_time\n\n # Terminate this partition if we cannot get a valid\n # result from the seed design point.\n #if (not self.best_result and self.iter_count >= len(self.seeds)\n # and valid_result == 0):\n # return True\n\n # Update the history\n if iter_best_result:\n if ((not self.best_result) or\n iter_best_result.time < self.best_result.time):\n self.best_result = iter_best_result\n self.best_unchange = -1\n if self.best_result is not None:\n self.log_best(self.iter_count, self.elapsed_time,\n self.best_result.time)\n\n # Keep the best result of current iteration for computing\n # the entropy at the next iteration.\n self.prev_result = iter_best_result\n\n # Update the best global\n count = 1\n new_ranking = float('inf')\n if os.path.exists('.who_is_the_best'):\n with open('.who_is_the_best') as filep:\n for line in filep:\n latency = float(line.replace('\\n', ''))\n if self.best_result:\n if latency == float(self.best_result.time):\n # Found itself\n new_ranking = count\n count += 1\n new_ranking /= count\n\n # Add one thread as the manager requested\n if os.path.exists('.add_threads'):\n with open('.add_threads') as filep:\n line = filep.readline()\n addition_thread = int(line.replace('\\n', ''))\n self.more_parallelism(addition_thread)\n run_command('rm -f .add_threads')\n\n # Skip the warm up period\n if new_ranking == float('inf') or count < 4:\n return False\n\n # Skip the rest criteria to let partitions run as 
long as possible\n return False", "title": "" }, { "docid": "32bff97f4800c45fadf86ac73a30a350", "score": "0.61526996", "text": "def get_best_values(self):\n return self.g_best_score, self.g_best_params", "title": "" }, { "docid": "d035d1802dc286f7a043d398d9b8e29d", "score": "0.6148896", "text": "def optimize():\n stopwatch = StopWatch()\n\n ANNEAL_AREA = False\n logger = LoggerUtils.configure_log(name='Optimization script', use_console=True, use_file=True)\n\n click.clear()\n\n metrics = GeneticAlgorithmConstants();\n\n # Generate initial population\n population = Evolution.populate(initial=True)\n\n # Calculate fitness for each individual and sort them\n ranked_pop = Evolution.adaptability(population)\n \n generation = 0\n same_solution_count = 0\n optimal = {\"genome\": None, \"fitness\": None, \"links\": None}\n\n logger.info('Starting first optimization loop.')\n # The first optimization loop defines the number of nodes for each of the line instalation areas, \n # while keeping squared areas number of nodes constant\n\n # Main loop for the first optimization\n while(generation < metrics.MAX_GENERATIONS and same_solution_count != metrics.MAX_STAGNATED_OPTIMAL):\n generation += 1\n \n # Perform the crossover \n ranked_pop = Evolution.crossover(ranked_pop, metrics.CROSSOVER_PROBABILITY)\n\n # Mutate population\n ranked_pop = Evolution.mutate(ranked_pop, metrics.MUTATION_PROB)\n\n # Next generation\n population = [individual.get('genome') for individual in ranked_pop]\n\n # Calculate fitness for each individual and sort them\n ranked_pop = Evolution.adaptability(population)\n\n # Update the optimal value and count improvements\n if optimal.get('fitness') is None:\n optimal = copy.deepcopy(ranked_pop[0])\n elif ranked_pop[0].get('fitness') < optimal.get('fitness'):\n optimal = copy.deepcopy(ranked_pop[0])\n same_solution_count = 0\n else:\n same_solution_count += 1\n\n # Perform environment pressure\n ranked_pop = Evolution.environment_pressure(ranked_pop)\n \n logger.info(''.join(['[Gen {}] '.format(generation), 'Optimal:{solution: ', str(optimal.get('genome')), ', fitness: ', str(optimal.get('fitness')), \n ', min_links: ', str(optimal.get('links')), '}']))\n \n logger.info('Finished first optimization with %s generations. Total time: %s.' % (generation, stopwatch.read()))\n logger.info('OPTIMAL FOR FIRST OPTIMIZATION: N1: {}, N2: {}, N3: {}, N4: {} '.format(optimal['genome'][0], \n optimal['genome'][1], optimal['genome'][2], optimal['genome'][3]))\n \n # SECOND OPTIMIZATION ROUTINE:\n # Voronoi : generate optimized points distribution in an area.\n # L-BFGS-B : \n # POSSIBILITY: Model the second optimization problem as max f(x) = min SUM distances(i,j) \n\n # GETTING READY FOR SECOND OPTIMIZATION\n\n n1, n2, n3, n4 = optimal['genome']\n # network = PreProcess.generateNetworkForConstants(n1, n2, n3, n4)\n fitness = NetworkModel.getFitnessForVariables(n1, n2, n3, n4)\n\n area_nodes = n4\n generation = 0\n\n logger.info('Starting second optimization.')\n # The second optimization cares to find the min number of nodes within squared areas. At this part of the code\n # it's assumed that the min number of nodes for the line areas are known due the first optimization loop.\n\n logger.info('Initial genome [N1 = {}, N2 = {}, N3 = {}, N4 = {}].'. 
format(n1, n2, n3, n4))\n stopwatch_loop_2 = StopWatch()\n\n\n network = PreProcess.generateNetworkForConstants(n1, n2, n3, n4) \n lines = ['N1', 'N2', 'N3', 'N4']\n nodeArray = [network.get('SINK')]\n for line in lines:\n nodeArray += list(network.get(line))\n # Plotter.plot_node_list(nodeArray, title='First Optimization Result', xLabel='Grid Coordinate (m)', yLabel='Grid Coordinate (m)')\n\n nodesCoordinates = [node.getCoordinates() for node in nodeArray]\n GeoUtils.writeGeoJSON(nodesCoordinates, OUTPUT_PATH + 'first.geojson')\n\n # Main loop for the second optimization\n while generation < metrics.MAX_GENERATIONS and fitness.minValidLinks >= 3:\n # First find the min number n of nodes of node at each side of the nxn grid\n \n generation += 1\n area_nodes -= 1\n\n if area_nodes == 0:\n break\n\n fitness = NetworkModel.getFitnessForVariables(n1, n2, n3, area_nodes)\n\n logger.info('[Gen {}] Fitness for N4 = {}: {}'.format(generation, area_nodes, fitness))\n\n # When the loop ends, area_nodes have a number with invalid fitness for n4.\n # So the correct min number is the result area_nodes + 1.\n n4 = area_nodes + 1\n fitness = NetworkModel.getFitnessForVariables(n1, n2, n3, n4)\n\n logger.info('Finished second optimization with %s generations. Total time: %s.' % (generation, stopwatch_loop_2.read()))\n logger.info('Optimal for second optimization: N1: {}, N2: {}, N3: {}, N4: {} '.format(n1, n2, n3, n4))\n logger.info('Fitness for second optimization: {}'.format(fitness))\n\n # Retrieve network \n network = PreProcess.generateNetworkForConstants(n1, n2, n3, n4)\n\n metrics = SelectiveAneelingConstants()\n\n networkArea = list(network.get('N4'))\n current_optimal_network_area = networkArea \n\n if len(networkArea) >= 2 :\n fitness = NetworkModel.getFitnessForNetwork(networkArea)\n generation = 1\n\n # Define area boundaries\n boundaries = dict()\n boundaries['MIN_X'] = GlobalParameters.N4_DIM.top_left[0]\n boundaries['MAX_X'] = GlobalParameters.N4_DIM.botom_right[0]\n boundaries['MIN_Y'] = GlobalParameters.N4_DIM.botom_right[1]\n boundaries['MAX_Y'] = GlobalParameters.N4_DIM.top_left[1]\n\n if ANNEAL_AREA: logger.info('Starting third optimization. Selective aneeling')\n\n # Finds the optimal number of nodes within area maximizing the distance between them\n while generation < metrics.MAX_GENERATIONS and fitness.minValidLinks >= 2 and len(networkArea) >= 2 and ANNEAL_AREA:\n\n current_optimal_network_area = networkArea\n networkArea.pop()\n\n # Configuring Annealer\n annealer = DistanceAnnealing(networkArea, boundaries)\n annealer.copy_strategy = metrics.COPY_STRATEGY\n\n # Calibrating Annealer\n click.secho('Calibrating annealer', fg='yellow')\n auto_schedule = annealer.auto(minutes=1)\n annealer.set_schedule(auto_schedule)\n click.secho('Annealer calibrated! Parameters: {}'.format(auto_schedule), fg='green')\n\n networkArea, energy = annealer.anneal()\n \n fitness = NetworkModel.getFitnessForNetwork(networkArea)\n\n logger.info(\"[GEN {}] Number of Links: {} Energy: {}, fitness: {}\".format(len(networkArea), generation, energy, fitness))\n\n generation += 1\n \n \n logger.info('Finished second optimization. Nodes positions inside area defined. 
{} nodes installed.'.format(len(current_optimal_network_area)))\n\n # Saving the new area node disposition\n network['N4'] = np.array(current_optimal_network_area)\n\n # Adding all nodes of the network in one array\n\n lines = ['N1', 'N2', 'N3', 'N4']\n nodeArray = [network.get('SINK')]\n\n for line in lines:\n nodeArray += list(network.get(line))\n \n Plotter.plot_node_list(nodeArray, add=True, annotate=False)\n nodesCoordinates = [node.getCoordinates() for node in nodeArray]\n GeoUtils.writeGeoJSON(nodesCoordinates, OUTPUT_PATH + 'second.geojson')\n \n logger.info('Annealing nodes. Current configuration have %s nodes.' %(len(nodeArray)))\n annealer = PositionAnnealing(nodeArray)\n annealer.copy_strategy = metrics.COPY_STRATEGY\n\n # click.secho('Calibrating annealer.', fg='yellow')\n\n # auto_schedule = annealer.auto(minutes=1)\n # annealer.set_schedule(auto_schedule)\n\n # click.secho('Annealer calibrated!', fg='green')\n # logger.info('Annealer parameters: {}'.format(auto_schedule))\n\n annealer.Tmax=1.5\n annealer.Tmin=1e-10\n\n nodeArray, energy = annealer.anneal()\n\n network[line] = nodeArray\n \n click.secho('Finished Optimization', fg='green')\n logger.info('Finished network optimization. Total elapsed time: {}'.format(stopwatch.read()))\n logger.info('Writing result geoJSON at {}.'.format(OUTPUT_PATH))\n\n nodesCoordinates = [node.getCoordinates() for node in nodeArray]\n GeoUtils.writeGeoJSON(nodesCoordinates, OUTPUT_PATH + 'result.geojson')\n\n Plotter.plot_node_list(nodeArray, title='Representação gráfica da Rede', xLabel='Longitude', yLabel='Latitude',\n color='red', annotate=False)", "title": "" }, { "docid": "8a6b2d1b0b101462372338089711edf7", "score": "0.61201286", "text": "def calc_energy(self, force=True):\n if self.atoms_moved:\n self.timer.switch_on(\"energy\")\n if self.qeq:\n self.timer.switch_to(\"qeq\")\n self.qeq.preopt_q()\n self.timer.switch_to(\"energy\")\n if self.virtual_atoms:\n self.timer.switch_to(\"virt atoms set pos\")\n self.virt_atom_set_pos()\n self.timer.switch_to(\"energy\")\n if self.enforce_nlist_rebuild:\n # if some operation (move atoms/molecules around) could break the neigborlist enforce its rebuild\n self.dlp_nlst.nlst_notest = True\n self.dlp_nlst.nlst_rebuild = True\n ### nullify stress tensor\n self.dlp_conf.stress[:]=0.0\n ### MAIN ROUTINE TO COMPUTE ENERGY AND FORCE IN DL_POLY ###\n self.dlp.calc_energy_force()\n ### ################################################### ###\n ### MAIN ROUTINE TO COMPUTE ENERGY AND FORCE IN DL_POLY ###\n if self.QMMM:\n self.timer.switch_to(\"qmmm\")\n self.QMMM_eng = self.QMMM_interface(force)\n self.timer.switch_to(\"energy\")\n # for a QMMM run we report the energies every step\n self.report_energies()\n if self.virtual_atoms:\n self.timer.switch_to(\"virt atoms dist force\")\n self.virt_atom_distribute_force()\n self.timer.switch_to(\"energy\")\n self.atoms_moved = False\n if self.enforce_nlist_rebuild:\n self.dlp_nlst.nlst_notest = False\n self.dlp_nlst.nlst_rebuild = False\n self.enforce_nlist_rebuild = False\n# if self.QMMM:\n# self.timer.switch_to(\"qmmm\")\n# self.QMMM_eng = self.QMMM_interface(force)\n# self.timer.switch_to(\"energy\")\n# # for a QMMM run we report the energies every step\n# self.report_energies()\n if self.extra_term:\n self.timer.switch_to(\"extra term\")\n self.extra_eng = self.extra_term()\n self.timer.switch_to(\"energy\")\n if self.plumed:\n self.timer.switch_to(\"metamd plumed\")\n self.meta_eng = self.plumed_energy()\n self.timer.switch_to(\"energy\")\n if 
self.constraint:\n self.multiply_force(self.constraint_mask)\n for s in self.extra_systems:\n self.timer.switch_to(s.get_name())\n s.calc_energy()\n self.timer.switch_to(\"energy\")\n self.timer.switch_off()\n # return configuration energy in current units\n energy = self.dlp.engcfg/self.dlp.engunit\n if self.QMMM: energy += self.QMMM_eng\n if self.extra_term: energy += self.extra_eng\n if self.plumed: energy += self.meta_eng\n return energy", "title": "" }, { "docid": "8abf4c4ebfe4d352c7ad6e941203b1e7", "score": "0.61065435", "text": "def get_solutions_fitness(self, solution):\n value = self.get_solutions_value(solution)\n return self.get_fitness(value)", "title": "" }, { "docid": "9d65270fe6d8d9e8590e16fe04479aff", "score": "0.6098882", "text": "def compute_solution(self,dataset):\n\t\tpass", "title": "" }, { "docid": "50508a445861c4588a2e65c595bf6459", "score": "0.60829246", "text": "def getOwnBestFitness(self):\r\n\t\t\r\n\t\treturn self.ownBestFitness", "title": "" }, { "docid": "af4add5b38f8e282631cc6b0b016fc55", "score": "0.6068796", "text": "def optimal(self):\n return np.max(self.expected_rewards())", "title": "" }, { "docid": "330e6fdf9357f731486615c89f7e2c32", "score": "0.6067864", "text": "def getBestFit(self):\n self.envLock.acquire()\n best = self.environment.getMostAdapted()\n self.envLock.release()\n return best", "title": "" }, { "docid": "380cba45db1ed8aaf173abe49ca24a3a", "score": "0.6064149", "text": "def get_solution(self):\n index = -1\n max_f = -math.inf\n\n for i in range(self.N):\n if self.train_v[i] > 1.2 and self.train_f[i] > max_f:\n max_f = self.train_f[i]\n index = i\n return self.train_x[index]", "title": "" }, { "docid": "4596652f65f223725709173384ec358d", "score": "0.6045643", "text": "def greedy():\n\tglobal table, weights, values, capacity, max_value, best_solution, unit_values\n\tunit_values = [((values[i]*1.0)/weights[i],i,weights[i],values[i]) for i in xrange(len(weights))]\n\tunit_values.sort()\n\tinv_sort_order = [(unit_values[i][1],i) for i in xrange(len(unit_values))]\n\tinv_sort_order.sort()\n\tsort_order = [x[1] for x in inv_sort_order]\n\t\n\tweight = 0\n\tmax_value = 0\n\tbest_solution = [0]*len(weights)\n\ti = len(weights) - 1\n\twhile(weight < capacity and i >= 0):\n\t\tif weight + unit_values[i][2] <= capacity:\n\t\t\tbest_solution[i] = 1\n\t\t\tmax_value += unit_values[i][3]\n\t\t\tweight += unit_values[i][2]\n\t\telse:\n\t\t\tbest_solution[i]=0\n\t\ti -= 1\n\n\tbest_solution = [best_solution[i] for i in sort_order]\n\t\n\treturn {'value':max_value, 'taken':best_solution, 'optimality':0}", "title": "" }, { "docid": "af394f586b631d9fba6859afb2e6cc75", "score": "0.60455203", "text": "def solve(self):\n convergence = False\n val = 0\n for i in range(self.maxiter):\n grad = self.dR()\n self.x = self.x - self.optStep()*grad\n if np.sum(np.abs(grad)) < self.eps*(np.log(self.x.shape[0])):\n convergence = True\n break\n\n if not convergence:\n print('WARNING: Did not converge. 
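The `greedy` knapsack passage above packs items in decreasing value-per-weight order until the capacity is exhausted. A minimal self-contained sketch of the same density heuristic, with the module-level `weights`/`values`/`capacity` globals replaced by plain arguments; the example data at the bottom is invented for illustration:

```python
def greedy_knapsack(weights, values, capacity):
    """Greedy 0/1 knapsack by value density: fast, but not guaranteed optimal."""
    # Rank item indices by value-per-weight ratio, densest first.
    order = sorted(range(len(weights)),
                   key=lambda i: values[i] / weights[i], reverse=True)
    taken = [0] * len(weights)
    total_value = total_weight = 0
    for i in order:
        if total_weight + weights[i] <= capacity:  # item still fits
            taken[i] = 1
            total_weight += weights[i]
            total_value += values[i]
    return {'value': total_value, 'taken': taken, 'optimality': 0}

# hypothetical data, not from the original snippet
print(greedy_knapsack([2, 3, 4], [3, 4, 8], capacity=5))
# -> {'value': 8, 'taken': [0, 0, 1], 'optimality': 0}
```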
Try increasing maxiter or a smaller eps.') \n\n return(self.eig,self.x)", "title": "" }, { "docid": "dc50367fe11ae1b2a1e4fd8e6b9c5b37", "score": "0.60270655", "text": "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n #more food = worse score\n #closer food = good score\n #closer than 3 ghost = bad score\n \n foodGrid = currentGameState.getFood()\n food = foodGrid.asList()\n pos = currentGameState.getPacmanPosition()\n ghosts = []\n gdists = []\n for i in range(1, currentGameState.getNumAgents()):\n g = currentGameState.getGhostPosition(i)\n ghosts.append(g)\n gdists.append(manhattanDistance(pos, g))\n \n #fterm = len(food)\n fterm = foodGrid.width*foodGrid.height-len(food)\n #gterm = closest(pos, ghosts)[0]\n gterm = 0\n fdist, floc = closest(pos, food)\n \n # breadth first search\n #problem = PositionSearchProblem(currentGameState, goal=floc, start=pos, warn=False)\n #realDist = len(breadthFirstSearch(problem))\n \n dterm = 1/float(fdist+1)\n \n #coefficients\n a = 10\n b = 1\n c = 15\n\n for d in gdists:\n if d<3:\n gterm-=50\n if d<2:\n gterm-=50\n \n total = a*fterm + b*gterm + c*dterm\n \n # break ties\n #if right:\n # total+=2\n #if up:\n # total+=1\n return total", "title": "" }, { "docid": "287e49f8017272b659afe2b7a22ade44", "score": "0.60227233", "text": "def solve(self):", "title": "" }, { "docid": "80d1d5c90fceaa679fdec499b70a5175", "score": "0.60214704", "text": "def evaluate_best_agent(self):\n\n eval_eps = Config.EVALUATION_EPISODES\n reward_sum = 0\n log_list = []\n\n print(\"Evaluate The Best Network with Test data: episode \", self.best_episode, \" reward \", self.best_reward)\n self.restore_best_network_to_train_network()\n\n for i in range(eval_eps):\n [reward, dist, trainsize, pred] = self.evaluate(isValidation=False)\n reward_sum = reward_sum + reward\n log = {\n \"episode\": None,\n \"top_reward\": reward,\n \"exp_rate\": None,\n \"trainsize\": trainsize,\n \"top_dist\": dist,\n \"top_pred\": pred\n }\n log_list.append(log)\n\n AgentLogger.print_trianing_results(log)\n\n mean_reward = reward_sum/eval_eps\n print(\"Mean: \", mean_reward)\n AgentLogger.log_evaluation_results(log_list, self.logger, self.best_episode, self.best_reward, mean_reward)", "title": "" }, { "docid": "2147cc31fc145610dbcba7b9c5f6d2ea", "score": "0.6020431", "text": "def find_best_object_and_behavior(self):\n potential_event_deflection = np.inf\n\n if not (self.model.network_structure is None):\n # check if network structure was passed and only consider\n # candidate objects to which actor has link\n possible_ids = [i for i,link\n in enumerate(self.model.network_structure[self.unique_id])\n if link == 1]\n potential_objects = [ag for ag in self.model.schedule.agents\n if ag.unique_id in possible_ids]\n if self.model.action_on_group:\n potential_objects += [self.model.group]\n else:\n potential_objects = [ag for ag in self.model.schedule.agents\n if ag.unique_id != self.unique_id]\n if self.model.action_on_group:\n potential_objects += [self.model.group]\n\n if self.model.object_choice == \"random\":\n potential_object = np.random.choice(potential_objects)\n potential_action = compute_opt_behavior(self, potential_object)\n potential_transients = compute_transients(\n self.current_transients,\n potential_action,\n potential_object.current_transients,\n self.model.abo_coefficients)\n else:\n np.random.shuffle(potential_objects)\n\n for obj in potential_objects:\n opt_beh = compute_opt_behavior(self, obj)\n transients = compute_transients(self.current_transients,\n 
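The gradient-descent `solve` above repeats `x <- x - step * grad(x)` until the L1 norm of the gradient is small. A stripped-down sketch of that loop, assuming a fixed step size in place of the original's `optStep()` and a plain threshold instead of its size-scaled `eps * log(n)` test:

```python
import numpy as np

def gradient_descent(grad, x0, step=0.1, eps=1e-8, maxiter=1000):
    """Plain gradient descent with a gradient-norm stopping test."""
    x = np.asarray(x0, dtype=float)
    for _ in range(maxiter):
        g = grad(x)
        x = x - step * g
        if np.sum(np.abs(g)) < eps:  # converged: gradient numerically zero
            return x, True
    return x, False                  # ran out of iterations

# minimize f(x) = ||x - 1||^2, whose gradient is 2*(x - 1)
x_opt, converged = gradient_descent(lambda x: 2 * (x - 1), x0=np.zeros(3))
print(x_opt, converged)  # approximately [1. 1. 1.] True
```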
opt_beh, obj.current_transients,\n self.model.abo_coefficients)\n\n if self.model.object_choice == \"min event-tension\":\n fundamentals_stack = np.hstack((self.fundamentals, opt_beh,\n obj.fundamentals))\n event_deflection = np.linalg.norm(fundamentals_stack\n - transients)**2\n\n elif self.model.object_choice == \"max deflection-reduction\":\n actor_deflection_diff = (\n np.linalg.norm(self.fundamentals - transients[:3])**2\n - self.personal_deflection\n )\n beh_deflection = np.linalg.norm(opt_beh - transients[3:6])**2\n # we include the actual deflection reduction of all agents\n # in the object choice, not just the group object\n if obj.unique_id == -1:\n object_deflection_diff = (\n np.linalg.norm(obj.fundamentals - transients[6:])**2\n - obj.personal_deflection\n )\n for ag in [agent for agent in self.model.schedule.agents\n if agent.unique_id != self.unique_id]:\n transients_g = compute_transients(\n self.current_transients, opt_beh,\n ag.current_transients,\n self.model.abo_coefficients\n )\n object_deflection_diff+= (\n np.linalg.norm(ag.fundamentals - transients_g[6:])**2\n - ag.personal_deflection\n )\n else:\n object_deflection_diff = (\n np.linalg.norm(obj.fundamentals - transients[6:])**2\n - obj.personal_deflection\n )\n\n event_deflection = ( actor_deflection_diff\n + beh_deflection\n + object_deflection_diff )\n else:\n print(\"unknown or no object selection criterion specified\")\n\n # check if deflection for this behavior is smaller\n # than for other behaviors\n if event_deflection < potential_event_deflection:\n potential_event_deflection = event_deflection\n potential_transients = transients\n potential_object = obj\n potential_action = opt_beh\n\n return (potential_object, potential_action, potential_transients)", "title": "" }, { "docid": "45ccb88b554cf570c6a44f750a0c3ef0", "score": "0.60071445", "text": "def run(self):\n self.logger.info(\"\\n\\n************************* Genetic Algorithm *************************\")\n self.logger.info(f\"Objective: {self.objective}\")\n self.logger.info(f\"============================ Execution with seed: {self._seed} ============================\")\n self.logger.info(f\"{time.strftime('%d/%m/%Y %H:%M:%S')}\\n\\n\")\n\n random.seed(self._seed)\n np.random.seed(self._seed)\n\n self._generate_initial_population()\n self._population_statistic()\n self._print_population_info(self.new_pop, 0)\n\n best_fitness = self.new_pop.bestFitness\n count_patience = 0\n\n compare_op_obj = partial(ge) if self.objective == 'maximization' else partial(le)\n compare_op_best = partial(gt) if self.objective == 'maximization' else partial(lt)\n\n for gen in range(1, self.nr_max_gen + 1):\n self._generating_new_population()\n self._population_statistic()\n self._print_population_info(self.new_pop, gen)\n\n if compare_op_best(self.new_pop.bestFitness, best_fitness):\n best_fitness = self.new_pop.bestFitness\n count_patience = 0\n else:\n count_patience += 1\n\n if self.objective_fitness is not None:\n if compare_op_obj(self.new_pop.bestFitness, self.objective_fitness):\n self.logger.info(f\"Stop because found the objective fitness {self.objective_fitness}\")\n break\n\n if self.patience > 0:\n if count_patience >= self.patience:\n self.logger.info(f\"Stop because patience limit\")\n break\n\n self.logger.info(f\"Best solution found: {self._statistic[-1]['best_indiv']}\")\n save_statistic(self.output_dir, self._statistic)", "title": "" }, { "docid": "ef7acdb6e0620fe4b000accc940aa89b", "score": "0.5998345", "text": "def get_energy(self):\n\n D, self.e, 
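The genetic-algorithm `run` above selects its comparison operators once with `functools.partial` over `operator.ge`/`le`/`gt`/`lt`, then stops after `patience` generations without improvement. A small sketch of just that bookkeeping, with hypothetical best-of-generation fitness values:

```python
from functools import partial
from operator import ge, le, gt, lt

def make_comparators(objective):
    """Pick the objective and strict-improvement comparators once."""
    cmp_obj = partial(ge) if objective == 'maximization' else partial(le)
    cmp_best = partial(gt) if objective == 'maximization' else partial(lt)
    return cmp_obj, cmp_best

cmp_obj, cmp_best = make_comparators('maximization')
best, patience, count = 10.0, 3, 0
for fitness in [9.0, 11.0, 11.0, 10.5, 10.9]:  # hypothetical values
    if cmp_best(fitness, best):
        best, count = fitness, 0  # improvement resets the patience counter
    else:
        count += 1
    if count >= patience:
        print('stop: patience limit reached')
        break
print(best)  # 11.0
```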
self.C = self.build_density(self.H)\n\n E_old = 0.0\n F_old = None\n\n for iteration in range(self.max_iter):\n\n J = np.einsum(\"pqrs, rs -> pq\", self.g, D)\n K = np.einsum(\"prqs, rs -> pq\", self.g, D)\n\n # F = H_pq + 2 * G_pqrs D_rs - G_prqs D_rs\n F = self.H + 2.0 * J - K\n\n if self.damping == 'on' and iteration >= self.damp_start:\n F = self.damp(F_old, F)\n\n F_old = F\n\n if self.diis == 'on' and iteration >= self.diis_start:\n F = diis_helper.diis(F, D, self.diis_start,self.diis_vector)\n\n E_electric = np.sum((F + self.H) * D)\n self.E = E_electric + self.mol.nuclear_repulsion_energy()\n\n E_diff = self.E - E_old\n E_old = self.E\n\n # Build the AO gradient\n grad = F @ D @ self.S - self.S @ D @ F\n grad_rms = np.mean(grad ** 2) ** 0.5\n\n D, self.e, self.C = self.build_density(F)\n\n print(\"Iteration: %3d Energy: % 16.12f Energy difference: % 8.4f Gradient difference: % 8.4f\" % (iteration, self.E, E_diff,grad_rms))\n\n if E_diff < self.e_conv and grad_rms < self.d_conv:\n break\n\n print(\"SCF has finished! \\n\")", "title": "" }, { "docid": "7b24c4d70a946f3b9b496e46c1a8767e", "score": "0.5992288", "text": "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())", "title": "" }, { "docid": "65ed1251824de1d9a976769dd87cfabf", "score": "0.59906185", "text": "def _find_solutions(self):\n # Set progress message\n self.model.progress.set_message(\"finding solutions\")\n\n # Generate grid of indices to search for solutions\n grid_range = range(self.opts.gp)\n indices = [tuple([n for n in grid_range]) for _ in self.model.iter_obj2]\n self.cp = list(itertools.product(*indices))\n self.cp = [i[::-1] for i in self.cp]\n\n # Pickle the model and initialize queue and process handlers\n self.model.pickle()\n self.queues = QueueHandler(self.cp, self.opts)\n self.queues.split_work()\n self.procs = ProcessHandler(self.opts, self.model, self.queues)\n\n # Start processes and wait for results\n self.procs.start()\n self.unprocessed_sols = []\n while not self.procs.join():\n self.unprocessed_sols.extend(self.queues.get_result())\n # One last sweep to make sure all results have been collected\n self.unprocessed_sols.extend(self.queues.get_result())\n\n # In case all worker processes were killed, not all work may have been done.\n if self.procs.any_killed:\n raise Exception(\"At least one worker exited prematurely, not all computations may have been done.\")\n\n # Clean the pickled model\n self.model.clean()", "title": "" }, { "docid": "7fb69b984732c6d732aacba92def1e51", "score": "0.5982387", "text": "def solution_most_iterations_done(self) -> int:\n return int(self.dss_obj.SolutionI(ctypes.c_int32(41), ctypes.c_int32(0)))", "title": "" }, { "docid": "17959bc5f94063bea50630dbccfdc9af", "score": "0.59580135", "text": "def solve(self, problem):\r\n recommended_solns = []\r\n intermediate_budgets = []\r\n expended_budget = 0\r\n delta_max = self.factors[\"delta_max\"]\r\n gamma_0 = self.factors[\"gamma_0\"]\r\n delta_candidate = [gamma_0 * delta_max, delta_max, delta_max / gamma_0]\r\n #print(delta_candidate)\r\n\r\n # default values\r\n eta_1 = self.factors[\"eta_1\"]\r\n eta_2 = self.factors[\"eta_2\"]\r\n gamma_1 = self.factors[\"gamma_1\"]\r\n gamma_2 = 
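The SCF `get_energy` above builds Coulomb- and exchange-like matrices by contracting the two-electron integrals `g` with the density `D` via `np.einsum`. The two contraction patterns in isolation, on random toy tensors (shapes and values are illustrative only):

```python
import numpy as np

rng = np.random.default_rng(0)
g = rng.random((2, 2, 2, 2))  # stand-in for the two-electron integrals
D = rng.random((2, 2))        # stand-in for the density matrix

J = np.einsum("pqrs, rs -> pq", g, D)  # Coulomb-like contraction
K = np.einsum("prqs, rs -> pq", g, D)  # exchange-like contraction
F_two_electron = 2.0 * J - K           # two-electron part of the Fock-like matrix
print(F_two_electron.shape)            # (2, 2)
```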
self.factors[\"gamma_2\"]\r\n k = 0 # iteration number\r\n\r\n # Start with the initial solution\r\n new_x = problem.factors[\"initial_solution\"]\r\n new_solution = self.create_new_solution(new_x, problem)\r\n recommended_solns.append(new_solution)\r\n intermediate_budgets.append(expended_budget)\r\n\r\n # Parameter tuning run\r\n tp_final_ob_pt, k, delta, recommended_solns, intermediate_budgets, expended_budget, new_x = self.parameter_tuning(\r\n delta_candidate[0], problem)\r\n for i in range(1, 3):\r\n final_ob_pt, k_pt, delta_pt, recommended_solns_pt, intermediate_budgets_pt, expended_budget_pt, new_x_pt = self.parameter_tuning(\r\n delta_candidate[i], problem)\r\n expended_budget += expended_budget_pt\r\n if -1 * problem.minmax[0] * final_ob_pt < -1 * problem.minmax[0] * tp_final_ob_pt:\r\n k = k_pt\r\n delta = delta_pt\r\n recommended_solns = recommended_solns_pt\r\n intermediate_budgets = intermediate_budgets_pt\r\n new_x = new_x_pt\r\n\r\n intermediate_budgets = (\r\n intermediate_budgets + 2 * np.ones(len(intermediate_budgets)) * problem.factors[\"budget\"] * 0.01).tolist()\r\n intermediate_budgets[0] = 0\r\n\r\n while expended_budget < problem.factors[\"budget\"]:\r\n k += 1\r\n fval, Y, q, grad, Hessian, delta_k, expended_budget = self.model_construction(new_x, delta, k, problem,\r\n expended_budget)\r\n\r\n # Cauchy reduction\r\n if np.matmul(np.matmul(grad, Hessian), grad) <= 0:\r\n tau = 1\r\n else:\r\n tau = min(1, norm(grad) ** 3 / (delta * np.matmul(np.matmul(grad, Hessian), grad)))\r\n\r\n grad = np.reshape(grad, (1, problem.dim))[0]\r\n candidate_x = new_x - tau * delta * grad / norm(grad)\r\n\r\n for i in range(problem.dim):\r\n if candidate_x[i] < problem.lower_bounds[i]:\r\n candidate_x[i] = problem.lower_bounds[i] + 0.01\r\n elif candidate_x[i] > problem.upper_bounds[i]:\r\n candidate_x[i] = problem.upper_bounds[i] - 0.01\r\n\r\n candidate_solution = self.create_new_solution(tuple(candidate_x), problem)\r\n\r\n # adaptive sampling needed\r\n problem.simulate(candidate_solution, 1)\r\n expended_budget += 1\r\n sample_size = 1\r\n\r\n # Adaptive sampling\r\n while True:\r\n problem.simulate(candidate_solution, 1)\r\n expended_budget += 1\r\n sample_size += 1\r\n sig2 = candidate_solution.objectives_var\r\n if sample_size >= self.samplesize(k, sig2, delta_k):\r\n break\r\n\r\n # calculate success ratio\r\n fval_tilde = -1 * problem.minmax[0] * candidate_solution.objectives_mean\r\n\r\n # replace the candidate x if the interpolation set has lower objective function value\r\n if min(fval) < fval_tilde:\r\n minpos = fval.index(min(fval))\r\n fval_tilde = min(fval)\r\n candidate_x = Y[minpos][0]\r\n\r\n if (self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(\r\n np.array(candidate_x) - np.array(new_x), q)) == 0:\r\n rho = 0\r\n else:\r\n rho = (fval[0] - fval_tilde) / (\r\n self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(\r\n candidate_x - new_x, q));\r\n\r\n if rho >= eta_2: # very successful\r\n new_x = candidate_x\r\n delta_k = min(gamma_1 * delta_k, delta_max)\r\n recommended_solns.append(candidate_solution)\r\n intermediate_budgets.append(expended_budget)\r\n elif rho >= eta_1: # successful\r\n new_x = candidate_x\r\n delta_k = min(delta_k, delta_max)\r\n recommended_solns.append(candidate_solution)\r\n intermediate_budgets.append(expended_budget)\r\n else:\r\n delta_k = min(gamma_2 * delta_k, delta_max)\r\n\r\n return recommended_solns, intermediate_budgets", "title": "" }, { "docid": 
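The trust-region `solve` above takes a Cauchy-point step: the step length `tau` is clipped by the model curvature `g^T H g` before moving along `-g` to (at most) the trust-region radius `delta`. That computation on its own; `grad`, `hessian`, and `delta` are stand-ins for the solver's state, and a nonzero gradient is assumed:

```python
import numpy as np

def cauchy_step(grad, hessian, delta):
    """Cauchy-point step: -tau * delta * g / ||g||, tau clipped by curvature."""
    g = np.asarray(grad, dtype=float)
    gBg = g @ hessian @ g
    if gBg <= 0:                 # non-positive curvature: step to the boundary
        tau = 1.0
    else:
        tau = min(1.0, np.linalg.norm(g) ** 3 / (delta * gBg))
    return -tau * delta * g / np.linalg.norm(g)

step = cauchy_step([1.0, 0.0], np.eye(2), delta=0.5)
print(step)  # [-0.5 -0. ], since tau = min(1, 1 / 0.5) = 1
```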
"2df6e82235fe60278712b06fb8d042ae", "score": "0.5955973", "text": "def calculate(self):\n # initialize transformation from canonic to energy\n # optimal states (if necessary)\n if self.W_mn is None:\n self.initialize_orbitals(rattle=self.rattle)\n\n # optimize the non-unitary invariant part of the\n # functional\n self.unitary_optimization()\n\n return self.esic, self.ekin", "title": "" }, { "docid": "ad29ddb80d7682684f4cf035f8b96f23", "score": "0.59508735", "text": "def overall_fitness(self):\n # Use a decyaing function to generate fitness\n # Use two step decaying function\n # First block gives importance to exploration and when as soon\n # food has been found, the next block will focus on dropping\n # the food on hub\n self.individual[0].fitness = (1 - self.beta) * self.delayed_reward \\\n + self.exploration_fitness() + self.carrying_fitness() \\\n + self.food_collected", "title": "" }, { "docid": "faa0eaf976005e56d162f1b3f95a98d0", "score": "0.5949581", "text": "def solveAll(self):\r\n return None", "title": "" }, { "docid": "b4f00dadad3e1e7502978585f5eeb041", "score": "0.5946176", "text": "def optimzeGeneralParameters():\n\t#initialize qm energies dictionaries for each molecule\n\tglobal qm_energies\n\tqm_energies = {mol : {} for mol in molecules}\n\n\t#set parameters for minimize function\n\tmaxiter = 5000\n\tpgtol = -1e-17\n\tftol = 1e-17\n\n\t#get parameter bounds for minimization\n\tall_bnds = make_bounds_list(initialValuesList, True)\n\ta_bnds = make_bounds_list(initialValuesList, False)\n\n\t#optimize A parameters for each energy component, store in final_params list\n\tparameterList = initialValuesList\n\tmetaPOInter_initial_list = remove_C_params(parameterList)\n\tglobal fitting_component\n\twhile fitting_component < 4:\n\n\t\t#make list of bounds for component to pass down to POInter. Only include B bound if fitting exchange\n\t\tif fitting_component == 0:\n\t\t\tbnds = all_bnds\n\t\telse:\n\t\t\tbnds = a_bnds\n\n\t\t#make list of parameters for component to pass down to POInter\n\t\t#get only parameters for the component we are fitting\n\n\t\t#TODO: make so user can choose whether or not to fit B params\n\t\t#TODO: make sure input B params are the same for every molecule\n\t\t\t#TODO: use same .exp file ()\n\t\tmetaPOInter_input = get_component_parameters(metaPOInter_initial_list)\n\n\t\tres = minimize(metaPOInter,metaPOInter_input,method='L-BFGS-B',\\\n\t\t\t\t\t\t\tjac=True,\\\n\t\t\t\t\t\t\toptions={'disp':True,'gtol':pgtol,'ftol':ftol,'maxiter':maxiter},\\\n\t\t\t\t\t\t\tbounds = bnds)\n\n\t\tpopt = res.x\n\t\tprint \"Optimized parameters for component \" + str(fitting_component) + \" : \" + str(popt)\n\n\t\tif not res.success:\n\t\t\tprint 'Warning! 
Optimizer did not terminate successfully, and quit with the following error message:'\n\t\t\tprint\n\t\t\tprint res.message\n\t\t\tprint\n\t\t\treturn\n\n\t\tglobal optimized_params\n\t\toptimized_params[energyComponents[fitting_component]] = popt\n\n\t\t#reset new B params in POInter objects after fitting exchange component\n\t\tif fitting_component == 0:\n\t\t\tupdated_B_params = get_updated_B_from_exchange(popt)\n\t\t\tfor mol in molecules.keys():\n\t\t\t\ti = 0\n\t\t\t\tfor atomtype in allAtomtypes:\n\t\t\t\t\tpointerModel = molecule_POInter_objects[mol]\n\t\t\t\t\tpointerModel.params[atomtype][0]['B'] = updated_B_params[i]\n\t\t\t\t\ti += 1\n\t\t\n\t\tfitting_component += 1\n\n\t#write output JSON file with optimized parameters\n\toptimized_params[\"Dispersion\"] = [1.0 for atomtype in allAtomtypes]\n\tfinal_params_list = write_output_params_to_list(optimized_params)\n\tmap_params(final_params_list, \"optimized_params\")\n\n\t#calc dispersion lsq_error and store\n\tdisp_lsq_error = calc_disp_lsq_error()\n\tcomponent_lsq_error[energyComponents[fitting_component]] = disp_lsq_error\n\n\t#write lsq_error output file\n\twith open(\"component_lsq_error.txt\", 'w') as f:\n\t\tfor component in energyComponents:\n\t\t\tf.write(component + \":\" + '\\t')\n\t\t\tf.write(str(component_lsq_error[component]) + '\\n')\n\n\tprint \"===========================================================================\"\n\tprint \" Optimized parameters successfully written to optimized_params.constraints \"\n\tprint \"===========================================================================\"\n\n\treturn res", "title": "" }, { "docid": "036faa1e19c7270a93947e53c76cf9c0", "score": "0.5943793", "text": "def get_solution(self):\n\n return self._funcs[\"get_solution\"](self.problem, self.variable_dict)", "title": "" }, { "docid": "469e51f3864b44e39addeb0ee1556f22", "score": "0.59420794", "text": "def get_analysis(self):\n return self.best_fitness, self.avg_fitness, self.worst_fitness", "title": "" }, { "docid": "88e417ac9a10cd6b79cb7903657a121a", "score": "0.593701", "text": "def solutions(self):\r\n if self.results is None:\r\n warning('results')\r\n return \r\n if self.is_isoschizomer():\r\n iso = True\r\n else:\r\n iso = False\r\n if type(self.best) == list:\r\n print('Results obtained using the following group of enzymes:')\r\n for e in self.best:\r\n print(e, sep=',')\r\n print('Other possible solutions:')\r\n for el in self.results:\r\n if el != self.best:\r\n print(el)\r\n else:\r\n if iso:\r\n warning('enzyme3')\r\n print('Best enzyme:\\n', self.best)\r\n print('Other possible solutions:')\r\n b = self.results.index(self.best)\r\n print(self.results[0:b]+ self.results[b+1:])\r\n\r\n return", "title": "" }, { "docid": "8f362cc586c58c587ff2150bc663a74b", "score": "0.5930106", "text": "def optimize(self):\n\n\t\tstill_needed = []\n\t\t# Verify the required information\n\t\tif self.preference_input is None:\n\t\t\tstill_needed.append(\"Preferences\")\n\t\tif self.LP_input is None:\n\t\t\tstill_needed.append(\"LP_input\")\n\t\tif self.teacher_file is None:\n\t\t\tstill_needed.append(\"Teacher File (secondary)\")\n\n\t\ts = \"\"\n\t\tif len(still_needed) > 0:\n\t\t\ts = str(still_needed)\n\t\t\tmessagebox.showerror(\"Error\", \"You are missing the following\\n\\n\" + s)\n\t\t\treturn \n\n\t\tGAP = self.slider.get()\n\t\tprint(GAP)\n\t\t\n\t\tprint(\"All good\")\n\n\t\t# optimize_schedule(self.preference_input, self.LP_input, None,\n\t\t# \tself.teacher_file, GAP, self.requirements, self.save_location)\n\t\t# The 
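`optimzeGeneralParameters` above drives `scipy.optimize.minimize` with `method='L-BFGS-B'`, `jac=True` (the objective returns value and gradient together), and per-parameter box bounds. A minimal runnable sketch of that calling convention on a toy quadratic; the bounds and tolerances here are placeholders rather than the fitting code's real values:

```python
import numpy as np
from scipy.optimize import minimize

def objective(x):
    """Return (value, gradient) in one call, matching jac=True."""
    f = np.sum((x - 2.0) ** 2)
    grad = 2.0 * (x - 2.0)
    return f, grad

bnds = [(0.0, 10.0)] * 3  # simple box bounds per parameter
res = minimize(objective, np.zeros(3), method='L-BFGS-B', jac=True,
               options={'gtol': 1e-8, 'ftol': 1e-12, 'maxiter': 5000},
               bounds=bnds)
print(res.success, res.x)  # True [2. 2. 2.]
```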
none is for Grades, as I just hard coded that in the opt code for now\n\n\t\t# Create optimization instance\n\t\tO = Optimizer(self.preference_input, self.LP_input, None,\n\t\t\t\tself.teacher_file, GAP, self.requirements, self.save_location)\n\t\tO.optimize()\n\n\n\n\n\n\t\t# Things we need to think abou thow to deal with:\n\t\t# \tGrades\n\t\t#\tProximity\n\t\t#\t", "title": "" }, { "docid": "b2320927b5675caba0e0b584d30231a5", "score": "0.59246856", "text": "def solve(self):\n self.DSSSolution.Solve()", "title": "" }, { "docid": "79aa869a66d8a075a889aaa4a27ba494", "score": "0.5923873", "text": "def APSP_1(self):\n\n\t\tt_start = time.time()\n\t\tres = float('inf')\n\t\tfor s in self.G.keys():\n\t\t\tprint('Working on node {} of total {} nodes... time elapsed: {:5.2f} minutes.'\\\n\t\t\t\t.format(s, self.n, (time.time() - t_start)/60))\n\t\t\tdp = self.Bellman_Ford(s, self.G)\n\t\t\t\n\t\t\tif dp is None:\n\t\t\t\tprint('\\nThere is a negative cost cycle. Exit the algorithm.')\n\t\t\t\treturn None\n\t\t\tres = min(res, min(dp.values()))\n\n\t\treturn res", "title": "" }, { "docid": "de4dcf4702ef09949cd1b9ec034b0e13", "score": "0.59213895", "text": "def run(self, max_iterations=100) -> Tuple[List[str], List[float]]:\n\n\n for i in range(max_iterations):\n # Generate Moveset\n moves = self.generate_moveset(self.state)\n\n if len(moves) == 0:\n return (self.state, self.costs)\n\n # Choose S i randomly from Moveset(S)\n m = random.choice(moves)\n\n # Define dV=V(S i )-V(S)\n v_s = self.graph.solution_cost(self.state)\n v_m = self.graph.solution_cost(m)\n dV = v_s - v_m\n\n # If dV>0 then S←S i else with probability p, S←S i\n if dV > 0 or (random.random() <= self.generate_p(dV)):\n self.state = m\n self.costs.append(v_m)\n self.decrease_temperature( i/max_iterations)\n else:\n self.costs.append(v_s)\n # If downhill descent is minimal, terminate\n if abs(dV/v_s) < SimulatedAnnealing.STOPPAGE_VALUE:\n return (self.state, self.costs)\n\n return (self.state, self.costs)", "title": "" }, { "docid": "5913a3f71cbfc93d162a9dbdedf2bd1c", "score": "0.5912698", "text": "def startSearch(self):\n current = self.initNode\n bestScore = float(\"inf\")\n for i in range(0,100):\n current = self.initNode\n while True:\n neighbors = current.expand()\n if not neighbors:\n break\n current = min(neighbors, key=attrgetter(\"h\"))\n \n # Random minimum\n listOfMins = [x for x in neighbors if current.h == x.h]\n index = random.randint(0, len(listOfMins)-1)\n current = listOfMins[index]\n \n if self.successTest(current.state):\n if current.g < bestScore:\n bestScore = current.g\n bestOne = current\n break\n return self._extractPlan(bestOne)", "title": "" }, { "docid": "a96eb04d01c5e9d4aff74f3f921f6ca9", "score": "0.5908016", "text": "def best(self):\n if self._best is None:\n raise BestNotFound()\n return self._best", "title": "" }, { "docid": "c72e6d6de196d127dfef8df3d116ae88", "score": "0.590735", "text": "def employed_bees_phase(self):\n for i in range(self.get_nsource()):\n # nominate neighbour solution except\n k = self.random_nominee_except(i)\n # randomly select soln vector element xi to change\n xi = rand.randint(0,self.get_dimension()-1)\n phi = self.a * rand.uniform(-1,1)\n # get current food source\n eith_src = self.bee[i].get_source()\n # generate a new solution with all vector element in current\n # vector and change the randomly selected parameter only\n enew_src = np.copy(eith_src)\n ekth_src = self.bee[k].get_source()\n # generate new candidate food source\n enew_src[xi] = eith_src[xi] + phi * 
(eith_src[xi] - ekth_src[xi])\n # update new soln in range of solution lower_limit\n enew_src = self.band_filter(enew_src)\n enew_cost = self.cost_of(enew_src)\n # generate new bee with new_src\n\n enew_bee = food.Nectar(enew_src, enew_cost)\n # greedy selection by current employed bee\n self.bee[i].greedy_choice(enew_bee)", "title": "" }, { "docid": "cd0932f9ee1bdccd14136fe6af53b0f2", "score": "0.58952796", "text": "def report_best(self):\n lowest_Err = min(self.Err)\n index = self.Err.index(lowest_Err)\n best_x = self.x[index]\n formatted_x = '[' + ', '.join(['%.2f' % xi for xi in best_x]) + ']'\n print 'best x: %s' % formatted_x\n print 'lowest Err: %.3E' % lowest_Err\n return best_x", "title": "" }, { "docid": "3cce4fa387bf2542bc60e2516c287211", "score": "0.5893321", "text": "def computeOptimalPlays(self):\n\n # Base Cases\n for i in range(self.size):\n # If there is only one pot of gold left, take that pot\n self.cache[(i, i)] = {'score': self.gold[i], 'choice': 'left'}\n for i in range(self.size - 1):\n # If there are two pots of gold left, take the larger one\n self.cache[(i, i+1)] = {\n 'score': max(self.gold[i], self.gold[i+1]),\n 'choice': 'left' if self.gold[i] >= self.gold[i+1] else 'right'\n }\n\n # Inductive Step\n for length in range(2,self.size):\n for j in range(self.size - length):\n k = j + length\n # Here, we decide whether to take from the left (the j side)\n # or the right (the k side). If we take from the left, then\n # (if) our opponent makes the optimal move, we'll be left with\n # the minimum of two pots taken from the left or one pot taken\n # from either side (because it is a zero-sum game).\n # Similarly, if we take from the right, then our worst-case\n # score is the minimum of the subgame situation where either\n # two pots are taken from the right or one from either side.\n # The subgame structure is what makes this a DP problem.\n leftMove = self.gold[j]\n rightMove = self.gold[k]\n afterLeftMove = min(self.score(j+2,k), self.score(j+1,k-1))\n afterRightMove = min(self.score(j+1,k-1), self.score(j,k-2))\n\n if leftMove + afterLeftMove >= rightMove + afterRightMove:\n score = leftMove + afterLeftMove\n choice = 'left'\n else:\n score = rightMove + afterRightMove\n choice = 'right'\n\n self.cache[(j,k)] = {\n 'score': score,\n 'choice': choice\n }", "title": "" }, { "docid": "e515c8f7954ca9a5e4c489dc52a14b0d", "score": "0.5886591", "text": "def best_ind(self):\n random.seed(64)\n NGEN = 10\n MU = 10\n pop = self.toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean, axis=0)\n stats.register(\"std\", numpy.std, axis=0)\n stats.register(\"min\", numpy.min, axis=0)\n stats.register(\"max\", numpy.max, axis=0)\n # algorithms.eaMuPlusLambda(pop, self.toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats, halloffame=hof)\n algorithms.eaSimple(pop, self.toolbox, cxpb=0.6, mutpb=0.05, ngen=NGEN, halloffame=hof)\n print(\"The best individual is :\", hof[-1])\n # print(\"The best fitness is :\", eval_ind(self, hof[-1]))\n return hof[-1]", "title": "" }, { "docid": "a3d1dea5cd1d94568fde8f01cc0d0532", "score": "0.5884986", "text": "def optimise(self, verbose=False, guess=None, \n elitism=5, method='GA',cooling_rate=0.1, maxtime=60, maxstallgen=100,\n nswap=3, popsize=50, mut=0.02, maxgens=50, dimension_bits=1):\n # TODO: check length of init guess\n from scipy.optimize import minimize\n from scipy.optimize import basinhopping\n self.models = []\n def eval_func_in(x):\n return 
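`employed_bees_phase` above builds each candidate food source by perturbing one randomly chosen dimension of source `i` toward or away from a random neighbour `k`: `v[xi] = x_i[xi] + phi * (x_i[xi] - x_k[xi])` with `phi` drawn from `a * [-1, 1]`. That move in isolation, without the band filter and greedy selection; `sources` is a hypothetical array of food-source vectors:

```python
import random
import numpy as np

def candidate_source(sources, i, a=1.0):
    """One ABC employed-bee move: perturb a single dimension of source i."""
    k = random.choice([j for j in range(len(sources)) if j != i])  # neighbour != i
    xi = random.randrange(sources.shape[1])                        # dimension to change
    phi = a * random.uniform(-1, 1)
    new = sources[i].copy()
    new[xi] = sources[i][xi] + phi * (sources[i][xi] - sources[k][xi])
    return new

sources = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])  # hypothetical sources
print(candidate_source(sources, i=0))
```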
self.eval_func(x, prior=[])\n\n N = self.N_reactions\n\n import inspyred\n from random import Random\n rand = Random()\n if method == 'GA':\n ea = inspyred.ec.GA(rand)\n else:\n ea = inspyred.ec.SA(rand)\n self.scores = []\n self.lower = 0\n self.upper = 1\n self.best_score = 1\n\n self.scores_all = []\n def evaluator(candidates, args):\n dimensions = args['dimensions']\n dimension_bits = args['dimension_bits']\n fitness = []\n for cs in enumerate(candidates):\n #if len(params) != dimensions:\n #print(cs, dimensions, dimension_bits)\n if method == 'GA':\n params = self._binary_to_real(cs[1], dimensions, dimension_bits)\n else:\n params = cs[1]\n score = eval_func_in(params)\n #score = eval_func_in(cs)\n fitness.append(score)\n # TODO selection pressure could be added here \n self.scores_all.extend(fitness)\n self.scores.append(min(np.array(fitness)))\n if self.scores[-1] < self.best_score:\n self.best_score = self.scores[-1]\n i = np.argmin(np.array(fitness))\n self.best_individual = candidates[i]\n start = False\n print(self.scores[-1], self.best_score)\n\n return fitness\n dimensions = N\n #dimension_bits = 4 # preceision is 1/(2**n-1)\n def generator(random, args):\n # For the first generation only.\n #print('HERE')\n dimension_bits = args['dimension_bits']\n #guess = args['guess'][:]\n pop = [random.choice([0, 1]) \n for _ in xrange(self.N_reactions * dimension_bits)]\n #if guess is not None:\n # pop[0] = guess\n self.pop = pop\n if hasattr(self, 'best_individual'):\n pop = self.best_individual\n return pop\n\n import inspyred.ec\n import inspyred.ec.ec\n ea.terminator = inspyred.ec.terminators.evaluation_termination\n bounder = inspyred.ec.ec.Bounder(0,1)\n\n self.storage = []\n\n def my_observer(population, num_generations, num_evaluations, args):\n #best = max(population)\n #print('{0:6} -- {1} : '.format(num_generations, best.fitness))\n\n #print(population)\n self.storage.append(population[0].candidate[:])\n for pop in population:\n self.models.append(pop.candidate[:])\n ea.observer = my_observer\n\n\n ea.selector = inspyred.ec.selectors.uniform_selection\n #ea.selector = inspyred.ec.selectors.rank_selection\n #ea.selector = inspyred.ec.selectors.fitness_proportionate_selection\n #ea.selector = inspyred.ec.selectors.tournament_selection\n\n ea.replacer = inspyred.ec.replacers.truncation_replacement\n #ea.replacer = inspyred.ec.replacers.plus_replacement\n self.ea = ea\n\n final = ea.evolve(generator=generator, evaluator=evaluator, \n maximize=False, bounder=bounder, temperature=100, cooling_rate=cooling_rate,\n max_evaluations=popsize*maxgens,dimensions=len(self.model.reactions),\n dimension_bits = dimension_bits, guess=guess, gaussian_mean=0.5, gaussian_std=0.5,\n pop_size=popsize, mutation_rate=mut, num_elites=elitism)\n\n self.ea = ea\n self.final = final\n res = None\n self.best_bitstring = max(self.ea.population).candidate\n print self.best_bitstring\n try:\n self.best_bitstring = self._binary_to_real(self.best_bitstring,\n len(self.model.reactions), dimension_bits)\n except:\n pass", "title": "" }, { "docid": "7a446de06c2fc8fde29effb2ef29f743", "score": "0.5871639", "text": "def potential_energy_distribution(self):\n\n if len(self.traces) is None:\n raise ValueError(\"Can't predict final energy with just one trace!\")\n\n self.run_all_iterations()\n return self.energy_model.predict_from_trace(self.observed_energies)[0]", "title": "" }, { "docid": "2080004ed77bb58b93101bbcdb5a9740", "score": "0.5868522", "text": "def result(self):\n return (self.best_mu, self.best_reward,\n 
self.curr_best_reward, self.sigma)", "title": "" }, { "docid": "99d29e673ca70278dd187ed8c0e891f2", "score": "0.5866907", "text": "def get_optimal():\n LOG.info(\"Retrieving Optimal with url : %s\", request.url)\n recipe = request.get_json()\n LOG.info(recipe)\n LOG.info(str(recipe['name']))\n current_time = int(time.time())\n workload_name = 'optimal_'+str(current_time)\n # if 'ts_from' in recipe:\n # LOG.debug(recipe['ts_from'])\n # LOG.debug(recipe['ts_to'])\n # workload = Workload(str(recipe['name']), ts_from= recipe['ts_from'], ts_to= recipe['ts_to'])\n # eng = Engine()\n # eng.run('optimal', recipe['name'], recipe['ts_from'], recipe['ts_to'])\n # else:\n # workload = Workload(str(recipe['name']))\n config = {}\n if recipe.get('device_id'):\n config['device_id'] = recipe['device_id'].strip(\n ).lower().replace('-', '_')\n if recipe.get('project'):\n config['project'] = recipe['project']\n if recipe.get('sort_order'):\n config['sort_order'] = recipe['sort_order']\n if recipe.get('telemetry_filter', None) is not None:\n config['telemetry_filter'] = recipe['telemetry_filter']\n else:\n config['telemetry_filter'] = False\n workload = Workload(workload_name, workload_config=config)\n # storing initial recipe\n # TODO: validate recipe format\n recipe_bean = Recipe()\n recipe_bean.from_json(recipe)\n workload.add_recipe(\n int(\"{}{}\".format(int(round(time.time())), '000000000')), recipe_bean)\n pipe_exec = OptimalPipe()\n node_type = 'machine'\n try:\n workload = pipe_exec.run(workload, node_type)\n except KeyError:\n return Response('Service not ready yet, please wait or restart landscape', status=202)\n if workload.get_latest_graph() is None and config.get('device_id') is not None:\n return Response('Device not found', status=404)\n if workload.get_latest_graph() is None:\n return Response('Landscape not ready yet?', status=202)\n results = workload.get_metadata(OptimalFilter.__filter_name__)\n # return Response(results.to_json(), mimetype=MIME)\n # return Response(results.to_dict('results'), mimetype=MIME)\n json_results = json.dumps(results.to_dict('results'))\n return Response(json_results, mimetype=MIME)", "title": "" }, { "docid": "6f76478ca755f6f62b9a4feabf72ea7b", "score": "0.58448356", "text": "def solve(self):\n start_time = time.time()\n maxdepth = 0\n break_cond = False\n\n while(not break_cond):\n if self.algo == \"bfs\":\n # Pop the leftmost element if doing a bfs (Queue)\n current_state = self.frontier.popleft()\n elif self.algo == \"dfs\":\n # Pop the rightmost element if doing a dfs (Stack)\n current_state = self.frontier.pop()\n else:\n # Get element with highest priority if doing A* (Heap)\n current_state = self.frontier.get()\n self.frontier_set.remove(current_state)\n if (self.isFinalState(current_state)):\n soln = self.get_solution_moves(current_state)\n end_time = time.time()\n stats = {}\n stats[\"nodes_expanded\"] = len(self.explored)\n stats[\"search_depth\"] = current_state.depth\n stats[\"max_search_depth\"] = maxdepth\n stats[\"cost_of_path\"] = len(soln)\n stats[\"time\"] = end_time - start_time\n stats[\"path\"] = soln\n return stats\n neighbors = current_state.generate_possible_states()\n if self.algo == \"dfs\":\n neighbors.reverse()\n\n for neighbor in neighbors:\n if neighbor not in self.explored and neighbor not in self.frontier_set:\n if self.algo == \"bfs\" or self.algo == \"dfs\":\n self.frontier.append(neighbor)\n else:\n self.frontier.put(neighbor)\n self.frontier_set.add(neighbor)\n if neighbor.depth > maxdepth:\n maxdepth = neighbor.depth\n 
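The puzzle `solve` above keeps a `deque` frontier plus a parallel `frontier_set` and an `explored` set so membership tests stay O(1). The same bookkeeping on a plain adjacency-dict graph, sketching only the BFS branch (`popleft` gives FIFO order; the DFS branch would `pop` from the right instead):

```python
from collections import deque

def bfs_depth(graph, start, goal):
    """Breadth-first search returning the depth at which goal is found."""
    frontier = deque([start])
    frontier_set = {start}
    explored = set()
    depth = {start: 0}
    while frontier:
        state = frontier.popleft()  # FIFO pop -> breadth-first
        frontier_set.remove(state)
        if state == goal:
            return depth[state]
        explored.add(state)
        for nbr in graph[state]:
            if nbr not in explored and nbr not in frontier_set:
                frontier.append(nbr)
                frontier_set.add(nbr)
                depth[nbr] = depth[state] + 1
    return None  # goal unreachable

g = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}  # hypothetical graph
print(bfs_depth(g, 'a', 'd'))  # 2
```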
self.explored.add(current_state)\n if self.algo == \"bfs\" or self.algo == \"dfs\":\n frontier_sz = len(self.frontier)\n else:\n frontier_sz = self.frontier.qsize()\n logging.debug(\"Frontier size = \" +\n str(frontier_sz) +\n \"; Explored size = \" +\n str(len(self.explored)))\n if self.algo == \"bfs\" or self.algo == \"dfs\":\n break_cond = len(self.frontier) == 0\n else:\n break_cond = self.frontier.empty()\n logging.error(\"This is an unsolvable board!\")\n return None", "title": "" }, { "docid": "468d623199cf946ade4500c72622bb3f", "score": "0.58404225", "text": "def doit(self):\n self.reduceSolutions()\n keyz = self.solutions.keys()\n keyz.sort()\n prodarr = self.mergeIdent(keyz)\n keyz += self.chkForMore(keyz,prodarr)\n localans = self.solvePell()\n setx = set(keyz+localans)\n ansvec = list(setx)\n answer = 1+sum(ansvec)\n return answer", "title": "" }, { "docid": "370c27f8417ed8239b7d1a695857c183", "score": "0.5814705", "text": "def first_state(self):\n new_sol = Solution(\n self.all_ts[0][:1],\n self.all_ys[0][:, :1],\n self.all_models[:1],\n self.all_inputs[:1],\n None,\n None,\n \"final time\",\n )\n new_sol._all_inputs_casadi = self.all_inputs_casadi[:1]\n new_sol._sub_solutions = self.sub_solutions[:1]\n\n new_sol.solve_time = 0\n new_sol.integration_time = 0\n new_sol.set_up_time = 0\n\n return new_sol", "title": "" }, { "docid": "3cc528770f4cb159b4a99b6fd7ff1d8f", "score": "0.5813268", "text": "def run(self):\n problem = self.testproblem.problem\n results = TestResults(self.testproblem, self.config)\n # results = Test\n try:\n start = time.time() # Time the solve\n print(\"starting\",self.testproblem.id,\"with config\",self.config.id,\"at\",start)\n problem.solve(solver = self.config.solver, verbose = self.config.verbose, **self.config.kwargs)\n print(\"finished solve for\", self.testproblem.id, \"with config\", self.config.id)\n if problem.solver_stats.solve_time is not None:\n results.solve_time = problem.solver_stats.solve_time\n else:\n results.solve_time = time.time() - start\n if problem.solver_stats.setup_time is not None:\n results.setup_time = problem.solver_stats.setup_time\n if problem.solver_stats.num_iters is not None:\n results.num_iters = problem.solver_stats.num_iters\n\n results.status = problem.status\n results.opt_val = problem.value\n except Exception as e:\n print(e)\n # Configuration could not solve the given problem\n results = TestResults(self.testproblem, self.config)\n results.size_metrics = problem.size_metrics\n print(\"failure solving\",self.testproblem.id,\"with config\",self.config.id)\n return results\n\n # Record residual gross stats:\n results.avg_abs_resid, results.max_resid = TestInstance.compute_residual_stats(problem)\n print(\"computed stats for\", self.testproblem.id, \"with config\", self.config.id)\n\n # Record problem metrics:\n results.size_metrics = problem.size_metrics\n\n print(\"finished\",self.testproblem.id,\"with config\",self.config.id,\"at\",time.time()-start)\n return results", "title": "" }, { "docid": "1a4ddcf70a8389e9ebf5384e8f000f5b", "score": "0.5812091", "text": "def optimize(self):\n self._add_constraints()\n self.Model.update()\n self.Model.optimize()\n if (\n self.Model.Status == 2\n ): # Status = 2 means optimal solution found and model is solved\n self._solved = True\n self._head_results = self._get_head_results()\n print(\"OPTIMIZATION COMPLETE\")\n else:\n self._head_results = None\n print(\"OPTIMIZATION NOT COMPLETE\")", "title": "" }, { "docid": "9d1185bafadfc3331c9f21166a9d22f2", "score": "0.5804594", 
"text": "def baseSearch(self, gameState):\n\t\tactions = gameState.getLegalActions(self.index)\n\n\t\t# You can profile your evaluation time by uncommenting these lines\n\t\t# start = time.time()\n\t\tv = []\n\t\tfor action in actions:\n\t\t\tif action == 'Stop':\n\t\t\t\tcontinue\n\t\t\tsuccessor = gameState.generateSuccessor(self.index, action)\n\t\t\tv.append((self.evaluate(successor), action))\n\n\t\t# print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n\t\t# print \"Values in baseSearch \", v\n\t\tmaxValues = [i for i in v if i == max(v)]\n\t\tmaxValue = random.choice(maxValues)\n\n\t\t# print maxValue\n\t\tbestAction = maxValue[1]\n\t\t# print bestAction\n\t\tfoodLeft = len(self.getFood(gameState).asList())\n\t\t# print foodLeft\n\t\tif foodLeft <= 2:\n\t\t\t# print \"foodl <= 2\"\n\t\t\tbestDist = 9999\n\t\t\tfor action in actions:\n\t\t\t\tif action == 'Stop':\n\t\t\t\t\tcontinue\n\t\t\t\tsuccessor = gameState.generateSuccessor(self.index, action)\n\t\t\t\tpos2 = successor.getAgentPosition(self.index)\n\t\t\t\tdist = self.getMazeDistance(gameState.getAgentPosition(self.index), pos2)\n\t\t\t\t# print \"Comparing {0} < {1}\".format(dist, bestDist)\n\t\t\t\tif dist < bestDist:\n\t\t\t\t\t# print \"Change action to \",action\n\t\t\t\t\tbestAction = action\n\t\t\t\t\tbestDist = dist\n\t\t\t# print \"Base search return with \", bestAction\n\t\t\treturn bestAction\n\t\t# print \"Base search return with \",bestAction\n\t\treturn bestAction", "title": "" }, { "docid": "2e9eb1c81491450c937f6743bbc7fd60", "score": "0.5800815", "text": "def judge(self):\r\n temp = -1\r\n best_one = None\r\n for individual in self.individuals:\r\n distance = self.get_disctance(individual)\r\n if temp == -1 or distance < temp:\r\n temp = distance\r\n best_one = individual\r\n\r\n self.best_ones.append(best_one)", "title": "" }, { "docid": "9f58e89ad63958879fc3bcc91bf5057c", "score": "0.5797411", "text": "def optimal_eps(self):\n Network = self.Network\n W = self.get_W()\n B = self._get_B(W)\n N = Network.size()\n eps = np.zeros([N,N])\n for j in range(N):\n eps += self._get_eps_j(j,W[j],B[j])\n return eps", "title": "" }, { "docid": "35d5185a0acaa718fea4eb3c5221e4da", "score": "0.5795139", "text": "def test_griewangk():\n\n print \"Testing Griewangk:\"\n print \"Expected: x=[0.]*10 and f=0\"\n from mystic.models import griewangk as costfunc\n ndim = 10\n lb = [-400.]*ndim\n ub = [400.]*ndim\n maxiter = 10000\n seed = 123 # Re-seed for each solver to have them all start at same x0\n \n # DifferentialEvolutionSolver\n print \"\\nUsing DifferentialEvolutionSolver:\"\n npop = 50\n random_seed(seed)\n from mystic.solvers import DifferentialEvolutionSolver\n from mystic.termination import ChangeOverGeneration as COG\n from mystic.termination import CandidateRelativeTolerance as CRT\n from mystic.termination import VTR\n from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp\n esow = Monitor()\n ssow = Monitor() \n solver = DifferentialEvolutionSolver(ndim, npop)\n solver.SetRandomInitialPoints(lb, ub)\n solver.SetStrictRanges(lb, ub)\n solver.SetEvaluationLimits(generations=maxiter)\n solver.SetEvaluationMonitor(esow)\n solver.SetGenerationMonitor(ssow)\n solver.enable_signal_handler()\n #term = COG(1e-10)\n #term = CRT()\n term = VTR(0.)\n time1 = time.time() # Is this an ok way of timing?\n solver.Solve(costfunc, term, strategy=Rand1Exp, \\\n CrossProbability=0.3, ScalingFactor=1.0)\n sol = solver.Solution()\n time_elapsed = time.time() - time1\n fx = solver.bestEnergy\n print \"Solution: 
\", sol\n print \"f value: \", fx\n print \"Iterations: \", solver.generations\n print \"Function evaluations: \", len(esow.x)\n print \"Time elapsed: \", time_elapsed, \" seconds\"\n assert almostEqual(fx, 0.0, tol=3e-3)\n\n # DifferentialEvolutionSolver2\n print \"\\nUsing DifferentialEvolutionSolver2:\"\n npop = 50\n random_seed(seed)\n from mystic.solvers import DifferentialEvolutionSolver2\n from mystic.termination import ChangeOverGeneration as COG\n from mystic.termination import CandidateRelativeTolerance as CRT\n from mystic.termination import VTR\n from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp\n esow = Monitor()\n ssow = Monitor() \n solver = DifferentialEvolutionSolver2(ndim, npop)\n solver.SetRandomInitialPoints(lb, ub)\n solver.SetStrictRanges(lb, ub)\n solver.SetEvaluationLimits(generations=maxiter)\n solver.SetEvaluationMonitor(esow)\n solver.SetGenerationMonitor(ssow)\n #term = COG(1e-10)\n #term = CRT()\n term = VTR(0.)\n time1 = time.time() # Is this an ok way of timing?\n solver.Solve(costfunc, term, strategy=Rand1Exp, \\\n CrossProbability=0.3, ScalingFactor=1.0)\n sol = solver.Solution()\n time_elapsed = time.time() - time1\n fx = solver.bestEnergy\n print \"Solution: \", sol\n print \"f value: \", fx\n print \"Iterations: \", solver.generations\n print \"Function evaluations: \", len(esow.x)\n print \"Time elapsed: \", time_elapsed, \" seconds\"\n assert almostEqual(fx, 0.0, tol=3e-3)", "title": "" }, { "docid": "cc6b76e94e2da8452d3f6eb07296ad32", "score": "0.57891214", "text": "def evolutionary_algorithm(self):\n # Creating the Individual Using DEAP\n creator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\n creator.create(\"Individual\", list, fitness=creator.FitnessMax)\n\n # Creating ToolBox For The DEAP Framework\n toolbox = base.Toolbox()\n toolbox.register(\"attr_bool\", random.randint, 0, 1)\n toolbox.register(\"individual\", tools.initRepeat, creator.Individual, toolbox.attr_bool, self.n_features)\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n toolbox.register(\"evaluate\", self.fitness_test)\n toolbox.register(\"mate\", tools.cxOnePoint)\n toolbox.register(\"mutate\", tools.mutFlipBit, indpb=0.05)\n toolbox.register(\"select\", tools.selTournament, tournsize=3)\n\n # Initialize Parameters\n pop = toolbox.population(n=self.n_pop)\n hof = tools.HallOfFame(self.n_pop * self.n_gen)\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean)\n stats.register(\"min\", np.min)\n stats.register(\"max\", np.max)\n\n # Genetic Algorithm\n pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=self.n_gen, stats=stats, halloffame=hof,verbose=True)\n\n # Return Fall Of Home\n return hof", "title": "" }, { "docid": "763350b629661a962875e4097a558371", "score": "0.5786748", "text": "def best_model() :\n file_path = os.path.join(os.path.os.getcwd(), cf.PERF_FILE)\n with open(file_path, 'r') as perf_file:\n prec = perf_file.readlines()[0].split(';')\n\n precisions = [t for t in prec if len(t)>0]\n\n models = [t.split(':')[0] for t in precisions]\n models = sorted(models)\n precisions = np.array([float(t.split(':')[1]) for t in precisions])\n index_best_model = np.argmax(precisions)\n #best_model = models[index_best_model]\n\n output_files, model_dict = get_output_files()\n\n chosen_model = model_dict[int(index_best_model)+1]\n chosen_output_file = output_files[int(index_best_model)+1]\n\n print('Best model is ' + chosen_model + ' with precision ' + str(max(precisions)) + 
'%')\n\n return chosen_model, chosen_output_file", "title": "" }, { "docid": "152015f9a8d342528e38fde1197c42a4", "score": "0.57838523", "text": "def get_solution(self):\n if self.is_solution_available():\n pass\n raise errors.IllegalState()", "title": "" }, { "docid": "1af224aec567cf6f051e98f8188c816f", "score": "0.5780064", "text": "def run(self):\n if self.current_control:\n target_value = self.current_density\n else:\n target_value = self.average_cell_voltage * self.stack.n_cells\n if not isinstance(target_value, (list, tuple, np.ndarray)):\n target_value = [target_value]\n cell_voltages = []\n current_densities = []\n local_data_list = []\n global_data_list = []\n for i, tar_value in enumerate(target_value):\n current_errors = []\n temp_errors = []\n convergence_flags = []\n self.timing['simulation'] = 0.0\n self.timing['output'] = 0.0\n simulation_start_time = timeit.default_timer()\n counter = 0\n while True:\n if counter == 0:\n if self.current_control:\n self.stack.update(current_density=tar_value)\n else:\n self.stack.update(voltage=tar_value)\n else:\n self.stack.update()\n if self.stack.break_program:\n break\n current_error, temp_error = self.calc_convergence_criteria()\n current_errors.append(current_error)\n temp_errors.append(temp_error)\n if len(target_value) < 1:\n print(counter)\n counter += 1\n if ((current_error < self.it_crit and temp_error < self.it_crit)\n and counter > self.min_it) or counter > self.max_it:\n break\n if counter > self.max_it:\n convergence_flag = False\n else:\n convergence_flag = True\n convergence_flags.append(convergence_flag)\n simulation_stop_time = timeit.default_timer()\n simulation_time = simulation_stop_time - simulation_start_time\n self.timing['simulation'] += simulation_time\n output_start_time = timeit.default_timer()\n\n if not self.stack.break_program:\n # voltage_loss = self.get_voltage_losses(self.stack)\n cell_voltages.append(np.average([cell.v for cell in\n self.stack.cells]))\n current_densities.append(self.stack.i_cd_avg)\n\n case_name = 'Case'+str(i)\n self.output.save(case_name, self.stack)\n local_data_dict = self.output.get_data(self.stack)\n local_data_dict['Iterations'] = \\\n {'value': [list(range(counter))], 'units': '-'}\n local_data_dict['Current Density Error'] = \\\n {'value': [current_errors], 'units': '-',\n 'xkey': 'Iterations'}\n local_data_dict['Temperature Error'] = \\\n {'value': [current_errors], 'units': '-',\n 'xkey': 'Iterations'}\n\n local_data_list.append(local_data_dict)\n if self.output.save_plot:\n path = os.path.join(self.output.output_dir, case_name,\n 'plots', 'Convergence.png')\n self.output.create_figure(path, list(range(counter)),\n [current_errors, temp_errors],\n xlabels='Iteration',\n ylabels='Error',\n yscale='log',\n legend=['Current Density',\n 'Temperature'])\n else:\n target_value = target_value[0:-i]\n break\n\n average_current_density = \\\n np.average([np.average(cell.i_cd, weights=cell.active_area_dx)\n for cell in self.stack.cells])\n if self.stack.coolant_circuit is None:\n cool_mass_flow = 0.0\n else:\n cool_mass_flow = self.stack.coolant_circuit.mass_flow_in\n global_data = \\\n {\n 'Convergence':\n {'value': convergence_flag, 'units': ' '},\n 'Stack Voltage': {'value': self.stack.v_stack, 'units': 'V'},\n 'Average Cell Voltage':\n {'value': self.stack.v_stack / self.stack.n_cells,\n 'units': 'V'},\n 'Minimum Cell Voltage':\n {'value': np.min(self.stack.v), 'units': 'V'},\n 'Maximum Cell Voltage':\n {'value': np.max(self.stack.v), 'units': 'V'},\n 'Average Current Density':\n 
{'value': average_current_density, 'units': 'A/m²'},\n                'Stack Power Density':\n                    {'value': self.stack.v_stack * average_current_density,\n                     'units': 'W/m²'},\n                'Stack Power':\n                    {'value': self.stack.v_stack * average_current_density\n                        * self.stack.cells[0].active_area,\n                     'units': 'W'},\n                'Cooling Mass Flow Rate:':\n                    {'value': cool_mass_flow,\n                     'units': 'kg/s', 'format': '.4E'},\n                'Cathode Mass Flow Rate:':\n                    {'value': self.stack.fuel_circuits[0].mass_flow_in,\n                     'units': 'kg/s', 'format': '.4E'},\n                'Anode Mass Flow Rate:':\n                    {'value': self.stack.fuel_circuits[1].mass_flow_in,\n                     'units': 'kg/s', 'format': '.4E'},\n            }\n        global_data_list.append(global_data)\n        output_stop_time = timeit.default_timer()\n        self.timing['output'] += output_stop_time - output_start_time\n        self.output.print_global_data(self, global_data)\n        self.output.save_global_results(global_data)\n\n        output_start_time = timeit.default_timer()\n\n        # print polarization curve, if more than one current_density value\n        # was provided\n        if len(target_value) > 1:\n            self.output.write_data(current_densities, cell_voltages,\n                                   'Current Density [A/m²]', 'Cell Voltage',\n                                   units='V', directory=self.output.output_dir,\n                                   # save_csv=True, save_plot=True,\n                                   write_mode='w')\n\n        output_stop_time = timeit.default_timer()\n        self.timing['output'] += output_stop_time - output_start_time\n\n        return global_data_list, local_data_list", "title": "" }, { "docid": "182e8144cc9cf7df0a32def1d95bd1d8", "score": "0.5779991", "text": "def _eval_energy(self):\n        S = self.S\n        M = S.mera\n        SIlayer = self.SIlayer  # = 2\n        SIlayer_p1 = SIlayer + 1\n\n        for i in range(self.num_of_SIlayer):\n            # here we repeatedly ascend from SIlayer to SIlayer_p1, with tau changing\n            if i>0:\n                for o in self.ham_op_names:\n                    S.__getattribute__(o)[SIlayer][0].data[:] = S.__getattribute__(o)[SIlayer_p1][0].data[:]\n            self.ascending_ham(M, S, SIlayer, tau=SIlayer + i)\n            # above only updates H; no need to update rho, as the latter is scale-invariant and not changed in each layer\n        \n        if 1:\n            S.rho_2[SIlayer_p1][0].data[:] = S.rho_2[SIlayer][0].data[:]\n            if not S.only_NN:\n                S.rho_3[SIlayer_p1][0].data[:] = S.rho_3[SIlayer][0].data[:]\n        # in the end, evaluate the energy at the layer above the first SIlayer\n        S.eng_ham(SIlayer_p1)", "title": "" }, { "docid": "682efc983522090afcf58edc4c64d068", "score": "0.5779628", "text": "def optimize(self):\n        self.open_output_files()\n        while (self.n_iter < self.n_maxiter and not self.is_converged):\n            self.n_iter += 1\n            self.choose_step_direction(self.opt_type)\n            self.line_search(-1.0 * self.step_dir)\n            self.update_energy()\n            self.update_gradient()\n            self.traj.append_step(self.mol)\n            self.update_criteria()\n            self.check_convergence()\n            self.print_status()\n        self.close_output_files()", "title": "" }, { "docid": "3ec4666d8a6ab245f0cab4066f997667", "score": "0.57755536", "text": "def optimize_ga(self):\n\n        converged = False\n        ngens = 1\n        generation = 1\n        difference = self.tol * 10000.0\n        self.solution_history = np.zeros(self.max_generation + 1)\n        self.solution_history[0] = np.min(self.parent_fitness)\n\n        run_time = 0.0\n        start_time = time.time()\n        while converged is False and ngens < self.max_generation and \\\n                run_time < self.max_time:\n            self.crossover()\n            self.mutate()\n            # determine fitness of offspring\n            for i in range(self.population_size):\n                self.chromosome_2_variables(self.offspring_population[i])\n                self.offspring_fitness[i] = \\\n                    self.objective_function(self.design_variables)\n\n            # rank the total population from best to worst\n            total_fitness = np.append(self.parent_fitness,\n                                      self.offspring_fitness)\n            ranked_fitness = \
ranked_fitness = \\\n np.argsort(total_fitness)[0:int(self.population_size)]\n\n total_population = \\\n np.vstack([self.parent_population, self.offspring_population])\n self.parent_population[:, :] = total_population[ranked_fitness, :]\n self.parent_fitness[:] = total_fitness[ranked_fitness]\n\n # store solution history and wrap up generation\n self.solution_history[generation] = np.min(self.parent_fitness)\n\n if generation > self.convergence_iters:\n difference = \\\n self.solution_history[generation - self.convergence_iters]\\\n - self.solution_history[generation]\n else:\n difference = 1000\n if abs(difference) <= self.tol:\n converged = True\n\n # shuffle up the order of the population\n shuffle_order = np.arange(1, self.population_size)\n np.random.shuffle(shuffle_order)\n shuffle_order = np.append([0], shuffle_order)\n self.parent_population = self.parent_population[shuffle_order]\n self.parent_fitness = self.parent_fitness[shuffle_order]\n\n generation += 1\n ngens += 1\n\n run_time = time.time() - start_time\n\n # Assign final outputs\n self.solution_history = self.solution_history[0:ngens]\n self.optimized_function_value = np.min(self.parent_fitness)\n self.chromosome_2_variables(\n self.parent_population[np.argmin(self.parent_fitness)])\n self.optimized_design_variables = self.design_variables\n\n logger.debug('The GA ran for this many generations: {}'\n .format(ngens))\n logger.debug('The GA ran for this many seconds: {:.3f}'\n .format(run_time))\n logger.debug('The optimized function value was: {:.3e}'\n .format(self.optimized_function_value))\n logger.debug('The optimal design variables were: {}'\n .format(self.optimized_design_variables))", "title": "" }, { "docid": "22f62b19d875ff1dfa2e3d4bfd9a5172", "score": "0.57655716", "text": "def return_fitness(self, e):\r\n\t\treturn e.fitness", "title": "" }, { "docid": "83fe6af1079e34a1c9740f124d26c109", "score": "0.576445", "text": "def solve():\n return round(7 * (1 - choose(60, 20) / choose(70, 20)), 10)", "title": "" }, { "docid": "595ecfd9facfe91a02000cddd7224112", "score": "0.57595515", "text": "def fittest(self):\n return self.best_fit, self.best_fit_fitness", "title": "" }, { "docid": "b1c1e9395f4e30e4b41f6aa74720d615", "score": "0.57595515", "text": "def test_best_improvement():\n process_list = [1, 2, 4, 1, 3]\n expected_solution = [3, 1, 4, 2, 1]\n current_neighborhood = 0\n\n best_solution = best_improvement(process_list, current_neighborhood, 60)\n assert best_solution == expected_solution", "title": "" }, { "docid": "1774df4c382a9a263ca22e025ab62161", "score": "0.5756853", "text": "def save_result(self, output_dir):\n self.best_solver.save_tree(output_dir)\n self.best_solver.save_best_solution(output_dir)\n for i in range(self.best_solver.tube_num):\n self.best_solver.visualize_full_search(output_dir, tube_num=i, with_solution=True)\n\n filename = output_dir / \"optimize_result.txt\"\n # noinspection PyTypeChecker\n optimize_result = open(filename, \"w\")\n\n optimize_result.write(f\"Optimizer Success: {self.success}\\n\")\n optimize_result.write(f\"Goal Reached: {self.best_solver.found_solution}\\n\")\n optimize_result.write(f\"Completion Time: {self.completion_time}\\n\")\n optimize_result.write(f\"Number of Iterations: {self.num_iterations}\\n\")\n optimize_result.write(f\"Number of Function Evals: {self.num_function_eval}\\n\")\n q = \", \".join(str(i) for i in self.best_q)\n optimize_result.write(f\"Optimized Q: {q}\\n\")\n\n optimize_result.close()\n\n filename = output_dir / \"optimize_process.txt\"\n # 
noinspection PyTypeChecker\n optimize_process = open(filename, \"w\")\n\n for i in range(len(self.optimize_process)):\n q = \", \".join(str(j) for j in self.optimize_process[i].get('q'))\n cost = self.optimize_process[i].get('cost')\n optimize_process.write(f\"{q} | {cost}\\n\")\n\n optimize_process.close()", "title": "" }, { "docid": "8d155e5d49b180fe2d888a7e23955477", "score": "0.5756708", "text": "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n foods = currentGameState.getFood()\n newGhostStates=currentGameState.getGhostStates()\n newPos = currentGameState.getPacmanPosition()\n print 'New position:', newPos\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n\n\n optimal_food = []\n optimal_ghost = []\n food = currentGameState.getFood().asList()\n\n for f in food:\n optimal_food.append(manhattanDistance(newPos, f))\n if len(optimal_food) == 0:\n return 0\n a = min(optimal_food)\n\n if a == 0:\n a = currentGameState.getScore()\n cap=[]\n b = max(optimal_food)\n f=currentGameState.getNumFood()\n\n # for g in currentGameState.getGhostPositions():\n # optimal_ghost.append(manhattanDistance(newPos, g))\n # if manhattanDistance(newPos, g) == 0:\n # return -100000.0\n # elif manhattanDistance(newPos, g) > 0.0 and manhattanDistance(newPos, g) <= 1.0:\n # return -100000.0/6\n\n ghost_PM_dist = []\n ghost_dist_nearest = 0\n for ghost in newGhostStates:\n Ghost_Pos = ghost.getPosition()\n GP_dist = manhattanDistance(Ghost_Pos, newPos)\n ghost_PM_dist.append(GP_dist)\n\n if len(ghost_PM_dist) > 0:\n ghost_dist_nearest = min(ghost_PM_dist)\n else:\n ghost_dist_nearest = currentGameState.getScore()\n # print ghost_dist_nearest\n ghost_feature = 1 / (ghost_dist_nearest + 1)\n score = 0\n # c=currentGameState.getCapsules()\n # print 'cap',c\n # for i in c:\n # cap.append(manhattanDistance(newPos,i))\n # if len(optimal_ghost) > 0:\n # ghost_dist_nearest = min(optimal_ghost)\n # else:\n # ghost_dist_nearest = currentGameState.getScore()\n # # print ghost_dist_nearest\n # ghost_feature = 1 / (ghost_dist_nearest + 1)\n # score = 0\n # score=0\n # if currentGameState.isWin():\n # score = 1000000\n return currentGameState.getScore() + 1.0/(a) +100*score+ 2*(ghost_feature)\n\n util.raiseNotDefined()", "title": "" }, { "docid": "14c0ae96cd69e709a3affb6aa640c97d", "score": "0.5754821", "text": "def best_result(self):\n if self.result is not None:\n return self.result[0]\n return None", "title": "" }, { "docid": "7c226930b9f22f2d5ed885f9483f4c19", "score": "0.57431376", "text": "def evolve(self):\n #Configure properly each island\n self.__parametrize__()\n\n # Get evolution generator of each island\n self._rc[:].execute(\"gen = gp_engine.sequentially_evolve()\", block=True)\n\n # Loop over all the sessions\n i = 0\n while True:\n logging.info('Launch generation evolution')\n\n #Do generation computing\n self._rc[:].execute(\"chosen = gen.next()\")\n\n #Print best individual\n logging.info('Get best individuals')\n self._rc[:].execute(\"best = gp_engine.get_best_individual()\")\n bests = self._rc[:]['best']\n\n print \"Generation %d\" % i\n print \"=\"*15\n print \"Island\\t| fit\\t\"\n print \"\\n\".join(\\\n [ \"%d\\t%f\" % (j, bests[j][1]) \\\n for j in range(len(bests))])\n\n print \"AVG\\t%f\" %np.mean( [bests[j][1] for j in range(len(bests))])\n i = i + 1\n\n logging.info('Test if end of generation')\n #Test if the process is finished\n self._rc[:].execute('end=gp_engine.__evolver__.is_evolution_ended()')\n\n if np.any( self._rc[:]['end']):\n 
break\n\n #Operate the migration\n logging.info('Launch migration process')\n self.__migration_operator__.manage_migration()\n\n #loop again\n\n print 'Evolution terminated'\n return bests", "title": "" }, { "docid": "d73f2751a1cb356fcb986044d948ed77", "score": "0.5730213", "text": "def is_better_then_global_best(self, solution):\n return solution.personal_best.fitness_value < self.best_solution.fitness_value", "title": "" }, { "docid": "6b31563d2c606a0c2b5ebe046d544277", "score": "0.5728114", "text": "def random_search():\t\t\n\tsim = simulator()\t\t\t\t\t\t\t# simulator object, to be used during this algorithm\n\tnum_search = 20\t\t\t\t\t\t\t\t# number of robots evaluated\n\tnum_tetr_added = 14\t\t\t\t\t\t\t# this is the size of our robots ( 15 tetrahedron total )\n\n\trs_fit = np.zeros(num_search)\t\t\t\t# learning curve array\n\tbest_fit = 0\t\t\t\t\t\t\t\t# best fitness place holder\n\tbest_array = np.empty(8)\t\t\t\t\t# top performing genome place holder\n\t\n\tfor i in range(num_search):\t\t\t\t\t# do the random search\t\n\t\tprint('counter: ', i)\n\n\t\trobot = tetrahedron()\t\t\t\t\t\t\t\t\t\t\t\t\t# make a random robot\n\t\trobot.set_tetr( np.array((0 , 0 , 0)) , np.array((0 , 0 , 0)))\t\t\t# make the seed\n\t\trobot.set_genome(num_tetr_added)\t\t\t\t\t\t\t\t\t\t# generate a random genome\n\t\trobot.build_robot()\t\t\t\t\t\t\t\t\t\t\t\t\t\t# builds the robot \n\t\t\n\t\tmass_list = robot.mass_list\t\t\t\t# center the robot\n\t\tcom = robot.get_com(mass_list)\t\t\t# \t\t....\n\t\trobot.center_object(com)\t\t\t\t# \t\t....\n\n\t\tfitness = calc_fitness( robot , sim)\t# evaluate the robot's fitness\n\n\t\tif fitness > best_fit:\t\t\t\t\t# if the fitness is better than current best\n\t\t\tbest_fit = fitness \t\t\t\t\t# update placeholders\n\t\t\tbest_array = np.copy(robot.genome)\t# \t\t....\n\n\t\trs_fit[i] = best_fit\t\t\t\t\t# add the best fit to the learning curve\n\n\tnp.savetxt(\"rs_robot.csv\", best_array, delimiter=\",\")\n\tnp.savetxt(\"rs_learning.csv\", rs_fit, delimiter=\",\")", "title": "" } ]
a232b5ed960453ab1d2673894b52333d
submit a dos calculation and interpolate result if returns complete
[ { "docid": "5e0224e23e34529b3523d980d025abb2", "score": "0.0", "text": "def get_dos(self):\n\n label = 'KKR DOS calc.'\n dosdict = self.ctx.dos_params_dict\n description = 'dos calc: emin= {}, emax= {}, nepts= {}, tempr={}, kmesh={}'.format(dosdict['emin'], dosdict['emax'], dosdict['nepts'], dosdict['tempr'], dosdict['kmesh'])\n code = self.inputs.kkr\n remote = self.inputs.remote_data\n params = self.ctx.dos_kkrparams\n options = {\"max_wallclock_seconds\": self.ctx.max_wallclock_seconds,\n \"resources\": self.ctx.resources,\n \"queue_name\" : self.ctx.queue}#,\n if self.ctx.custom_scheduler_commands:\n options[\"custom_scheduler_commands\"] = self.ctx.custom_scheduler_commands\n inputs = get_inputs_kkr(code, remote, options, label, description, parameters=params, serial=(not self.ctx.use_mpi))\n\n # run the DOS calculation\n self.report('INFO: doing calculation')\n dosrun = self.submit(KkrCalculation, **inputs)\n\n # for restart workflow:\n self.ctx.last_calc = dosrun\n\n return ToContext(dosrun=dosrun)", "title": "" } ]
[ { "docid": "215366153e758085c8bc79a022ff8b6c", "score": "0.6252689", "text": "def calculate(self):\n item_number = self.fields.curselection()\n polynomial = self.json_data[\"functions\"][int(item_number[0])]\n result_floating = None\n result_interval = None\n if self.json_data[\"method\"] == \"regula_falsi\":\n try:\n result_floating = regula_falsi(polynomial[\"a\"],\n polynomial[\"b\"],\n function_from_list,\n polynomial[\"coeff\"]\n )\n except MyError as error:\n result_floating = error.value\n except ZeroDivisionError:\n result_floating = \"Unfortunately, division by 0 happened\"\n try:\n result_interval = regula_falsi_interval(polynomial[\"a\"],\n polynomial[\"b\"],\n function_from_list,\n polynomial[\"coeff\"]\n )\n result_interval = result_interval.format('%.20E')[9:]\n result_interval = result_interval[:-1]\n except MyError as error:\n result_interval = error.value\n elif self.json_data[\"method\"] == \"newton\":\n try:\n result_floating = newton(polynomial[\"x\"],\n polynomial[\"epsilon\"],\n polynomial[\"max_iterations\"],\n function_from_list,\n polynomial[\"coeff\"],\n polynomial[\"derivative_coeff\"]\n )\n except ZeroDivisionError:\n result_floating = \"Unfortunately, division by zero happened\"\n except MyError as error:\n result_floating = error.value\n try:\n result_interval = newton_interval(polynomial[\"x\"],\n polynomial[\"epsilon\"],\n polynomial[\"max_iterations\"],\n function_from_list,\n polynomial[\"coeff\"],\n polynomial[\"derivative_coeff\"]\n )\n result_interval = result_interval.format('%.20E')[9:]\n result_interval = result_interval[:-1]\n except MyError as error:\n result_interval = error.value\n else:\n result_interval = \"You propably\"\n result_floating = \"gave me wrong JSON\"\n self.show_result_floating.config(text=result_interval)\n self.show_result_interval.config(text=result_floating)", "title": "" }, { "docid": "e0830761115955abace478bf63277abd", "score": "0.6215401", "text": "def calculate_results():\n\n print('Calculating results..')", "title": "" }, { "docid": "d07116c7437a3ffc65ec7ced8caca88f", "score": "0.6053697", "text": "def _calculate(self):\n ...", "title": "" }, { "docid": "977d47a74d67da7d19a1b22a8e36d483", "score": "0.59952414", "text": "def do_calculation():\n dT = config.getfloat(\"thermodynamics\", \"Tmin\")\n try:\n nmax = config.getint(\"thermodynamics\", \"nmax\")\n except ValueError:\n nmax = config.get(\"thermodynamics\", \"nmax\")\n if not nmax == \"automatic\":\n print(\"nmax has to be given as integer or \\\"automatic\\\"\") \n try:\n analytic_n0 = config.get(\"geometry\", \"analytic_n0\")\n except ValueError:\n print(\"analytic_n0 is not given correctly: \", analytic_n0)\n analytic_n0 = True\n if type(geom.r1) == list or type(geom.r2) == list:\n analytic_n0 = False\n Tmax = config.getfloat(\"thermodynamics\", \"Tmax\")\n f = thermodynamics.finiteT(dT, nmax, Tmax, geom, analytic_n0)\n quantity = config.get(\"thermodynamics\", \"quantity\")\n result = eval(\"f.%s\" %quantity)\n return result, f", "title": "" }, { "docid": "f05dd3eeb2320b01c30e115c6928fb5f", "score": "0.5965643", "text": "def error_calculation():", "title": "" }, { "docid": "69c8ad689e17b9cd1efc84e5ba7563bd", "score": "0.59115", "text": "def calc(self, *args):\n pass", "title": "" }, { "docid": "387f4fedb43cb0287db06ecf37b13dc3", "score": "0.5901406", "text": "def calc():\r\n nonlocal func\r\n func = e.get()\r\n func = func.replace(\" \", \"\")\r\n func = toReversePolish(check(fixmult(findNeg(tokenizer(func)))))\r\n result.set(calculate(func, float(e2.get())))", 
"title": "" }, { "docid": "b3ba20e0c2fecb5806fb08b0ca839ef9", "score": "0.58904105", "text": "def calculate(**kwargs):\n return None", "title": "" }, { "docid": "29a6f09fde14e1f16e33b66e2b65226c", "score": "0.58634573", "text": "def _postCalculate(self,retList):\n\t\tpass", "title": "" }, { "docid": "3eccf3447683a2dc735f5799fbff98c3", "score": "0.58459675", "text": "def calculate(self, *args, **kwargs) -> Any:", "title": "" }, { "docid": "f37e62a56b03972b4b59b80389e4d31d", "score": "0.5809612", "text": "def compute(self):\n\n self.setInitialValue()\n self.partialResult = self.initialValue\n self.elementResult = None\n\n self.updateFunctionPort()\n\n self.set_output('Result', self.partialResult)", "title": "" }, { "docid": "81d0a1c04bf6c68b3a921b158c6c03fb", "score": "0.57289326", "text": "def OnCalcDone(self, event):\n if event.data is None:\n self.log(\"No result from calculation!\")\n self.PSF_HDUlist = None\n else:\n self.PSF_HDUlist = event.data\n for w in ['Display PSF', 'Display profiles', 'Save PSF As...']:\n self.widgets[w].Enable(True)\n\n self._refresh_window()\n self.log(\"PSF calculation complete\")\n # In either event, the worker is done\n self.calcthread = None", "title": "" }, { "docid": "3ae7ea9f7af562970e90b832e81e037a", "score": "0.5720635", "text": "def calculate(self):\n pass", "title": "" }, { "docid": "fd18a5ecb864880f5b28ca9d88e14303", "score": "0.56965977", "text": "def on_evaluate(self, interp):\n return 0.000000", "title": "" }, { "docid": "b8afeea6242d6a4492b1855f0c649af5", "score": "0.5681769", "text": "def Calculate():\r\n values = GetValues()\r\n values = SolveEqn(values)\r\n DisplayValues(values)", "title": "" }, { "docid": "b38fe131032ab41802c0adfc8618129c", "score": "0.5660559", "text": "def _run_calculation(self):\n # collect input\n input_vs = OrderedDict()\n for intf in self.input_intfs:\n if intf.sibling_intf_allowed:\n input_vs.setdefault(intf.family_name, []).append(intf.get_calculated_value())\n else:\n input_vs[intf] = intf.get_calculated_value()\n\n # run actual function defined by user\n try:\n results = self.calculate(*input_vs.values())\n except Exception as e:\n # set all outputs NULL\n results = [NullValue(f\"calculation fail of {self}\")] * len(self.output_intfs)\n # record and print error status\n self._calculation_status = e\n self.print_status()\n raise\n else:\n # wrap singular result and match length\n results = [results] if not isinstance(results, (list, tuple)) else list(results)\n results += [NullValue(\"calculation didn't return enough values\")] * (len(self.output_intfs) - len(results))\n self._calculation_status = ''\n finally:\n # push calculation result down to outputs\n for result, intf in zip(results, self.output_intfs):\n intf.set_provoke_value(result)", "title": "" }, { "docid": "e3918df179bdebeaa8326fa3d94277d7", "score": "0.5617621", "text": "def compute_value(self):\n result = 0.00000000\n if self.value1 and self.tasa:\n result = (self.value1 * 1) / self.tasa \n self.result = result\n if not self.flag1:\n self.id_record = self.env.context.get('active_id')\n self.act_model = self.env.context.get('active_model')\n self.flag1 = True\n return {\n 'view_mode': 'form',\n 'res_model': 'compute.crypto.value',\n 'res_id': self.id,\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': {\"default_act_model\":self.act_model, \"default_result\": result, \"default_id_record\": self.id_record,},\n }\n else:\n raise UserError(\"You need set all values first.\")", "title": "" }, { "docid": 
"e43cd1561f63a67897cd240b00229f59", "score": "0.55982625", "text": "def process_final(self, *args):\n return self._sum / self._count", "title": "" }, { "docid": "aad11d6d48fea29f5ab6cfd496c2674f", "score": "0.5591634", "text": "def doCalculation()->JSON_RESPONSE: \n\n # Get JSON data from client\n first:int = int(request.json['first'])\n second:int = int(request.json['second'])\n oper:str = request.json['operation']\n\n rst:int = performCalc(first, second, oper)\n\n # Prepare the return structure\n result:dict[str,int] = {\"result\":rst}\n\n # Send JSON back to the client\n return json.dumps(result)", "title": "" }, { "docid": "37453e9332d7d6b5fbee48eb2fe7abf2", "score": "0.55907995", "text": "def handle_calculate(self, value):\n #try:\n\n result = self.valid_number_check() * 1.6093\n self.root.ids.output_label.text = str(result)\n print(result)\n #except: ValueError", "title": "" }, { "docid": "11725e0d4aa23fdfc0173c1231d04274", "score": "0.5529947", "text": "def _compute(self):", "title": "" }, { "docid": "85e680e952960f28dbfe0efb0c4bc700", "score": "0.5507777", "text": "def calc(self):\n raise Exception(\"Workflow calculation not implemented yet\")", "title": "" }, { "docid": "43ddd32b2c49f7c13ef977e81a00da9d", "score": "0.54745024", "text": "def calc_results():\n global num_solutions\n subt = 100*get_subt(0) + 10*get_subt(1) + get_subt(2)\n diff = 100*get_diff(0) + 10*get_diff(1) + get_diff(2)\n minu = 1000*get_minu(0) + 100*get_minu(1) + 10*get_minu(2) + get_minu(3)\n if (subt + diff == minu) and (minu >= 1000):\n print minu, \"-\", subt, \"==\", diff\n num_solutions += 1", "title": "" }, { "docid": "4fb90820e644680bc6d5e502aead6fde", "score": "0.5453361", "text": "def calculate(self, structure, **kwargs):", "title": "" }, { "docid": "70fffa82abf3ab29e63d6e09fafe4f39", "score": "0.54355496", "text": "def _postCalculate(self,retList,time):\n\t\tif len(retList) > 0: \n\t\t\tret = pandas.concat(retList,ignore_index=True)\n\t\t\tret = self.calculateThresholdPolygon(ret,time)\n\t\t\tret['percentEffected'] = 1.\n\t\telse: \n\t\t\tret = None\n\t\treturn ret", "title": "" }, { "docid": "a94e16cfebaa89484f51c5df781ba26c", "score": "0.5427222", "text": "def __check_calc(self):\n # nothing done\n return", "title": "" }, { "docid": "166673f4de6c078d348a3759ee698ae2", "score": "0.54081076", "text": "def calculate(self, var_list):\n # Run dummy calculation.\n index = var_list[1].split(' ')[0]\n submit = call('cp files/pw-scf_{}.out pw-scf.out'.format(index),\n shell=True)\n if submit != 0:\n raise AssertionError('Could not submit job.')\n energy = self.scrape_output(self.output_file)\n\n print('iteration {0} energy {1}'.format(index, energy))\n\n return energy", "title": "" }, { "docid": "b8ed09a292242295d9bd0a9dc162cc28", "score": "0.54075795", "text": "def calificacion_final(par1, par2, par3):\n calfinal = par1 + par2 + par3\n promedio = calfinal / 3\n return promedio", "title": "" }, { "docid": "d8124e8fa74cc5964d34246b27cb46bf", "score": "0.5406774", "text": "def run_interp():\n func = str(input('Enter string or trigonometric function: '))\n print('Value l')\n l_a = float_input(positive=True)\n print('Value u')\n u_b = float_input(positive=True)\n print('Value n')\n n_c = integer_input()\n print('Your intermediate results: {}'.format(interp(func, l_a, u_b, n_c)))", "title": "" }, { "docid": "36a407754c15e9a2256c75e9d606681f", "score": "0.53978", "text": "def __calc(self):\n timezone = self.timezone # in hours, east is positive\n longitude= self.long # in decimal degrees, east is positive\n 
latitude = self.lat # in decimal degrees, north is positive\n\n time = self.time # percentage past midnight, i.e. noon is 0.5\n day = self.day # daynumber 1=1/1/1900\n \n Jday =day+2415018.5+time-timezone/24 # Julian day\n Jcent =(Jday-2451545)/36525 # Julian century\n\n Manom = 357.52911+Jcent*(35999.05029-0.0001537*Jcent)\n Mlong = 280.46646+Jcent*(36000.76983+Jcent*0.0003032)%360\n Eccent = 0.016708634-Jcent*(0.000042037+0.0001537*Jcent)\n Mobliq = 23+(26+((21.448-Jcent*(46.815+Jcent*(0.00059-Jcent*0.001813))))/60)/60\n obliq = Mobliq+0.00256*cos(rad(125.04-1934.136*Jcent))\n vary = tan(rad(obliq/2))*tan(rad(obliq/2))\n Seqcent = sin(rad(Manom))*(1.914602-Jcent*(0.004817+0.000014*Jcent))+sin(rad(2*Manom))*(0.019993-0.000101*Jcent)+sin(rad(3*Manom))*0.000289\n Struelong= Mlong+Seqcent\n Sapplong = Struelong-0.00569-0.00478*sin(rad(125.04-1934.136*Jcent))\n declination = deg(asin(sin(rad(obliq))*sin(rad(Sapplong))))\n \n eqtime = 4*deg(vary*sin(2*rad(Mlong))-2*Eccent*sin(rad(Manom))+4*Eccent*vary*sin(rad(Manom))*cos(2*rad(Mlong))-0.5*vary*vary*sin(4*rad(Mlong))-1.25*Eccent*Eccent*sin(2*rad(Manom)))\n\n hourangle= deg(acos(cos(rad(90.833))/(cos(rad(latitude))*cos(rad(declination)))-tan(rad(latitude))*tan(rad(declination))))\n\n self.solarnoon_t=(720-4*longitude-eqtime+timezone*60)/1440\n self.sunrise_t =self.solarnoon_t-hourangle*4/1440\n self.sunset_t =self.solarnoon_t+hourangle*4/1440", "title": "" }, { "docid": "9393ef12dcb8b1ff273f5733e0539ddb", "score": "0.53911626", "text": "def compute(self) -> float:\n raise NotImplementedError", "title": "" }, { "docid": "b254264ee7ae0c135148c1ce91717f2d", "score": "0.5388854", "text": "def Exemples_calculs():\n while True :\n (xA,yA)=input_coord(\"A\")\n while True :\n (xB,yB)=input_coord(\"B\")\n if xB!=xA or yB!=yA :\n break\n else :\n display(\"Il faut deux points distincts pour faire une droite ! 
Recommencez !\")\n\n intro=Latex(\"\"\"Nous allons calculer le coefficient directeur de la droite (AB) avec\n A( {} ; {} ) et B( {} ; {} )\"\"\".format(xA,yA,xB,yB))\n if xA==xB :\n\n calcul=Latex(\"\"\"Les droites parallèles à l'axe des ordonnées n'ont pas de coefficient directeur !\n Il faut donc que vos points n'aient pas la même abscisse :\"\"\")\n else :\n frac=fractions.Fraction((yB-yA),(xB-xA))\n if frac==int(frac):\n frac=str(int(frac))\n else :\n frac=\"\\dfrac{{{numerator}}}{{{denominator}}}\".format(numerator=frac.numerator,denominator=frac.denominator)\n calcul=Math(\"m= \\dfrac{{y_B-y_A}}{{x_B-x_A}}= \\dfrac{{{yB}-{yA}}}{{{xB}-{xA}}}= \\dfrac{{{yByA}}}{{{xBxA}}}={frac}\"\n .format(xA=xA,yA=yA,xB=xB,yB=yB,yByA=yB-yA,xBxA=xB-xA,frac=frac))\n Trace_droite(xA,yA,xB,yB)\n display(intro,calcul)\n s=input(\"Voulez-vous recommencer ?(o/n)\")\n if s.lower() in ['n','non','no'] :\n break\n else :\n clear_output()", "title": "" }, { "docid": "5dd7780b29a79b6fd8d3b696267d256c", "score": "0.53845376", "text": "def processAlgorithm(self, parameters, context, feedback):\n\n # Load present raster\n present_raster = self.parameterAsRasterLayer(parameters, self.INPUT1, context)\n ds_present = gdal.Open(present_raster.dataProvider().dataSourceUri())\n arr_present = ds_present.GetRasterBand(1).ReadAsArray()\n # Clean negative values\n arr_present[arr_present<0] = 0\n\n # Load future raster\n future_raster = self.parameterAsRasterLayer(parameters, self.INPUT2, context)\n ds_future = gdal.Open(future_raster.dataProvider().dataSourceUri())\n arr_future = ds_future.GetRasterBand(1).ReadAsArray()\n # Clean negative values\n arr_future[arr_future<0] = 0\n\n print(arr_future)\n print(np.unique(arr_future))\n # Parameters\n V = self.parameterAsDouble(parameters, self.INPUT5, context)\n\n r = 0\n\n c = 3\n\n area_pixel = self.parameterAsInt(parameters, self.PIXEL_RES, context) * self.parameterAsInt(\n parameters, self.PIXEL_RES, context)\n # Years\n present = self.parameterAsInt(parameters, self.INPUT3, context)\n future = self.parameterAsInt(parameters, self.INPUT4, context)\n\n # Calculate coeff sequestration\n arr_diff = arr_future - arr_present\n arr_diff_norm = arr_diff / float((future - present))\n arr_years = np.array(range(0, future - present))\n coeff = sum(1 / ((1 + r / 100) ** arr_years * ((1 + c / 100) ** arr_years)))\n carbon_sequestration_value = V * arr_diff_norm * coeff\n carbon_sequestration_difference = arr_future - arr_present\n # Initialize and write on output raster\n path_output = self.parameterAsString(parameters, self.OUTPUT, context)\n file_output = path_output + '/SE_01_carbon_sequestration_delta_euro.tiff'\n driver = gdal.GetDriverByName(\"GTiff\")\n [cols, rows] = carbon_sequestration_value.shape\n carbon_sequestration_difference_area = np.sum(carbon_sequestration_difference) / (cols * rows * area_pixel)\n outdata = driver.Create(file_output, rows, cols, 1, gdal.GDT_Float64)\n outdata.SetGeoTransform(ds_present.GetGeoTransform())##sets same geotransform as input\n outdata.SetProjection(ds_present.GetProjection())##sets same projection as input\n outdata.GetRasterBand(1).WriteArray(carbon_sequestration_value)\n print(np.max(outdata.GetRasterBand(1).ReadAsArray()))\n outdata.FlushCache() ##saves to disk!!\n outdata = None\n band = None\n ds = None\n print(np.sum(arr_present))\n report_output = path_output + '/SE_sequestro_carbonio.txt'\n f = open(report_output, \"w+\")\n today = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')\n f.write(\"Sommario dell'analisi del sequestro di 
carbonio\\n\")\n f.write(\"Data: \" + today +\"\\n\\n\\n\")\n f.write(\"Analisi stato di fatto\\n\\n\")\n f.write(\"Anno corrente: %i \\n\" % (present))\n f.write(\"Sequestro carbonio Stato attuale (ton Corg): %f \\n\" % (np.sum(arr_present)))\n f.write(\"Valore medio del carbonio sequestrato per unità di superficie - Stato attuale (ton Corg/ha): : %f \\n\" % (\n np.sum(arr_present) / (cols * rows * area_pixel) * 10000))\n f.write(\"Valore totale del sequestro di carbonio (€): %f \\n\\n\\n\" % ((np.sum(arr_present)*V*coeff)/ float((future - present))))\n f.write(\"Analisi stato di progetto\\n\\n\")\n f.write(\"Anno progetto: %i \\n\" % (future))\n f.write(\"Sequestro carbonio Stato di progetto (ton Corg): %f \\n\" % (np.sum(arr_future)))\n f.write(\"Valore medio del carbonio sequestrato per unità di superficie - Stato di progetto (ton Corg/ha): %f \\n\" % (\n np.sum(arr_future) / (cols * rows * area_pixel) * 10000))\n f.write(\"Valore totale del sequestro di carbonio (€): %f \\n\\n\\n\" % ((np.sum(arr_future)*V*coeff) / float((future - present))))\n f.write(\"Differenze tra stato di progetto e stato attuale\\n\\n\")\n f.write(\"Periodo di analisi: %i - %i\\n\" % (present, future))\n f.write(\"Differenza di sequestro carbonio (ton Corg): %f \\n\" % (np.sum(arr_diff)))\n f.write(\"Differenza carbonio sequestrato per unità di superficie (ton Corg/ha): %f \\n\" % (\n carbon_sequestration_difference_area * 10000))\n f.write(\"Differenza in termini economici del SE di sequestro di carbonio (stato di progetto – stato attuale) (€):%d \\n\" % (\n np.sum(carbon_sequestration_value)))\n return {self.OUTPUT: carbon_sequestration_value}\n\n \n # ----------------------------------------------------------------------------------- \n # Copyright (c) 2021 Città di Torino.\n # \n # This material is free software: you can redistribute it and/or modify\n # it under the terms of the GNU General Public License as published by\n # the Free Software Foundation, either version 2 of the License, or\n # (at your option) any later version.\n # \n # This program is distributed in the hope that it will be useful,\n # but WITHOUT ANY WARRANTY; without even the implied warranty of\n # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n # GNU General Public License for more details.\n # \n # You should have received a copy of the GNU General Public License\n # along with this program. 
If not, see http://www.gnu.org/licenses.\n # ----------------------------------------------------------------------------------- ", "title": "" }, { "docid": "ddba8492bd02da00237d8e1464e5a126", "score": "0.5371956", "text": "def _run(self):\n self._solve()\n return self._ret", "title": "" }, { "docid": "11080b93adcf2859b7f8662c9f7e6fff", "score": "0.53654003", "text": "def _compute(y, t, N, b, k):\n S, I, R = y\n #print(t)\n '''The Susceptible Equation'''\n dSdt = -b[int(t)] * S * I / N\n \n '''The Infected Equation = -The Susceptible Equation - The Recovered Equation'''\n dIdt = b[int(t)] * S * I / N - k * I\n \n '''The Recovered Equation'''\n dRdt = k * I\n return dSdt, dIdt, dRdt", "title": "" }, { "docid": "fd122062e4d6256eaf6d4aa2bbf05994", "score": "0.53594244", "text": "def integral(self, time1, time2):", "title": "" }, { "docid": "1fb010d6f5ea79ce4b09974b0bc0d8c9", "score": "0.5352363", "text": "def bulk_calc(self):\n self.calc_csa()\n self.calc_CCC()\n self.calc_impedence()\n self.calc_runVd()", "title": "" }, { "docid": "7ca2393dd1b3cb44fb5ee38d774cc7de", "score": "0.5340337", "text": "def compute_values(self):\n pass", "title": "" }, { "docid": "7ca2393dd1b3cb44fb5ee38d774cc7de", "score": "0.5340337", "text": "def compute_values(self):\n pass", "title": "" }, { "docid": "c8e6b33eeb34f158ca8b2ddbd6257563", "score": "0.5340099", "text": "def run_calculation(self):\n\n if w.ignore_history_false.isChecked():\n\n fp = w.input_file_line_edit.text()\n\n w.df, w.cache = read_from_file(fp, raw_mt=True)\n # TODO: Check if the filter index is still necessary when matching string indices in match_target_mt below.\n filter_index = w.df.query('status == \"mt\"').index\n w.df = w.df.loc[filter_index]\n\n # Create boolean filter with all repetitions except for first occurrences set to True\n filter_index = w.df.query('stype == \"source\"').duplicated('text')\n w.df = w.df[-filter_index]\n\n else:\n w.df = new_translation(w.df, w.cache, w.sample_object)\n\n df_mt = match_target_mt(w.df)\n\n w.cache = pe_density(df_mt, w.cache)\n w.actionSave_as.setEnabled(True)\n w.textOutput.setText(str('Your Post-Edit Density score is {:.3f}\\n'.format(w.cache['ped'])))\n w.statistics()\n\n w.autosave()", "title": "" }, { "docid": "db551929b95c9e61285499b44233dd40", "score": "0.5333397", "text": "def calc(self, x):\n pass", "title": "" }, { "docid": "9ab45ce819ad5f27ffcf041865253bdf", "score": "0.53273827", "text": "def post(self, start_value, end_value):\n start_value, end_value = int(start_value), int(end_value)\n engine_name = get_engine_name()\n if os.path.exists(engine_name):\n self.__clean_hashes()\n\n if not os.path.exists(json_filename):\n create_minion_json()\n engine_db_name = 'sqlite:///' + engine_name\n try:\n p = Process(target=calculate_range, args=(start_value, end_value, engine_db_name, m_id))\n p.start()\n processes.append(p)\n except:\n print('Error: failed to run calculation on minion ' + str(m_id) + '.')\n return {'status': 'error', 'message': 'calculation failed'}\n update_status('busy')\n return {'status': 'busy', 'message': 'calculation started'}", "title": "" }, { "docid": "81f4e7059b0fd4d4a86a03694951504c", "score": "0.5327031", "text": "def compute():\n message1[\"text\"] = \"\" #reset messages\n message2[\"text\"] = \"\"\n message4[\"text\"] = \"\"\n message5[\"text\"] = \"\" \n message6[\"text\"] = \"\" \n message7[\"text\"] = \"\" \n \n smux=str(smux_entry.get()) #return the smux value in the tkinter entry \n try:\n volts_min=float(volt_min_entry.get()) # min voltage \n 
volts_max=float(volt_max_entry.get()) # max voltage \n R=float(resistance_entry.get()) # protection resistance \n if abs(volts_max)>20 or abs(volts_min)>20:\n texte5=\"abs(volt) <=10\"\n message5[\"text\"] = texte5 \n print(texte5) \n else: \n nb=int(point_number_entry.get()) #measures nb\n if nb<1:\n texte4=\"nb>0\"\n message4[\"text\"] = texte4 \n print(texte4) \n else:\n # delay=float(delay_entry.get()) #not needed\n # print(smux,volts_min,volts_max,nb,delay) #used to debug\n try:\n complete_measure(volts_min,volts_max,nb,smux=smux,R=R,compliance=compliance)\n texte7=\"Measures done\"\n message7[\"text\"] = texte7 \n print(texte7) \n except Exception as ex:\n texte6=\"Error from measurement detected: %s\" % str(ex) #display the error name\n message6[\"text\"] = texte6 \n print(texte6) \n reset() \n except:\n texte2=\"floats\"\n message2[\"text\"] = texte2 \n print(texte2)", "title": "" }, { "docid": "1594a7502a7cae5142f313f0066993f3", "score": "0.53201216", "text": "def func(progress_remaining: float) -> float:\n return initial_value", "title": "" }, { "docid": "c176dcfdf4a68d52eaa145558ca732f5", "score": "0.53149915", "text": "def calc(self, pvalues, lamb):\n\t\traise NotImplementedError", "title": "" }, { "docid": "2c01c7f7208d17f565623b9efa9f2829", "score": "0.5313737", "text": "def _evaluate_function(self, inputs, aliases):\n # TODO how to handle aliases????\n method = self._sub_name\n request = inputs.pop('request', None) # sometimes just the inputs will be used without the request, like in elasticity curves\n result = self._obj.evaluate(method, request, inputs)\n balance = result[0]\n meta = result[1]\n return balance, meta", "title": "" }, { "docid": "0b7bf3ab90c18dae7b8553fba4cb5d3e", "score": "0.53023535", "text": "def calculate(self, mol: Molecule) -> float:", "title": "" }, { "docid": "034d41aa8d8f3385ce7f1536f5f8b7b5", "score": "0.52879363", "text": "def __calc(self):\n timezone = self.timezone # in hours, east is positive\n longitude= self.long # in decimal degrees, east is positive\n latitude = self.lat # in decimal degrees, north is positive\n\n time = self.time # percentage past midnight, i.e. noon is 0.5\n day = self.day # daynumber 1=1/1/1900\n \n # NOAA Spreadsheet\n Jday = day+2415018.5+time-timezone/24. 
# Julian day\n Jcent = (Jday-2451545)/36525 # Julian century\n\n Manom = 357.52911+Jcent*(35999.05029-0.0001537*Jcent)\n Mlong = 280.46646+Jcent*(36000.76983+Jcent*0.0003032)%360\n Eccent = 0.016708634-Jcent*(0.000042037+0.0001537*Jcent)\n Mobliq = 23+(26+((21.448-Jcent*(46.815+ \\\n Jcent*(0.00059-Jcent*0.001813))))/60)/60\n obliq = Mobliq+0.00256*cos(rad(125.04-1934.136*Jcent))\n vary = tan(rad(obliq/2))*tan(rad(obliq/2))\n Seqcent = sin(rad(Manom))*(1.914602-Jcent*(0.004817+0.000014*Jcent)) \\\n +sin(rad(2*Manom))*(0.019993-0.000101*Jcent)+ \\\n sin(rad(3*Manom))*0.000289\n Struelong= Mlong+Seqcent\n Sapplong = Struelong-0.00569-0.00478*sin(rad(125.04-1934.136*Jcent))\n declination = deg(asin(sin(rad(obliq))*sin(rad(Sapplong))))\n \n eqtime = 4*deg(vary*sin(2*rad(Mlong))-2*Eccent*sin(rad(Manom))+ \\\n 4*Eccent*vary*sin(rad(Manom))*cos(2*rad(Mlong))- \\\n 0.5*vary*vary*sin(4*rad(Mlong))- \\\n 1.25*Eccent*Eccent*sin(2*rad(Manom)))\n\n hourangle= deg(acos(cos(rad(90.833))/(cos(rad(latitude))* \\\n cos(rad(declination)))-tan(rad(latitude))*tan(rad(declination))))\n\n self.solarnoon_t = (720-4*longitude-eqtime+timezone*60)/1440.\n self.sunrise_t = self.solarnoon_t-hourangle*4/1440.\n self.sunset_t = self.solarnoon_t+hourangle*4/1440.", "title": "" }, { "docid": "ee1d7a33c192af97967f2c8a2e3c713b", "score": "0.5286321", "text": "def execute_strategy(self):\n data = self.results.copy().dropna()\n data['position'] = np.sign(data['returns'].rolling(self.mom).mean())\n data['strategy'] = data['position'].shift(1) * data['returns']\n # determine when trades take place\n # trades = data['position'].diff().fillna(0) != 0\n # subtract transaction costs from return where trades take place\n # data['strategy'][trades] -= self.tc\n data['creturns'] = data['returns'].cumsum().apply(np.exp)\n data['cstrategy'] = data['strategy'].cumsum().apply(np.exp)\n self.results = data\n # absolute performance of indicator\n aperf = data['cstrategy'].ix[-1]\n # out/under performance of indicator\n operf = aperf - data['creturns'].ix[-1]\n return round(aperf, 2), round(operf, 2)", "title": "" }, { "docid": "7b38b01232a3f553a7ee77b5f648b7ec", "score": "0.5280257", "text": "def calc(self):\n sorted_pop = self.sortpop()\n initial_route = self.population[sorted_pop[0][0]]\n distance = 1 / sorted_pop[0][1]\n progress = [ distance ]\n if callable(self.animate):\n self.plot = True\n individual = Individual(initial_route)\n stop_animation = Event()\n self.animate(individual, progress, stop_animation, plot_conclusion=initial_route)\n else:\n self.plot = False\n if self.prn:\n print(f\"Initial distance: {distance}\")\n try:\n if self.plot:\n for i in range(self.generations):\n population = self.next_gen()\n sorted_pop = self.sortpop()\n distance = 1 / sorted_pop[0][1]\n progress.append(distance)\n if self.prn:\n print(f\"[Generation:{i}] Current distance: {distance}\")\n route = population[sorted_pop[0][0]]\n individual.update(route)\n else:\n for i in range(self.generations):\n population = self.next_gen()\n distance = 1 / self.sortpop()[0][1]\n if self.prn:\n print(f\"[Generation:{i}] Current distance: {distance}\")\n \n \n except KeyboardInterrupt:\n pass\n try:\n stop_animation.set()\n except NameError:\n pass\n final_route_index = self.sortpop()[0][0]\n final_route = population[final_route_index]\n if self.prn:\n print(\"Final route:\", final_route)\n\n return initial_route, final_route, distance", "title": "" }, { "docid": "7b38b01232a3f553a7ee77b5f648b7ec", "score": "0.5280257", "text": "def calc(self):\n sorted_pop = 
self.sortpop()\n initial_route = self.population[sorted_pop[0][0]]\n distance = 1 / sorted_pop[0][1]\n progress = [ distance ]\n if callable(self.animate):\n self.plot = True\n individual = Individual(initial_route)\n stop_animation = Event()\n self.animate(individual, progress, stop_animation, plot_conclusion=initial_route)\n else:\n self.plot = False\n if self.prn:\n print(f\"Initial distance: {distance}\")\n try:\n if self.plot:\n for i in range(self.generations):\n population = self.next_gen()\n sorted_pop = self.sortpop()\n distance = 1 / sorted_pop[0][1]\n progress.append(distance)\n if self.prn:\n print(f\"[Generation:{i}] Current distance: {distance}\")\n route = population[sorted_pop[0][0]]\n individual.update(route)\n else:\n for i in range(self.generations):\n population = self.next_gen()\n distance = 1 / self.sortpop()[0][1]\n if self.prn:\n print(f\"[Generation:{i}] Current distance: {distance}\")\n \n \n except KeyboardInterrupt:\n pass\n try:\n stop_animation.set()\n except NameError:\n pass\n final_route_index = self.sortpop()[0][0]\n final_route = population[final_route_index]\n if self.prn:\n print(\"Final route:\", final_route)\n\n return initial_route, final_route, distance", "title": "" }, { "docid": "75cc49ea82b117f6b2d47af646f819d0", "score": "0.527877", "text": "def _evaluate(data, context):\n # print \"Received : \", data, \" Sequence Number : \", context[\"data_sequence_no\"]\n score = 0\n secondary_score = 0\n\n for k in range(100):\n time.sleep(random.randint(0,100)/1000.0)\n percent_complete = k*1.0/100 * 100\n update_progress(context, percent_complete, \"\")\n # print \"Context Response Channel ::: \", context['response_channel']\n # if k%20==0:\n # # print \"Update : \", percent_complete\n score += random.randint(1,100)*1.0/0.7 / 100\n secondary_score += random.randint(1,100)*1.0/0.7 / 100\n\n _result_object = {\n \"score\" : score,\n \"secondary_score\" : secondary_score,\n }\n return _result_object", "title": "" }, { "docid": "e0178d47f659aae4e12fbcd69db2468c", "score": "0.52750695", "text": "def on_pushButton_calc_clicked(self):\r\n # TODO: not implemented yet\r\n if self.str1 == '':\r\n a = 0\r\n else:\r\n a = float(self.str1)\r\n if self.str2 == '':\r\n b = 0\r\n else:\r\n b = float(self.str2)\r\n res = 0\r\n if self.op == '+':\r\n res = a + b\r\n elif self.op == '-':\r\n res = a - b\r\n elif self.op == '*':\r\n res = a * b\r\n elif self.op == '/':\r\n if b == 0:\r\n res = 0\r\n else:\r\n res = a / b\r\n\r\n self.lineEdit_result.setText(str(res))\r\n self.flag = 2\r\n\r\n # raise NotImplementedError\r", "title": "" }, { "docid": "320ea0ff97999dcee761fcc123dd114c", "score": "0.52461845", "text": "def calculate(self):\n self.correct_extinction()\n self.rdistance = calculate_radial_distance(self.position, self.center,\n self.distance)\n self.r23 = calculate_r23(self.fluxes)\n self.calculate_OH()\n self.SFR = calculate_sfr(self.distance, self.fluxes['halpha'])", "title": "" }, { "docid": "23dd86b596bed8c6aac3dcd00301e5c7", "score": "0.5244022", "text": "def compute(self, today, assets, out, opens, closes):\n out[:] = (opens[-1] - closes[0]) / closes[0]", "title": "" }, { "docid": "23dd86b596bed8c6aac3dcd00301e5c7", "score": "0.5244022", "text": "def compute(self, today, assets, out, opens, closes):\n out[:] = (opens[-1] - closes[0]) / closes[0]", "title": "" }, { "docid": "a33836659fccc659301fd2466ce5d572", "score": "0.5242563", "text": "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "title": "" }, { "docid": 
"675188c454c830f80a91cf94327c7e3e", "score": "0.52329695", "text": "def process_final(self, *args):\n mean = self._sum / self._count\n mean = mean ** 2\n variance = (self._sum_sq / self._count) - mean\n if variance < 0:\n return 0\n else: \n return variance ** 0.5", "title": "" }, { "docid": "598dc322c12f3f2638ec51cc2bcf894c", "score": "0.52291757", "text": "def func(progress_remaining: float):\n return progress_remaining * initial_value", "title": "" }, { "docid": "14aa6575bdb29a5d01bfab980c63bd12", "score": "0.5228993", "text": "def linear_schedule(progress):\n return 1 - progress", "title": "" }, { "docid": "c27142e62aa568661d8bbdf43d69f8b6", "score": "0.5228111", "text": "def computation(self, results, *parameters):\n\n return self.add_computation(computation(results, *parameters))", "title": "" }, { "docid": "7768d4b0f72db48a4418dd627fdd9c73", "score": "0.52274144", "text": "def _compute(self):\n pass", "title": "" }, { "docid": "6abf202e5afb80f54132416bc47cb3f1", "score": "0.52269894", "text": "def inspect_calculation(self):\n converged_mc = True\n self.ctx.restart_calc = self.ctx.calculation['retrieved']\n if converged_mc:\n self.report(\"Calculation converged, terminating the workflow\")\n self.ctx.done = True", "title": "" }, { "docid": "28e2904de8eda761267f0b480736bd0d", "score": "0.52208334", "text": "def thread_start_calculating(self, distance_value):\n print(\"Breakpoint Distance value:{}\".format(distance_value))\n logger.info('Slot:thread_start_calculating :', \n extra={'sim_index': self._data_store.get_simulation_index()})\n\n if distance_value == 0:\n logger.info('Slot:thread_start_calculating SINGLE STEP NOT IMPLEMENTED:', \n extra={'sim_index': self._data_store.get_simulation_index()})\n #TODO - finish this breakpoint case\n self.simulationComputing = False\n\n elif distance_value == -1:\n logger.info('Slot:thread_start_calculating RUN TO COMPLETION :', \n extra={'sim_index': self._data_store.get_simulation_index()})\n # set the breakpoint to be a very large number to indicate run to completion\n self.breakpointDistance = 9999999\n self.simulationComputing = True\n else:\n # run to the distance value point in the track\n sim_index = self._data_store.get_simulation_index()\n if distance_value > self._data_store.get_distance_at_index(sim_index) :\n logger.info('Slot:thread_start_calculating RUN TO DISTANCE :', \n extra={'sim_index': sim_index})\n # requested breakpoint is further down the track\n self.breakpointDistance = distance_value\n # Start computing and acknowledge to MainWindow by sending a signal back \n self.simulationThreadSignal.emit(\"Calculating...\")\n # \"state\" variable indicating thread should be calculating\n self.simulationComputing = True\n else:\n logger.info('Slot:thread_start_calculating PAST REQUESTED DISTANCE :', \n extra={'sim_index': sim_index})\n # simulation has already past this point in the track, don't proceed\n self.simulationComputing = False", "title": "" }, { "docid": "4213b442ff6408444f7a99b113d82637", "score": "0.52148896", "text": "def _calculate_avg():\n while (True):\n _avg_num(_q_uwb_a, q_to_a, \"to_a\", 8)\n _avg_num(_q_uwb_b, q_to_b, \"to_b\", 8)\n time.sleep(0.015)", "title": "" }, { "docid": "6a06c78d7eb16247bf403e1389083f14", "score": "0.52093", "text": "def integrate(self, t):", "title": "" }, { "docid": "4ee388bb9a884aa4cea1bef1a52b88fa", "score": "0.5208004", "text": "def __call__(self, *args, **kwargs) -> Any:\n return self.calculate(*args, **kwargs)", "title": "" }, { "docid": "a7a19b61964963525e9d4ad7f5b8669f", 
"score": "0.5207855", "text": "def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value", "title": "" }, { "docid": "a7a19b61964963525e9d4ad7f5b8669f", "score": "0.5207855", "text": "def func(progress_remaining: float) -> float:\n return progress_remaining * initial_value", "title": "" }, { "docid": "54047f7afc5fafe5f39587ee486749fd", "score": "0.5203203", "text": "def __cost_function(self, sense='single'):\n\n self.SnappyHexMeshrun()\n self.Optimisationrun()\n if sense==\"single\":\n t = self.RunUtilities()\n return p\n elif sense==\"multi\":\n t, p = self.RunUtilities(sense=sense)\n return t, p\n else:\n print(\"Invalid input for sense: \", sense)\n print(\"Available options are: 'single' or 'multi'\")\n return None", "title": "" }, { "docid": "30447d2a14ea9b45e16c20607615711e", "score": "0.5196032", "text": "def calculate_derivative(param):", "title": "" }, { "docid": "e1106def928c6e8ab53fb563e3bff385", "score": "0.51902646", "text": "def evaluate(self) -> None:\n\n self._generate_paths()\n self._compute_payoff_fn()\n self._is_evaluated = True", "title": "" }, { "docid": "f1b28047d6734cbcd08da68de38b381b", "score": "0.51892775", "text": "def done(self, result):\n output_file = result['output']\n GLF = result['GLF']\n Qt.QApplication.restoreOverrideCursor()\n self.statusBar().showMessage('GLF = {:0.1f}%'.format(GLF*100))\n # Show a MessageBox.\n QtWidgets.QMessageBox.information(self, 'Ausgabe erzeugt',\n str(output_file),\n QtWidgets.QMessageBox.Ok)\n self.start_button.setEnabled(True)\n\n if self.plot_timeseries:\n try: # Create a line plot of the time series in a new window\n self.plot_window = PlotWindow()\n self.plot_window.update_lineplot(result['df_sum'])\n self.plot_window.show()\n except Exception as e:\n logger.exception(e)\n QtWidgets.QMessageBox.critical(self, 'Fehler', str(e),\n QtWidgets.QMessageBox.Ok)", "title": "" }, { "docid": "daceaadc42197c7d5f2b3753e3192aee", "score": "0.5189256", "text": "def when_computation_done(self, out, param):\r\n tkMessageBox.showinfo('Computation is done', out)\r\n self.set_state('normal')\r\n\r\n if self.plugin_type == \"Chimera\":\r\n self.profile_wd = os.path.realpath(\r\n os.path.normcase(self.main_parameters[1] + \"/chimera/\"))\r\n\r\n tunnels_script = os.path.join(self.profile_wd, 'complex.py')\r\n paths_script = os.path.join(self.wd, 'paths.py')\r\n pores_script = os.path.join(self.wd, 'pores.py')\r\n autopores_script = os.path.join(self.wd, 'autopores.py')\r\n dom = parse(os.path.join(self.xml_wd, 'tunnels.xml'))\r\n\r\n for e in dom.getElementsByTagName('Exception'):\r\n tkMessageBox.showinfo('Exception', e.getAttribute('Text'))\r\n\r\n if self.plugin_type == \"PyMOL\":\r\n\r\n if param == \"pores\":\r\n self.group_objects(tunnels_script, 'Pores', \"MOLE_tunnels\")\r\n else:\r\n self.group_objects(tunnels_script, 'Tunnels', \"MOLE_pores\")\r\n\r\n self.group_objects(paths_script, 'Paths', 'MOLE_Paths')\r\n self.group_objects(pores_script, 'Pores', 'MOLE_Pores')\r\n self.group_objects(autopores_script, 'AutoPores', 'MOLE_AutoPores')\r\n plugin().show(\"cgo\", \"Interior*\")\r\n plugin().set_view(self.original_view)\r\n\r\n else:\r\n plugin.run(tunnels_script)\r\n\r\n self.set_state('normal')", "title": "" }, { "docid": "8bc4859850d902bbbabda8a6f64c2238", "score": "0.51880544", "text": "def render(self, request):\n self._calculatePi(request).addCallback(self._gotResult, request)\n return server.NOT_DONE_YET", "title": "" }, { "docid": "7174f03ecc0b1631771bcd650b043bf5", "score": "0.5187847", "text": 
"def _postCalculate(self,retList,time):\n\t\tmodList = []\n\t\tfor vals in retList: \n\t\t\tvals[\"percentEffected\"] = vals.apply(lambda x: self.getPercent(x['severity'],x['ToxicLoad']),axis=1)\n\t\t\tvals = self.calculateThresholdPolygon(vals,time)\n\t\t\tmodList.append(vals)\n\n\t\treturn pandas.concat(modList,ignore_index=True)", "title": "" }, { "docid": "7174f03ecc0b1631771bcd650b043bf5", "score": "0.5187847", "text": "def _postCalculate(self,retList,time):\n\t\tmodList = []\n\t\tfor vals in retList: \n\t\t\tvals[\"percentEffected\"] = vals.apply(lambda x: self.getPercent(x['severity'],x['ToxicLoad']),axis=1)\n\t\t\tvals = self.calculateThresholdPolygon(vals,time)\n\t\t\tmodList.append(vals)\n\n\t\treturn pandas.concat(modList,ignore_index=True)", "title": "" }, { "docid": "36b0c8b9bdb177f85aa5d45a556a0e22", "score": "0.5185649", "text": "def Calculate(self):\n pass", "title": "" }, { "docid": "b214cdc4438aebfa3d63f75c8e78c910", "score": "0.5171598", "text": "def compute_pi_main(workers, accuracy, batchsize):\n n_total = 0\n s_total = 0\n tasks_queue = mp.Queue() # each task (or batch) is represented by the batchsize\n done_queue = mp.Queue() # result of each task is the number of successes in that batch\n processes = mp.Pool(workers, initializer=compute_pi_worker, initargs=(tasks_queue, done_queue))\n\n for _ in range(workers):\n tasks_queue.put(batchsize)\n\n while True:\n s = done_queue.get(block=True)\n s_total += s\n n_total += batchsize\n pi_est = (4.0 * s_total) / n_total\n error = abs(math.pi - pi_est)\n\n if error > accuracy:\n tasks_queue.put(batchsize)\n else:\n processes.terminate()\n processes.join()\n break\n\n print(\"Estimate:\", pi_est)\n print(\"Error:\", error)\n print(\"Steps:\", n_total)\n return pi_est, error, n_total", "title": "" }, { "docid": "67da759295be1801c868c36af3dd3052", "score": "0.5168196", "text": "def calculate_float(self, default: int):\n self.total_float = self.latest_finish - self.earliest_finish\n\n if not self.successors:\n self.free_float = default - self.earliest_finish\n else:\n self.free_float = min(s.earliest_start for s in self.successors) - self.earliest_finish", "title": "" }, { "docid": "ce99a8d17f1502a9ede8378469d864af", "score": "0.5166104", "text": "def calculate(s):", "title": "" }, { "docid": "fff35d5e25b8885f6ecc9ff6d4200e5b", "score": "0.5155409", "text": "def handler(event, context):\n try:\n parsed_event = CalculatorEvent(**event)\n except ValidationError as exp:\n raise Exception(json.dumps(f\"[BadRequest] {exp}\"))\n\n if parsed_event.year not in crypto_prices[parsed_event.query]['old_prices']:\n raise Exception(json.dumps(f\"[BadRequest] Data for requested year not available\"))\n\n\n current_value = (parsed_event.amount/crypto_prices[parsed_event.query]['old_prices'][parsed_event.year]) * crypto_prices[parsed_event.query]['current_price']\n \n return round(current_value,2)", "title": "" }, { "docid": "a28c312c92b4217aed7e3c0932a1bfe0", "score": "0.5150363", "text": "def execute_strategy(self):\n data = self.results.copy().dropna()\n data['position'] = np.where(data['SMA1'] > data['SMA2'], 1, 0)\n data['strategy'] = data['position'].shift(1) * data['returns']\n data['creturns'] = data['returns'].cumsum().apply(np.exp)\n data['cstrategy'] = data['strategy'].cumsum().apply(np.exp)\n self.results = data\n # absolute performance of indicator\n aperf = data['cstrategy'].ix[-1]\n # out/under performance of indicator\n operf = aperf - data['creturns'].ix[-1]\n return round(aperf, 2), round(operf, 2)", "title": "" }, { "docid": 
"da3563d9154880d9234b4a9a1fead992", "score": "0.5143086", "text": "def evaluate_metric(self, assignments):\n sleep_seconds = 10\n print('{0} - Sleeping for {1} seconds to simulate expensive computation...'.format(threading.current_thread(), sleep_seconds))\n time.sleep(sleep_seconds)\n return sigopt.examples.franke_function(assignments['x1'], assignments['x2'])", "title": "" }, { "docid": "0647838977b755e73ac53acb14e7ed14", "score": "0.5141489", "text": "def execute_strategy(self):\n data = self.results.copy().dropna()\n data['sma'] = data['returns'].rolling(self.sma).mean()\n data['distance'] = data['close'] - data['sma']\n # sell signals\n data['position'] = np.where(data['distance'] > self.threshold, -1, np.nan)\n # buy signals\n data['position'] = np.where(data['distance'] < -self.threshold, 1, data['position'])\n # cross of current price and SMA (zero distance)\n data['position'] = np.where(data['distance'] * data['distance'].shift(1) < 0, 0, data['position'])\n data['position'] = data['position'].ffill().fillna(0)\n data['strategy'] = data['position'].shift(1) * data['returns']\n # determine when trades take place\n # trades = data['position'].diff().fillna(0) != 0\n # subtract transaction costs from return where trades take place\n # data['strategy'][trades] -= self.tc\n data['creturns'] = data['returns'].cumsum().apply(np.exp)\n data['cstrategy'] = data['strategy'].cumsum().apply(np.exp)\n self.results = data\n # absolute performance of indicator\n aperf = data['cstrategy'].ix[-1]\n # out/under performance of indicator\n operf = aperf - data['creturns'].ix[-1]\n return round(aperf, 2), round(operf, 2)", "title": "" }, { "docid": "ab977317ec5b5501427a8c5d5683e375", "score": "0.51393765", "text": "def calculate(self):\n super().calculate()\n\n\n tr_key = Constants.get_key(\"TR\")\n trn_key = Constants.get_key(\"TRn\")\n\n dm_plus_key = Constants.get_key(\"DMplus\")\n dm_plus_n_key = Constants.get_key(\"DMplusN\")\n\n dm_minus_key = Constants.get_key(\"DMminus\")\n dm_minus_n_key = Constants.get_key(\"DMminusN\")\n di_plus_n_key = Constants.get_key(\"DIplusN\")\n di_minus_n_key = Constants.get_key(\"DIminusN\")\n di_diff_key = Constants.get_key(\"DIdiff\")\n di_sum_key = Constants.get_key(\"DIsum\")\n dx_key = Constants.get_key(\"DX\")\n\n df_result = []\n\n for ticker in self.tickers:\n\n df_data_atr = self.df[[ticker]].copy()\n atr_ind = ATR(df=df_data_atr, n=14)\n\n df_data = self.df[ticker].copy()\n # the period parameter of ATR function does not matter because period does not influence TR calculation\n df_data[tr_key] = atr_ind.calculate()[ticker][[tr_key]]\n\n df_data[dm_plus_key] = \\\n np.where(\n (df_data[self.high_key] - df_data[self.high_key].shift(1)) >\n (df_data[self.low_key].shift(1) - df_data[self.low_key]),\n df_data[self.high_key] - df_data[self.high_key].shift(1),\n 0)\n\n df_data[dm_plus_key] = \\\n np.where(\n df_data[dm_plus_key] < 0,\n 0,\n df_data[dm_plus_key])\n\n df_data[dm_minus_key] = \\\n np.where((df_data[self.low_key].shift(1) - df_data[self.low_key]) >\n (df_data[self.high_key] - df_data[self.high_key].shift(1)),\n df_data[self.low_key].shift(1) - df_data[self.low_key],\n 0)\n\n df_data[dm_minus_key] = np.where(df_data[dm_minus_key] < 0, 0, df_data[dm_minus_key])\n\n TRn = []\n DMplusN = []\n DMminusN = []\n TR = df_data[tr_key].tolist()\n DMplus = df_data[dm_plus_key].tolist()\n DMminus = df_data[dm_minus_key].tolist()\n\n for i in range(len(df_data)):\n if i < self.n:\n TRn.append(np.NaN)\n DMplusN.append(np.NaN)\n DMminusN.append(np.NaN)\n elif i == 
self.n:\n TRn.append(df_data[tr_key].rolling(self.n).sum().tolist()[self.n])\n DMplusN.append(df_data[dm_plus_key].rolling(self.n).sum().tolist()[self.n])\n DMminusN.append(df_data[dm_minus_key].rolling(self.n).sum().tolist()[self.n])\n elif i > self.n:\n TRn.append(TRn[i - 1] - (TRn[i - 1] / 14) + TR[i])\n DMplusN.append(DMplusN[i - 1] - (DMplusN[i - 1] / 14) + DMplus[i])\n DMminusN.append(DMminusN[i - 1] - (DMminusN[i - 1] / 14) + DMminus[i])\n\n df_data[trn_key] = np.array(TRn)\n df_data[dm_plus_n_key] = np.array(DMplusN)\n df_data[dm_minus_n_key] = np.array(DMminusN)\n df_data[di_plus_n_key] = 100 * (df_data[dm_plus_n_key] / df_data[trn_key])\n df_data[di_minus_n_key] = 100 * (df_data[dm_minus_n_key] / df_data[trn_key])\n df_data[di_diff_key] = abs(df_data[di_plus_n_key] - df_data[di_minus_n_key])\n df_data[di_sum_key] = df_data[di_plus_n_key] + df_data[di_minus_n_key]\n df_data[dx_key] = 100 * (df_data[di_diff_key] / df_data[di_sum_key])\n ADX = []\n DX = df_data[dx_key].tolist()\n for j in range(len(df_data)):\n if j < 2 * self.n - 1:\n ADX.append(np.NaN)\n elif j == 2 * self.n - 1:\n ADX.append(df_data[dx_key][j - self.n + 1:j + 1].mean())\n elif j > 2 * self.n - 1:\n ADX.append(((self.n - 1) * ADX[j - 1] + DX[j]) / self.n)\n df_data[self.indicator_key] = np.array(ADX)\n\n df_result.append(df_data.loc[:, [self.indicator_key]])\n\n self.df = pd.concat(df_result, axis=1, keys=self.tickers)\n\n return self.df", "title": "" }, { "docid": "eafd971da2d043a48e336831a0a68557", "score": "0.5138379", "text": "def process_queue_fringes(self):\n\n try:\n result = self.queue.get(0)\n\n if result == \"ABORT\":\n self.addlog(\"Mission aborted.\", self.warningcolor2)\n self.abortmission.set(0)\n self.totaltime = 0\n self.removeprogressbar()\n return\n\n # Show result of the task if needed\n self.entry_23.delete(0, END)\n self.entry_23.insert(0, '{0:.2f}'.format(result[0]))\n self.entry_24.delete(0, END)\n self.entry_24.insert(0, '{0:.2f}'.format(result[1]))\n\n self.setoffsets()\n\n self.layertype_list, self.entry_x_list, self.entry_d_list, self.checklayer_list = [], [], [], []\n\n for i in range(1, self.layernumber + 1):\n self.layertype_list.append(getattr(self, \"layertypevar{}\".format(i)).get())\n self.entry_x_list.append(float(getattr(self, \"entry_x_{}\".format(i)).get()))\n self.entry_d_list.append(float(getattr(self, \"entry_d_{}\".format(i)).get()))\n self.checklayer_list.append(int(getattr(self, \"checklayer{}\".format(i)).get()))\n\n fitobject = FIT_FTIR(self.Temp, self.wavenumbers_cut, self.trans_cut, self.entry_d_0.get(),\n self.layertype_list, self.entry_x_list, self.entry_d_list, self.checklayer_list,\n float(self.entry_21.get()), self.angle,\n float(self.entry_23.get()), float(self.entry_24.get()), self.subtype, 2,\n self.listbox, self.progress_var, self.wn_beingcalculated,\n self.FTIRplot, self.absorptionplot, self.canvas, self.blindcal, self.abortmission)\n self.peakvalues_fit = fitobject.returnpeakvalues()\n\n self.addlog('Fitting fringes complete. Total time: {:.1f}s. 
MSE={}'.format(self.totaltime, result[2]))\n\n self.totaltime = 0\n\n self.try_remove_fitline(self.fitline_trans)\n self.fitline_trans = self.plot_and_show(self.FTIRplot, self.fitline_trans, 0, self.wavenumbers_cut,\n self.peakvalues_fit, 'r', '', 'Transmission (%)', 0, 'upper right')\n\n self.removeprogressbar()\n\n except queue.Empty:\n self.totaltime += 0.1\n self.after(100, self.process_queue_fringes)", "title": "" }, { "docid": "5820bfa6b2e06e705d2fad4ec1847994", "score": "0.5132884", "text": "def compute(\n self,\n blade_cost,\n blade_mass,\n hub_system_cost,\n hub_system_mass,\n nacelle_mass,\n nacelle_cost,\n tower_cost,\n tower_mass,\n blade_number=3,\n offshore=True,\n ):\n\n # Variables\n self.blade_cost = (\n blade_cost # Float(0.0, units='USD', iotype='in', desc='cost for a single wind turbine blade')\n )\n self.blade_mass = blade_mass # Float(0.0, units='kg', iotype='in', desc='mass for a single wind turbine blade')\n self.hub_system_cost = hub_system_cost # Float(0.0, units='USD', iotype='in', desc='hub system cost')\n self.hub_system_mass = hub_system_mass # Float(0.0, units='kg', iotype='in', desc='hub system mass')\n self.nacelle_mass = nacelle_mass # Float(0.0, units='kg', iotype='in', desc='nacelle mass')\n self.nacelle_cost = nacelle_cost # Float(0.0, units='USD', iotype='in', desc='nacelle cost')\n self.tower_cost = tower_cost # Float(0.0, units='USD', iotype='in', desc='cost for a tower')\n self.tower_mass = tower_mass # Float(0.0, units='kg', iotype='in', desc='mass for a turbine tower')\n\n # Parameters (and ignored inputs)\n self.blade_number = blade_number # Int(3, iotype='in', desc = 'number of rotor blades')\n self.offshore = offshore # Bool(False, iotype='in', desc= 'boolean for offshore')\n\n # high level output assignment\n self.rotor_mass = self.blade_mass * self.blade_number + self.hub_system_mass\n self.rotor_cost = self.blade_cost * self.blade_number + self.hub_system_cost\n self.turbine_mass = self.rotor_mass + self.nacelle_mass + self.tower_mass\n self.turbine_cost = self.rotor_cost + self.nacelle_cost + self.tower_cost\n\n if self.offshore:\n self.turbine_cost *= 1.1\n\n # derivatives\n self.d_mass_d_blade_mass = self.blade_number\n self.d_mass_d_hub_mass = 1.0\n self.d_mass_d_nacelle_mass = 1.0\n self.d_mass_d_tower_mass = 1.0\n\n if self.offshore:\n self.d_cost_d_blade_cost = 1.1 * self.blade_number\n self.d_cost_d_hub_cost = 1.1\n self.d_cost_d_nacelle_cost = 1.1\n self.d_cost_d_tower_cost = 1.1\n else:\n self.d_cost_d_blade_cost = self.blade_number\n self.d_cost_d_hub_cost = 1.0\n self.d_cost_d_nacelle_cost = 1.0\n self.d_cost_d_tower_cost = 1.0", "title": "" }, { "docid": "a2818a75b1740c04b7c7c9442d420e04", "score": "0.5128805", "text": "def getval(self):\n \n if self.averagingAttemps > 1:\n result = 0.0\n readings = 0.0\n readingsc = 0.0\n\t readings_array = {}\n for i in range(0, self.averagingAttemps):\n reading = self.getReading()\n if (reading != 0 and reading != 1023):\n if self.averagingMethod == \"max\":\n if reading > readings:\n\t\t\t \"\"\"print(str(reading) + \" higher as \" + str(readings))\"\"\"\n readings = reading\n else:\n readings = readings + reading\n readingsc = readingsc + 1\n\t\t if reading in readings_array:\n\t\t readings_array[reading] = readings_array[reading] + 1\n\t\t else:\n\t\t\treadings_array[reading] = 1\n time.sleep(self.averagingTimeout)\n if readingsc == 0:\n result = self.getReading()\n else:\n\t\tif self.averagingMethod == \"most\":\n\t\t sorted_x = sorted(readings_array.items(), key=operator.itemgetter(1), 
reverse=True)\n\t\t result = sorted_x[0][0]\n elif self.averagingMethod == \"max\":\n result = readings\n else:\n result = readings / readingsc\n else:\n result = self.getReading()\n \n if result == 0:\n msg = \"Error: Check wiring for the \" + self.sensorname\n msg += \" measurement, no voltage detected on ADC input \"\n msg += str(self.adcpin)\n print(msg)\n return None\n if result == 1023:\n if self.sensorname == \"LDR\":\n # Carrying on with 1023 gives divide by zero error below\n result = 1022\n else:\n msg = \"Error: Check wiring for the \" + self.sensorname\n msg += \" measurement, full voltage detected on ADC input \"\n msg += str(self.adcpin)\n print(msg)\n return None\n\n\tif self.sensorname == \"WindDirection\":\n\t return result\n\n\t\"\"\"print(self.sensorname + \" \" + str(self.sensorvoltage))\"\"\"\n\n vout = float(result)/1023 * self.sensorvoltage\n if self.pulldown != None:\n resout = (self.pulldown * self.sensorvoltage) / vout - self.pulldown\n elif self.pullup != None:\n resout = self.pullup / ((self.sensorvoltage / vout) - 1)\n else:\n resout = vout * 1000\n return resout", "title": "" }, { "docid": "621c70cbb28fa1e4a9c190a799b7b1e6", "score": "0.51253504", "text": "def compute(self): # pylint: disable=E0202\n pass", "title": "" }, { "docid": "621c70cbb28fa1e4a9c190a799b7b1e6", "score": "0.51253504", "text": "def compute(self): # pylint: disable=E0202\n pass", "title": "" }, { "docid": "01998f20124e1512ef226959005fc520", "score": "0.512176", "text": "def calculate_callback(self):\n\n # Create the interpreter\n from asteval import Interpreter\n aeval = Interpreter()\n\n # Grab the calculation from the text box which the user wants to do\n calculation = str(self.calculation_text.text())\n\n lhs = calculation.split('=')[0].strip()\n\n # Use the package asteval to do the calculation, we are going to\n # assume here that the lhs of the equals sign is going to be the output named variable\n\n try:\n if lhs in self.data_components:\n raise KeyError('{} is already in the data components, use a different variable on the left hand side.'.format(lhs))\n\n # Pull in the required data and run the calculation\n for dc in self.data_components:\n if dc in calculation:\n aeval.symtable[dc] = self.data[dc]\n aeval(calculation)\n\n # Pull out the output data and add to the proper drop-downs\n out_data = aeval.symtable[lhs]\n self.data.add_component(out_data, lhs)\n\n # Add the new data to the list of available data for arithemitic operations\n self.data_components.append(lhs)\n\n self.close()\n\n except KeyError as e:\n self.calculation_text.setStyleSheet(\"background-color: rgba(255, 0, 0, 128);\")\n\n # Display the error in the Qt popup\n if aeval.error_msg:\n self.error_label_text.setText('{}'.format(aeval.error_msg))\n else:\n self.error_label_text.setText('{}'.format(e))\n\n self.error_label_text.setStyleSheet(\"color: rgba(255, 0, 0, 128)\")", "title": "" }, { "docid": "9ad61c57a35376b206230052921c1336", "score": "0.512145", "text": "def update(self,current_value):\n\n self.error = self.set_point - current_value\n\n self.P_value = self.Kp * self.error\n if (self.last_value >= current_value):\n change = self.error - self.last_error\n else:\n change = 0.0\n\n if self.error > 0.0:\n self.I_value = self.Integrator * self.Ki\n else:\n self.I_value = (self.Integrator * self.Ki)\n\n\n #self.D_value = self.Kd * ( self.error - self.Derivator)\n self.D_value = self.Kd * change\n self.Derivator = self.error\n\n self.Integrator = self.Integrator + self.error\n\n if self.Integrator > 
self.Integrator_max:\n self.Integrator = self.Integrator_max\n elif self.Integrator < self.Integrator_min:\n self.Integrator = self.Integrator_min\n\n self.last_error = self.error\n self.last_value = current_value\n\n PID = self.P_value + self.I_value + self.D_value\n\n self._z_data[\"data\"][\"P\"] = self.P_value\n self._z_data[\"data\"][\"I\"] = self.I_value\n self._z_data[\"data\"][\"D\"] = self.D_value\n self._z_data[\"data\"][\"E\"] = self.error\n self._z_data[\"data\"][\"SP\"] = self.set_point\n self._z_data[\"data\"][\"OUT\"] = PID\n\n if self._zmq:\n try:\n self._zmq.send_json(self._z_data, zmq.NOBLOCK)\n except zmq.error.Again:\n pass\n\n return PID", "title": "" }, { "docid": "472ea9d79a812c81d9dbbb43bbe45dc9", "score": "0.50997347", "text": "def _calc_result(self):\n import mcmc\n\n g=zeros(2)\n gcov=zeros((2,2))\n gsens = zeros(2)\n\n if self.eta:\n eta1vals,eta2vals=self.trials[:,2], self.trials[:,3]\n g1vals,g2vals=self._get_gvals_from_eta(eta1vals,eta2vals)\n else:\n g1vals=self.trials[:,2]\n g2vals=self.trials[:,3]\n\n prior = self.gprior(g1vals,g2vals)\n dpri_by_g1 = self.gprior.dbyg1(g1vals,g2vals)\n dpri_by_g2 = self.gprior.dbyg2(g1vals,g2vals)\n\n psum = prior.sum()\n\n g0=None\n gcov0=None\n if self.when_prior=='after':\n # this will be eta not g if using that parametrization\n g0,gcov0 = mcmc.extract_stats(self.trials[:,2:4])\n\n pars, pcov = mcmc.extract_stats(self.trials,weights=prior)\n # we need to multiply each by the prior\n \"\"\"\n g[0] = (g1vals*prior).sum()/psum\n g[1] = (g2vals*prior).sum()/psum\n\n\n gcov[0,0] = (g1diff**2*prior).sum()/psum\n gcov[0,1] = (g1diff*g2diff*prior).sum()/psum\n gcov[1,0] = gcov[0,1]\n gcov[1,1] = (g2diff**2*prior).sum()/psum\n \"\"\"\n\n g[:] = pars[2:4]\n gcov[:,:] = pcov[2:4, 2:4]\n\n # now the sensitivity is \n # sum( (<g>-g) L*dP/dg )\n # ----------------------\n # sum(L*P)\n #\n # the likelihood is already in the points\n\n g1diff = g[0]-g1vals\n g2diff = g[1]-g2vals\n gsens[0] = 1. - (g1diff*dpri_by_g1).sum()/psum\n gsens[1] = 1. - (g2diff*dpri_by_g2).sum()/psum\n else:\n # prior is already in the distribution of\n # points. 
This is simpler for most things but\n # for sensitivity we need a factor of (1/P)dP/de\n\n pars,pcov = mcmc.extract_stats(self.trials)\n #g, gcov = mcmc.extract_stats(self.trials[:,2:2+2])\n\n g[:] = pars[2:4]\n gcov[:,:] = pcov[2:4, 2:4]\n\n g1diff = g[0]-g1vals\n g2diff = g[1]-g2vals\n\n w,=where(prior > 0)\n if w.size == 0:\n raise ValueError(\"no prior values > 0!\")\n\n gsens[0]= 1.-(g1diff[w]*dpri_by_g1[w]/prior[w]).mean()\n gsens[1]= 1.-(g2diff[w]*dpri_by_g2[w]/prior[w]).mean()\n\n \n arates = self._emcee_sampler.acceptance_fraction\n arate = arates.mean()\n #print 'acceptance rate:',w.size/float(self.trials.size)\n\n # weighted s/n based on the most likely point\n s2n,loglike,chi2per,dof,prob=self._calculate_maxlike_stats()\n if self.eta:\n g0name='eta'\n else:\n g0name='g'\n\n Tmean=pars[4]\n Terr=sqrt(pcov[4,4])\n Ts2n=pars[4]/sqrt(pcov[4,4])\n #print 'T s/n:',Ts2n\n self._result={'model':self.model,\n 'g':g,\n 'gcov':gcov,\n 'gsens':gsens,\n g0name+'0':g0,\n g0name+'cov0':gcov0,\n 'pars':pars,\n 'pcov':pcov,\n 'Tmean':Tmean,\n 'Terr':Terr,\n 'Ts2n':Ts2n,\n 'arate':arate,\n 's2n_w':s2n,\n 'loglike':loglike,\n 'chi2per':chi2per,\n 'dof':dof,\n 'fit_prob':prob}\n #wlog(\"arate:\",self._result['arate'])", "title": "" }, { "docid": "81e1d43e967bd2ad66fe32eeb4f512d8", "score": "0.5096479", "text": "def interpolate(self, double: float) -> float:\n ...", "title": "" }, { "docid": "f9ccb6ccc97f671fb95181dc45fb44f8", "score": "0.5088823", "text": "def postsolve(self, lbfgs_result):\n raise NotImplementedError", "title": "" }, { "docid": "7a7d46d8d9ffadc367fe21aad2f8c9bb", "score": "0.50876844", "text": "def result_relative(self, result1, result2):\n value1 = self.objective.get_mean(result1)\n value2 = self.objective.get_mean(result2)\n if value2 == 0:\n return float('inf') * value1\n return value1 / value2", "title": "" }, { "docid": "0bc1f58636b2ddf43f909825f55e8d1e", "score": "0.50867045", "text": "def calcul_throughput(self, spectrumrangeairmass):\n \"\"\"\n data_mag = spectrumrangeairmass.data_mag.T\n data_mag -= np.log(self.data_calspec)\n spectrumrangeairmass.data_mag = data_mag.T\n\n data_order2 = spectrumrangeairmass.order2.T\n data_order2 /= self.data_calspec\n spectrumrangeairmass.order2 = data_order2.T\n #print(self.data_calspec)\n #print(self.data_calspec.T)\n #print(self.data_calspec.T @ self.data_calspec)\n MULT = np.zeros((len(self.data_calspec),len(self.data_calspec)))\n for i in range(len(self.data_calspec)):\n for j in range(len(self.data_calspec)):\n MULT[i][j], MULT[j][i] = self.data_calspec[i] * self.data_calspec[j], self.data_calspec[i] * self.data_calspec[j]\n\n spectrumrangeairmass.INVCOV *= MULT\n spectrumrangeairmass.cov /= MULT\n \"\"\"\n\n self.slope, self.ord, self.err_slope, self.err_ord = spectrumrangeairmass.bouguer_line()\n disp = np.loadtxt(self.rep_disp_name)\n Data_disp = sp.interpolate.interp1d(disp.T[0], disp.T[1], kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n Err_disp = sp.interpolate.interp1d(disp.T[0], disp.T[1]/100, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n tel = np.loadtxt(self.rep_tel_name)\n Data_tel = sp.interpolate.interp1d(tel.T[0], tel.T[1], kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n Err_tel = sp.interpolate.interp1d(tel.T[0], tel.T[2], kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n \"\"\"\n data_disp_new_lambda = Data_disp(self.new_lambda)\n err_disp_new_lambda = Err_disp(self.new_lambda)\n data_tel_new_lambda = Data_tel(self.new_lambda)\n 
err_tel_new_lambda = Err_tel(self.new_lambda)\n \"\"\"\n self.data_bouguer = np.exp(self.ord)\n err_bouguer = self.err_ord * self.data_bouguer\n self.data = self.data_bouguer / self.data_calspec\n self.lambdas = self.new_lambda\n self.err = err_bouguer / self.data_calspec\n\n # self.data = filter_detect_lines(self.lambdas, self.data, self.plot_filt, self.save_filter)\n\n Data = sp.interpolate.interp1d(self.lambdas, self.data, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n Data_bouguer = sp.interpolate.interp1d(self.lambdas, self.data_bouguer, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n Err = sp.interpolate.interp1d(self.lambdas, self.err, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n self.lambdas = self.new_lambda\n #self.lambdas = np.arange(self.lambda_min, self.lambda_max, 1)\n\n self.data_tel = Data_tel(self.lambdas)\n self.data_tel_err = Err_tel(self.lambdas)\n self.data_disp = Data_disp(self.lambdas)\n self.data_disp_err = Err_disp(self.lambdas)\n self.data = Data(self.lambdas)\n self.err = Err(self.lambdas)\n self.data_bouguer = Data_bouguer(self.lambdas)\n # self.data = sp.signal.savgol_filter(self.data, 111, 2)\n\n #spectrumrangeairmass.archi_mega_fit()\n if self.order2 and not self.mega_fit:\n self.slope2, self.ord2, self.err_slope2, self.err_ord2, self.A2, self.A2_err = spectrumrangeairmass.bouguer_line_order2()\n self.data_bouguer = np.exp(self.ord2)\n err_bouguer = self.err_ord2 * self.data_bouguer\n self.data_order2 = self.data_bouguer / self.data_calspec\n self.lambdas = self.new_lambda\n self.err_order2 = err_bouguer / self.data_calspec\n # self.data_order2 = filter_detect_lines(self.lambdas, self.data_order2, self.plot_filt, self.save_filter)\n Data = sp.interpolate.interp1d(self.lambdas, self.data_order2, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n Data_bouguer = sp.interpolate.interp1d(self.lambdas, self.data_bouguer, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n Err = sp.interpolate.interp1d(self.lambdas, self.err_order2, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n self.lambdas = self.new_lambda\n self.data_order2 = Data(self.lambdas)\n self.err_order2 = Err(self.lambdas)\n self.data_bouguer = Data_bouguer(self.lambdas)\n # self.data_order2 = sp.signal.savgol_filter(self.data_order2, 111, 2)\n\n if self.mega_fit:\n self.ord2, self.err_order2 = spectrumrangeairmass.megafit_emcee()\n print(self.ord2)\n print(len(self.ord2))\n self.data_order2 = self.ord2 / self.data_calspec\n self.err_order2 = self.err_order2 / self.data_calspec\n self.lambdas = self.new_lambda\n\n # self.data_order2 = filter_detect_lines(self.lambdas, self.data_order2, self.plot_filt, self.save_filter)\n Data = sp.interpolate.interp1d(self.lambdas, self.data_order2, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n\n Data_bouguer = sp.interpolate.interp1d(self.lambdas, self.ord2, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n\n Err = sp.interpolate.interp1d(self.lambdas, self.err_order2, kind=\"linear\", bounds_error=False,\n fill_value=\"extrapolate\")\n\n self.lambdas = self.new_lambda\n #self.lambdas = np.arange(self.lambda_min, self.lambda_max, 1)\n self.data_order2 = Data(self.lambdas)\n self.err_order2 = Err(self.lambdas)\n self.data_bouguer = Data_bouguer(self.lambdas)\n self.data_order2[21:-21] = sp.signal.savgol_filter(self.data_order2[21:-21], 11, 2)", "title": "" }, { "docid": "83954746e4599a297a386e46edd8832a", 
"score": "0.5083051", "text": "def solve(self, show_progress=True):\n dt = self.dt\n old_dt = self.dt\n\n if self.in_parallel:\n show = False\n else:\n show = show_progress\n bar = FloatPBar(self.t, self.tf, show=show)\n\n # Initial solution\n self.dump_output()\n self.barrier() # everybody waits for this to complete\n\n # initial solution damping time\n tdamp = self.tdamp\n\n # Compute the accelerations once for the predictor corrector\n # integrator to work correctly at the first time step.\n self.acceleration_eval.compute(self.t, dt)\n\n # solution output times\n output_at_times = numpy.array( self.output_at_times )\n\n while self.t < self.tf:\n\n # perform any pre step functions\n for func in self.pre_step_functions:\n func.eval(self)\n\n if self.rank == 0:\n logger.debug(\n \"Iteration=%d, time=%f, timestep=%f\" % \\\n (self.count, self.t, dt)\n )\n # perform the integration and update the time.\n #print 'Solver Iteration', self.count, dt, self.t, tdamp\n self.integrator.step(self.t, dt)\n\n # perform any post step functions\n for func in self.post_step_functions:\n func.eval(self)\n\n # update time and iteration counters if successfully\n # integrated\n self.t += dt\n self.count += 1\n\n # dump output if the iteration number is a multiple of the\n # printing frequency\n if self.count % self.pfreq == 0:\n self.dump_output()\n self.barrier()\n\n # dump output if forced\n if self.force_output:\n self.dump_output()\n self.barrier()\n\n self.force_output = False\n\n # re-set the time-step to the old time step before it\n # was adjusted\n dt = old_dt\n\n # update progress bar\n bar.update(self.t)\n\n # update the time for all arrays\n self.update_particle_time()\n\n # compute the new time step across all processors\n if self.adaptive_timestep:\n\n # locally stable time step\n dt = self.integrator.compute_time_step(\n self.dt, self.cfl)\n\n # damp the initial solution\n if self.t < tdamp:\n dt *= 0.5 * (numpy.sin(numpy.pi*(-0.5+self.t/tdamp)) + 1.0)\n\n # set the globally stable time step\n if self.in_parallel:\n dt = self.pm.update_time_steps(dt)\n\n # adjust dt to land on final time\n if self.t + dt > self.tf:\n dt = self.tf - self.t\n\n # adjust dt to land on specific output times\n if output_at_times.size > 0:\n tdiff = output_at_times - self.t\n condition = (tdiff > 0) & (tdiff < dt)\n\n if numpy.any( condition ):\n output_time = output_at_times[ numpy.where(condition) ]\n\n # save the old time-step and compute the new\n # time-step to fall on the specified output time\n # instant\n old_dt = dt\n dt = float( output_time - self.t )\n\n self.force_output = True\n\n # set the adjusted time-step for the solver\n self.dt = dt\n\n if self.execute_commands is not None:\n if self.count % self.command_interval == 0:\n self.execute_commands(self)\n\n # close the progress bar\n bar.finish()\n\n # final output save\n self.dump_output()", "title": "" }, { "docid": "a0a145553849371012e1201d8c01f8d3", "score": "0.50810856", "text": "def evaluate(solutions, results, rollouts=100): # pipaek : rollout 100 -> 10 , originally 100\n index_min = np.argmin(results)\n best_guess = solutions[index_min]\n restimates = []\n\n for s_id in range(rollouts):\n print('p_queue.put(), s_id=%d' % s_id)\n p_queue.put((s_id, best_guess))\n print('>>>rollout_routine!!')\n rollout_routine() # pipaek : 여기서도 p_queue.put 하자마자 바로 처리..\n\n print(\">>>Evaluating...\")\n for _ in tqdm(range(rollouts)):\n #while r_queue.empty():\n # sleep(.1) # pipaek : multi-process가 아니므로\n if not r_queue.empty(): # pipaek : 20180718 
check to avoid getting stuck on r_queue.get()!!\n #print('r_queue.get()')\n #restimates.append(r_queue.get()[1])\n r_s_id, r = r_queue.get()\n print('in evaluate r_queue.get() r_s_id=%d, r_queue remain=%d' % (r_s_id, r_queue.qsize()))\n restimates.append(r)\n else:\n print('r_queue.empty() -> break!!')\n break\n\n return best_guess, np.mean(restimates), np.std(restimates)", "title": "" }, { "docid": "5ce81706280fd507142c3a9781b2eaa7", "score": "0.5079482", "text": "def test_calc_final_slope():\n calc_final_slope(gridv1.data)\n assert True", "title": "" } ]
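An aside on the ADX passage near the top of the negatives above: it seeds the smoothed TR/DM series with an n-period rolling sum and then applies Wilder's recurrence, but the recurrence hardcodes a 14-period divisor (TRn[i - 1] / 14) even though the window length is self.n, while the final ADX smoothing is correctly parameterized. A minimal, self-contained sketch of Wilder smoothing with the period kept as a parameter throughout (function name and sample series are illustrative, not taken from the passage):

def wilder_smooth(values, period):
    # Seed with the sum of the first `period` values, then apply
    # smoothed[i] = smoothed[i-1] - smoothed[i-1] / period + values[i].
    if len(values) < period:
        raise ValueError("need at least `period` values")
    smoothed = [sum(values[:period])]
    for v in values[period:]:
        smoothed.append(smoothed[-1] - smoothed[-1] / period + v)
    return smoothed

# Example: a 3-period smoothing of a short true-range series.
print(wilder_smooth([1.0, 2.0, 1.5, 2.5, 3.0], period=3))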
3510ac42e4ef7ae99ff81d34dfc62eaa
Get the list of field names for a given container name. If a container name is repeated in the snapshot metadata, it always carries the same internal metadata, so the parent of the container does not matter. container_field_name: the container whose list of field names is required. Returns a list of field names.
[ { "docid": "19a19c2a810e3f9fd8aa2036cfc9c0ba", "score": "0.8049855", "text": "def data_field_container_field_name_list_get(self, container_field_name):\n def dfs_helper(dict_to_find_in, container_field_name, ret_list):\n if dict_to_find_in is None:\n return\n if container_field_name in dict_to_find_in:\n for field_name, field in list(dict_to_find_in[container_field_name].container_dict.items()):\n ret_list.append(field.name)\n return\n else:\n for field_name, field in list(dict_to_find_in.items()):\n return dfs_helper(field.container_dict, container_field_name, ret_list)\n ret_list = []\n dfs_helper(self.data_dict, container_field_name, ret_list)\n return ret_list", "title": "" } ]
[ { "docid": "30c975d7087e5406555f5a29ddfc5d2f", "score": "0.61451197", "text": "def _get_list(container, name):\n\n try:\n return container[name]\n except KeyError:\n return []", "title": "" }, { "docid": "f10c4c60092e3146611d5e905294f06d", "score": "0.5811185", "text": "def _field_names_(self):\n ret = []\n\n def _merge(f, rename):\n if not f:\n return\n\n if not rename:\n ret.extend([n for n, _ in f])\n else:\n for n, _ in f:\n new_n = rename.get(n, None)\n ret.append(new_n) if new_n else ret.append(n)\n\n for b in self.__class__.__mro__:\n _merge(\n getattr(b, '__swagger_fields__', None),\n getattr(b, '__swagger_rename__', None)\n )\n\n return ret", "title": "" }, { "docid": "437d5d3800f0ce0776ef979ed27f1d87", "score": "0.5712169", "text": "def fields_container(self):\n return self._fields_container", "title": "" }, { "docid": "437d5d3800f0ce0776ef979ed27f1d87", "score": "0.5712169", "text": "def fields_container(self):\n return self._fields_container", "title": "" }, { "docid": "437d5d3800f0ce0776ef979ed27f1d87", "score": "0.5712169", "text": "def fields_container(self):\n return self._fields_container", "title": "" }, { "docid": "b1441683baa86cccd7332f66af646866", "score": "0.5601177", "text": "def get_container_entries(self, container, prefix):\n try:\n headers = self._headers()\n swift_objects = self.conn.get_container(container,\n prefix=prefix,\n full_listing=True,\n headers=headers)[1]\n except socket.error as err:\n raise exception.SwiftConnectionFailed(reason=err)\n swift_object_names = [swift_obj['name'] for swift_obj in swift_objects]\n return swift_object_names", "title": "" }, { "docid": "4d8c066c5cc911b046911ca8ae57b598", "score": "0.5588793", "text": "def GetContainerFields(request, layer):\n icon = request.get('icon')\n icon = util.GetInstance(model.Resource, icon, layer, required=False)\n if icon and icon.type != 'icon':\n raise util.BadRequest('Invalid (non-icon) resource specified.')\n\n folder = request.get('folder')\n folder = util.GetInstance(model.Folder, folder, layer, required=False)\n folder_index = request.get('folder_index', None)\n if folder_index is not None: folder_index = int(folder_index)\n\n region = request.get('region')\n region = util.GetInstance(model.Region, region, layer, required=False)\n\n return {\n 'icon': icon,\n 'folder': folder,\n 'folder_index': folder_index,\n 'region': region,\n 'name': request.get('name', None),\n 'description': request.get('description', None),\n 'item_type': request.get('item_type', None),\n 'custom_kml': request.get('custom_kml', None)\n }", "title": "" }, { "docid": "9876c3a1dd2807b45e9acca543a94186", "score": "0.551232", "text": "def list_containers(self):\r\n if self._container_names is None:\r\n visited = set([id(self)])\r\n if hasattr(self, '_parent'): # fix for weird unpickling bug\r\n visited.add(id(self.parent))\r\n names = []\r\n for n, v in self.__dict__.items():\r\n if is_instance(v, Container) and id(v) not in visited:\r\n visited.add(id(v))\r\n names.append(n)\r\n self._container_names = names\r\n return self._container_names", "title": "" }, { "docid": "b619bd1f3f17cba660356f07ab0102bd", "score": "0.55013055", "text": "def field_names(self) -> List[str]:\n return [x.name for x in self.fields.all()]", "title": "" }, { "docid": "49731fc3f52b6436791ae50e39483390", "score": "0.5474507", "text": "def get_list_of_fields_for_node(node_name):\n return _value_to_node_fields[node_name]", "title": "" }, { "docid": "1f462a5031e0d1a0da6991690b93c2ef", "score": "0.5405363", "text": "def flatten_container(self, 
container):\n for names in ARG_MAP.values():\n if names[TransformationTypes.CHRONOS.value]['name'] and \\\n '.' in names[TransformationTypes.CHRONOS.value]['name']:\n chronos_dotted_name = names[TransformationTypes.CHRONOS.value]['name']\n parts = chronos_dotted_name.split('.')\n\n if parts[-2] == 'parameters':\n # Special lookup for docker parameters\n common_type = names[TransformationTypes.CHRONOS.value].get('type')\n result = self._lookup_parameter(container, parts[-1], common_type)\n if result:\n container[chronos_dotted_name] = result\n else:\n result = lookup_nested_dict(container, *parts)\n if result:\n container[chronos_dotted_name] = result\n return container", "title": "" }, { "docid": "8d7120d4d10f2d84545c761e0ae1cb6e", "score": "0.53995526", "text": "def data_field_name_list_get(self, action_name=None):\n ret_field_name_list = []\n if action_name is not None:\n ret_field_name_list=[field_name_ for field_name_, data_ in \n list(self.action_dict[self.action_dict_allname[action_name]].data_dict.items())]\n for data_name_, data_ in list(self.data_dict.items()):\n ret_field_name_list.append(data_name_)\n return ret_field_name_list", "title": "" }, { "docid": "613b688dbcd887d59bea4ed78d7433a4", "score": "0.5380753", "text": "def get_field_names(self):\n\n field_names = set()\n\n # Add field names from each component.\n for component in self.components:\n field_names.update(component.get_field_names())\n\n return list(field_names)", "title": "" }, { "docid": "5390ba244ba96352a1d6c9f1db4879ab", "score": "0.53681356", "text": "def get_field_names(self, format_string):\n return self.name_pattern.findall(format_string)", "title": "" }, { "docid": "d862b976f871cbcee5188ed6f6cb4e95", "score": "0.536005", "text": "def get_container_list(self, source):\n storage_client = None\n container = None\n\n if 'fileGroup' in source:\n # Input data stored in auto-storage\n storage_client = self.resolve_storage_account()\n container = _get_container_name(source['fileGroup'])\n elif 'containerUrl' in source:\n uri = urlsplit(source['containerUrl'])\n if not uri.query:\n raise ValueError('Invalid container url.')\n storage_account_name = uri.netloc.split('.')[0]\n sas_token = uri.query\n storage_client = BlockBlobService(account_name=storage_account_name,\n sas_token=sas_token)\n container = uri.pathname.split('/')[1]\n else:\n raise ValueError('Unknown source.')\n\n return self.list_container_contents(source, container, storage_client)", "title": "" }, { "docid": "a0dfb7fb0ea38f44ad645e2780ee0cca", "score": "0.535109", "text": "def getAllFields(layer):\n fields = layer.fields()\n fieldLists = []\n for field in fields:\n fieldLists.append(field.name())\n return fieldLists", "title": "" }, { "docid": "7d0822d0c9a73ae8026a352e77623b96", "score": "0.5347515", "text": "def get_field_names(self):\n raise NotImplementedError", "title": "" }, { "docid": "cb33f81c7565c3cc3e972a6b30431442", "score": "0.52762234", "text": "def get_field_names(self):\n\n field_names = set(sch_field_id_to_name.values())\n for component in self.components:\n field_names.update(component.get_field_names())\n return list(field_names)", "title": "" }, { "docid": "70dc8549c75e78f1846fe87ad0c2d70a", "score": "0.5217515", "text": "def getFieldNames(self):\n return self.fieldNames", "title": "" }, { "docid": "5ee8470c4b9687c8fe862ec39f66c2a4", "score": "0.5198622", "text": "def get_field_names(\n project_type,\n table_name,\n):\n # sql = \"SELECT * FROM [{}]\".format(table_name)\n sql = \"PRAGMA table_info( [{}] );\".format(table_name)\n\n 
data = run_query(sql, project_type)\n if data:\n return [x.get(\"name\") for x in data]\n else:\n return []", "title": "" }, { "docid": "b674a72881c343ca3e65f83343c954a6", "score": "0.51946247", "text": "def data_field_name_list_get(self, *unused):\n return [data_name_ for data_name_, data_ in list(self.data_dict.items())]", "title": "" }, { "docid": "b2434dc23b4245c69caa615e0fd07d5d", "score": "0.5171875", "text": "def get_containers_names(client, prefix=DOCK_CONTAINER_NAME_PREFIX_BTC):\n return [container.name for container in client.containers.list(\"all\") if prefix in container.name]", "title": "" }, { "docid": "7b77ae64070e8055affd29d40113c872", "score": "0.5169079", "text": "def _get_list_field_names(self):\n\n all_fields = ()\n if not self.list_fields:\n all_fields = self._get_default_list_fields()\n else:\n all_fields = list(self.list_fields)\n if self._is_list_pk_required():\n self._inject_primary_key(all_fields)\n\n all_fields = self._extract_field_names(all_fields, allow_string=True)\n if self.list_indexed is True:\n index_name = self.list_index_name\n if index_name in (None, ''):\n index_name = config_services.get('api', 'schema', 'index_name')\n\n all_fields = list(all_fields)\n all_fields.insert(0, index_name)\n all_fields = tuple(all_fields)\n\n duplicates = misc_utils.get_duplicates(all_fields)\n if duplicates:\n raise DuplicateListFieldNamesError('There are some duplicate field names '\n 'in \"list_fields\" of [{admin}] class. '\n 'this will make the find result incorrect. '\n 'please remove duplicate fields or set '\n 'unique labels for them: {duplicates}'\n .format(admin=self, duplicates=duplicates))\n\n return all_fields", "title": "" }, { "docid": "c48e94f111a0111011a30f30aff4c88a", "score": "0.5166085", "text": "def _field_names(self):\n names = []\n self._read_token('{')\n if self._is_field_name():\n names.append(self._read_token().value)\n while self._next_token().kind == ',':\n self._read_token(',')\n if self._is_field_name():\n name == self._read_token().value\n if name in names:\n raise Exception(\"Repeated field name: %s\" % name)\n names.append(name)\n else:\n raise Exception(\"field name expected, found %s\" %\n self._next_token().value)\n self._read_token('}')\n return names", "title": "" }, { "docid": "55843e0a9f4a7fe56c2087de43caa3c4", "score": "0.5160062", "text": "def field_names(self):\r\n return self._names", "title": "" }, { "docid": "25d221ff14221b5be396cb2fd14b695f", "score": "0.51470417", "text": "def get_field_names(self):\n\n field_names = set()\n for f in self.fields:\n try:\n field_names.add(unquote(f[\"name\"]))\n except KeyError:\n pass\n field_names.discard(\"\")\n return field_names", "title": "" }, { "docid": "2c14e57dbd84db2ec879f1e1adf15d5f", "score": "0.5139172", "text": "def list(self) -> list:\n labels = []\n for k, v in self._labels.items():\n labels.append(\"{}={}\".format(k, v))\n\n #TODO: return names\n containers = self.client.containers.list(filters={\"label\": labels})\n return list(map(lambda s: s.name, containers))", "title": "" }, { "docid": "b6c2df00f269215d05e90947cd79bb3f", "score": "0.51312125", "text": "def export_field_names(self) -> List[str]:\n return [f.name for f in self.fields.all()]", "title": "" }, { "docid": "b7aae64056ae801052747746a5423abd", "score": "0.5128673", "text": "def fields(self):\r\n try:\r\n self._field_name_cache\r\n except AttributeError:\r\n self._fill_fields_cache()\r\n return self._field_name_cache", "title": "" }, { "docid": "e7d6dc8c87f29a148679a18694ce2684", "score": "0.51274216", "text": 
"def get_field_names(self):\n\n return {f[\"name\"] for f in self.fields}", "title": "" }, { "docid": "3a797639a31c6fabc6d35cc0ef6e6484", "score": "0.5123486", "text": "def get_versioned_field_names(self, version_number):\n return []", "title": "" }, { "docid": "7b77852ac9831875ec06d5fe55dde409", "score": "0.5094264", "text": "def get_field_names(self):\n field_names = []\n for field in self.data_fields.all():\n field_names.append(field.name)\n return field_names", "title": "" }, { "docid": "aa3d8bbaa16f7920fd4f9fcc1da8b28c", "score": "0.50611734", "text": "def get_column_names_list(record_info_in: sdk.RecordInfo) -> List[str]:\n return [field.name for field in record_info_in]", "title": "" }, { "docid": "704297f96eac44cd01a88ca465d5b805", "score": "0.5060997", "text": "def field_names(self):\n return frozenset(self._children.keys())", "title": "" }, { "docid": "f64563f859bbb1a7450773db6425e8bd", "score": "0.5054078", "text": "def list(self, container_name: str) -> None:\n try:\n res = requests.get(self.url + 'list', json={'container_name': container_name})\n if res.status_code == 200:\n self.logger.info(f\"Listing files of container {container_name}\")\n return json.loads(res.content)\n else:\n return None\n except ConnectionError as ce:\n raise ce", "title": "" }, { "docid": "d3dfb9b4a47eed06de1d9963222288d2", "score": "0.5046736", "text": "def get_field_names(self):\n return self.fields.values()", "title": "" }, { "docid": "f23b83640bca611ba5973965e550f962", "score": "0.50379735", "text": "def get_container_name(self, container_id):\n if not os.path.isdir(self.containersdir):\n return []\n link_list = []\n for fname in os.listdir(self.containersdir):\n container = self.containersdir + \"/\" + fname\n if os.path.islink(container):\n real_container = os.readlink(container)\n if os.path.basename(real_container) == container_id:\n link_list.append(fname)\n return link_list", "title": "" }, { "docid": "4636b4e0dcd5bfaf02de8b0ee0c445e3", "score": "0.5035241", "text": "def fieldsNames_get(self,recordset):\r\n\t\tif recordset.upper().startswith('SELECT'):\r\n\t\t\trs=self.__open_recordset(self.conn, recordset, maxrecords=1)\r\n\t\telse:\r\n\t\t\trs=self.__open_recordset(self.conn, '['+recordset+']', maxrecords=1)\r\n\r\n\t\tif rs.Fields.Count>0:\r\n\t\t\tfields=[rs.Fields.Item(item).Name for item in range(rs.Fields.Count)]\r\n\t\telse:\r\n\t\t\tfields=[]\r\n\t\treturn fields", "title": "" }, { "docid": "50f25ecf9257d7170423361e610a667f", "score": "0.50169486", "text": "def get_fields(self, field_name):\r\n if field_name not in self.fields:\r\n raise OGRException('invalid field name: %s' % field_name)\r\n return [feat.get(field_name) for feat in self]", "title": "" }, { "docid": "7d34c0f7d6e4e777f87562f000824e1f", "score": "0.4993697", "text": "def get_fieldnames_from_cursor(cursor: Cursor) -> List[str]:\n return [i[0] for i in cursor.description]", "title": "" }, { "docid": "4d3d15cdef20f82411536b5fcbf3cacc", "score": "0.49833426", "text": "def fields(self):\r\n return [force_text(capi.get_field_name(capi.get_field_defn(self._ldefn, i)),\r\n self._ds.encoding, strings_only=True)\r\n for i in xrange(self.num_fields)]", "title": "" }, { "docid": "cc2836b21ba3be6d217c40e9a0be9a63", "score": "0.49688077", "text": "def _extract_field_names(self, fields, allow_string=True):\n\n names = []\n for item in fields:\n if self._is_valid_field(item):\n names.append(item.key)\n elif allow_string is True and isinstance(item, str) \\\n and self._is_valid_method(item):\n names.append(item)\n else:\n message = 
'Provided field [{field}] is not a valid value. ' \\\n 'it must be a column attribute{sign} ' \\\n 'expression level hybrid property{end}'\n\n if allow_string is True:\n message = message.format(\n field=str(item), sign=',',\n end=' or a string representing a method name of [{admin}] class.')\n else:\n message = message.format(field=str(item), sign=' or',\n end='.')\n\n raise InvalidListFieldError(message.format(admin=self))\n\n return tuple(names)", "title": "" }, { "docid": "f24c7585e363e44d0434d30c329dbd1e", "score": "0.49683803", "text": "def field_list(cls) -> List[str]:\n return list(attr.fields_dict(cls).keys())", "title": "" }, { "docid": "4f6299e85d77d37267f451d0f8648195", "score": "0.495515", "text": "def _to_fields(d: DictAny) -> List[str]:\n cs: List[DictAny] = d[\"meta\"][\"view\"][\"columns\"]\n\n # written this way to get type annotations to work\n def f(x: DictAny) -> str:\n sx: str = x['name']\n return sx\n\n return list(map(f, cs))", "title": "" }, { "docid": "574c75a4ad74382c72ae9e5f91447525", "score": "0.49546686", "text": "def container_image_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"container_image_names\")", "title": "" }, { "docid": "8a079a0ce17f9ad49ef4125b8aff75ab", "score": "0.49508816", "text": "def stored_field_names(self):\r\n \r\n bn = self._by_name\r\n return [name for name in self._names if bn[name].stored]", "title": "" }, { "docid": "f72b6899c69e686da1f2003d0a10b4cc", "score": "0.49477538", "text": "def key_field_name_list_get(self):\n return list(self.key_dict.keys())", "title": "" }, { "docid": "e9f44857f2954aef14200b01f3bdce23", "score": "0.49375585", "text": "def get_names(collection):\n return [element.name for element in collection]", "title": "" }, { "docid": "f1ea66213ccf40c7c8ec94541b1078fb", "score": "0.49365887", "text": "def fields(self):\n return [self._fields[name] for name in self.fieldNames]", "title": "" }, { "docid": "8638226e6ca75fe8bb4d4eb091341832", "score": "0.49363112", "text": "def field_names(self, record_type=\"\"):\n return [f[\"name\"] for f in self.record_types[record_type][\"fields\"]]", "title": "" }, { "docid": "06107a2cdf60711c2ff205daad75e3a5", "score": "0.49352884", "text": "def fieldnames(self):\n return self._fieldnames", "title": "" }, { "docid": "0fc69c4893af6b3663f692a42e03452b", "score": "0.49313736", "text": "def get_canonical_container_name(container):\n return sorted(container['Names'], key=lambda name: len(name))[0][1:]", "title": "" }, { "docid": "5ca31ef9e8cbb6928657eed4cbea6060", "score": "0.48984218", "text": "def containers(self, module_name):\n\t\tbase, name = split_module_name(module_name)\n\n\t\t# the printer modules does not have the same name scheme\n\t\tif module_name == 'shares/print':\n\t\t\tbase = 'printers'\n\n\t\tself._read_directories()\n\n\t\tif self.directory is None:\n\t\t\treturn []\n\t\treturn map(lambda x: {'id': x, 'label': ldap_dn2path(x)}, self.directory.info.get(base, []))", "title": "" }, { "docid": "bec166b06411e731bed78a486883042f", "score": "0.48848704", "text": "def get_field_list(self):\n return list(self.table_format_dict)", "title": "" }, { "docid": "1918484db22ccaff5f158a9c670452d1", "score": "0.48847437", "text": "def fields(self):\r\n try:\r\n return self._fields[self.tname]\r\n except KeyError:\r\n # introspect the field names from the database\r\n # by relying on the DB API 2.0\r\n # NB: we cannot trust the ordering in the Django model\r\n curs = connections[self.alias].cursor()\r\n curs.execute('select * from %s where 
1=0' % self.tname)\r\n names = self._fields[self.tname] = [\r\n r[0] for r in curs.description if r[0] != 'id']\r\n return names", "title": "" }, { "docid": "4e066b142abbdaa5a587ebabadc21915", "score": "0.48470128", "text": "def get_field_names(self):\n key_fields = [ d[0] for d in self.fpkey.fields ]\n data_fields = [ d[0] for d in self.fpdata.fields ]\n return key_fields + data_fields", "title": "" }, { "docid": "c44ff81bc789d7aee7edef6b560353f0", "score": "0.48232073", "text": "def get_field_names(table):\n return [field['name'] for field in get_db().execute(f'SELECT name FROM {FIELDS + get_table_id(table)}').fetchall()]", "title": "" }, { "docid": "899305a9f751ae70d7a33b5600f4746a", "score": "0.47798315", "text": "def list_fields(self):\n _require_metadata(gnats.MINIMAL_METADATA)\n return list(self.ordered_fields)", "title": "" }, { "docid": "da861e81945237457eaed0a3ff7571d6", "score": "0.47780028", "text": "def get_field_names(self):\n return sorted(self.column_name_describer.keys())", "title": "" }, { "docid": "92c4c389c1b8081ba8e278620cc39858", "score": "0.47687584", "text": "def fields(cls) -> List[str]:\n fields = attr.fields(cls)\n field_types = [\n (f.name, innermost_type(f.type)) for f in fields if f.name != \"gid\"\n ]\n field_names = [\n [f\"{name}.{f}\" for f in typ.fields()]\n if issubclass(typ, _HasFields)\n else [name]\n for (name, typ) in field_types\n ]\n return [name for names in field_names for name in names]", "title": "" }, { "docid": "72667bc449108aded84cbcafc14597fe", "score": "0.47650254", "text": "def _list_field_names(self, field_type):\n fields = self._properties.get('indexedField', {})\n return fields.get(field_type)", "title": "" }, { "docid": "b09a9db0023271ff3cce904fceae0e09", "score": "0.47543573", "text": "def fields(self):\n return self.field_list", "title": "" }, { "docid": "f3a8bf83e5d8353fb269facfffe1d9f9", "score": "0.4749665", "text": "def fields(self) -> t.List[str]:\n return []", "title": "" }, { "docid": "06b4eb3cb3fd1ecb3f1e60574510dad1", "score": "0.47387376", "text": "def list_collection_fields(self, environment_id, collection_id, **kwargs):\n\n if environment_id is None:\n raise ValueError('environment_id must be provided')\n if collection_id is None:\n raise ValueError('collection_id must be provided')\n\n headers = {}\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n sdk_headers = get_sdk_headers('discovery', 'V1',\n 'list_collection_fields')\n headers.update(sdk_headers)\n\n params = {'version': self.version}\n\n url = '/v1/environments/{0}/collections/{1}/fields'.format(\n *self._encode_path_vars(environment_id, collection_id))\n response = self.request(\n method='GET',\n url=url,\n headers=headers,\n params=params,\n accept_json=True)\n return response", "title": "" }, { "docid": "c323dd4c958bbb31f175a4ea32a98404", "score": "0.4738311", "text": "def fields(self):\n\n if hasattr(self, '__parameters__'):\n return self.__parameters__\n else:\n return [f'f{i}' for i in self.keys()]", "title": "" }, { "docid": "3975d8dac6769204bb26cdba3ace7719", "score": "0.47211117", "text": "def get_network_container_hostnames(name):\n for network in get_networks():\n if network[\"Name\"] == name:\n return [get_container_hostname(container) for container in network[\"Containers\"]]", "title": "" }, { "docid": "499acedfa5db0e6d0724a7a7620e9479", "score": "0.47182256", "text": "def get_parameter_names(diagnostic_fields: List[List[str]]) -> List[List[str]]:\n parameter_names = []\n for condition in diagnostic_fields:\n if 
isinstance(condition, list):\n parameter_names.append(get_parameter_names(condition))\n elif is_variable(condition):\n parameter_names.append(condition)\n return parameter_names", "title": "" }, { "docid": "e40fba1fc77f5cea111df8942e072ae9", "score": "0.47160146", "text": "def _string_helper(self, field_name): # pylint: disable=g-ambiguous-str-annotation\n result = [\"{} {}\".format(str(self.node), str(field_name))]\n for k, v in self._children.items():\n recursive = v._string_helper(k) # pylint: disable=protected-access\n result.extend([\" {}\".format(x) for x in recursive])\n return result", "title": "" }, { "docid": "2d12959f5bd0d6a8af770d946d3c9bcb", "score": "0.47127327", "text": "def _get_dates_from_fields(self):\n\n if not self._fields_container:\n return []\n\n dates = []\n # Widgets are arranged in the reverse order of their addition.\n for field in reversed(self._fields_container.children):\n try:\n date = datetime.datetime.strptime(field.text, \"%d/%m/%Y\").date()\n except ValueError:\n date = None\n dates.append(date)\n\n return dates", "title": "" }, { "docid": "1eca8a0bdc4cc6aa80b7a640dd671a0c", "score": "0.47007495", "text": "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .planner_container_type import PlannerContainerType\n\n from .planner_container_type import PlannerContainerType\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"containerId\": lambda n : setattr(self, 'container_id', n.get_str_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"type\": lambda n : setattr(self, 'type', n.get_enum_value(PlannerContainerType)),\n \"url\": lambda n : setattr(self, 'url', n.get_str_value()),\n }\n return fields", "title": "" }, { "docid": "340c5011c1971fe81abd7d8ab3651ba7", "score": "0.4667802", "text": "def fieldnames(self):\n fieldnames = {} # sets are unordered\n for row in self.rows:\n for field in row:\n fieldnames[field] = True\n return list(fieldnames.keys())", "title": "" }, { "docid": "6ca6b7bfdfdbc0d80d69b191611a1481", "score": "0.46639836", "text": "def tags(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:", "title": "" }, { "docid": "13a3989ca8336b4ac5811f4c12b9894c", "score": "0.46563387", "text": "def get_containers_list(self, dir_only=True):\n containers_list = []\n if not os.path.isdir(self.containersdir):\n return []\n for fname in os.listdir(self.containersdir):\n container_dir = self.containersdir + '/' + fname\n if os.path.isdir(container_dir):\n try:\n filep = open(container_dir + \"/imagerepo.name\", 'r')\n except (IOError, OSError):\n reponame = \"\"\n else:\n reponame = filep.read()\n filep.close()\n if dir_only:\n containers_list.append(container_dir)\n elif not os.path.islink(container_dir):\n names = self.get_container_name(fname)\n if not names:\n names = \"\"\n containers_list.append((fname, reponame, str(names)))\n return containers_list", "title": "" }, { "docid": "f5f79f4914c26655b3a3e72b6b5129e7", "score": "0.46369", "text": "def get_split_names(self):\n try:\n return [split.name for split in self.get_all()]\n except Exception:\n _LOGGER.error('Error getting split names from storage')\n _LOGGER.debug('Error: ', exc_info=True)\n return None", "title": "" }, { "docid": "da11509e1da3a03ab4800f720bc7a9de", "score": "0.4634459", "text": "def name_get(self):\n result = []\n for document in self:\n if document.version:\n name = '[%s] %s'%(document.version, document.name)\n else: \n name = document.name\n \n 
result.append((document.id, name))\n \n return result", "title": "" }, { "docid": "49c2e57520127645e0075b6b0029a0b4", "score": "0.46329495", "text": "def _get_field_names(reader: IO) -> Sequence[str]:\n csv_reader = csv.DictReader(reader)\n next(csv_reader)\n reader.seek(0)\n return csv_reader.fieldnames or []", "title": "" }, { "docid": "3660cc3d64c393055d4ac42e3b877e8f", "score": "0.4630384", "text": "def get_fields(ms_name):\n logger.debug(\"Getting antenna names\")\n\n subname = \"::\".join((ms_name, \"FIELD\"))\n field_subtab = list(xm.xds_from_table(subname))\n field_subtab = field_subtab[0]\n field_names = field_subtab.NAME\n\n logger.debug(f\"Fields found: {', '.join(field_names.values)}\")\n return field_names", "title": "" }, { "docid": "7e2bf9854325734e655c6fe8dad3c739", "score": "0.46236715", "text": "def dependencies(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:", "title": "" }, { "docid": "b8ca2acf5377bdb047a8b663c874b75c", "score": "0.46212012", "text": "def _get_user_field_names(self):\n field_names = []\n internal_fields = (\"left\", \"right\", \"tree_id\", \"level\")\n for field in self._meta.fields:\n if (field.name not in internal_fields) and (not isinstance(field, AutoField)) and (not field.primary_key):\n field_names.append(field.name)\n return field_names", "title": "" }, { "docid": "0ab96239137f7598c22ea2ed46b559b2", "score": "0.46098047", "text": "def list_fields(table):\r\n if not table:\r\n return []\r\n cols = table.__table__.columns if hasattr(table, '__table__') else table.columns\r\n return [f.name for f in cols]", "title": "" }, { "docid": "5cb28afef9eb9d2da7ce3083fc7e4c48", "score": "0.46097603", "text": "def field_names(cls):\n return [field.name for field in cls()]", "title": "" }, { "docid": "0e676c72e977e1d12726b12347010650", "score": "0.46061674", "text": "def _GetToListFields(self, get_fields=None):\n if get_fields:\n list_fields = set()\n for field in get_fields:\n list_fields.add('items/' + field)\n return list_fields", "title": "" }, { "docid": "adf73010cff289c4920cac47f74bb3a2", "score": "0.4604988", "text": "def container_name(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"containerName\"),\n )", "title": "" }, { "docid": "e1fe3030e55145e0e75c85e0b470c3a5", "score": "0.46005386", "text": "def __init__(self, field_list, field_container, *args, **kwargs):\r\n super(DynamicForm, self).__init__(*args, **kwargs)\r\n\r\n self.field_structure = []\r\n\r\n for field_name in field_list:\r\n field_data = getattr(field_container, field_name)[1]\r\n\r\n field_object = getattr(forms.fields, field_data['type'])(**field_data['kwargs'])\r\n self.fields[field_name] = field_object\r\n\r\n field_data['field_name'] = field_name\r\n self.field_structure.append(field_data)", "title": "" }, { "docid": "dc90e0b264c2830b135d06667b3634bc", "score": "0.45973927", "text": "def fld_lst(type):\n lst = []\n if isinstance(type, dict):\n return list(type.keys())\n elif isinstance(type, list):\n for i in range(1, len(type) + 1):\n lst.append(\"element \" + str(i))\n return lst", "title": "" }, { "docid": "1d47eaac5c428fd666c6b79881ef1b66", "score": "0.45942882", "text": "def get_signature_fields(interview_metadata_dict):\r\n signature_fields = []\r\n for field_dict in (interview_metadata_dict.get('built_in_fields_used',[]) + interview_metadata_dict.get('fields',[])):\r\n field = map_names(field_dict.get('variable',''))\r\n if field.endswith('.signature'):\r\n signature_fields.append(field)\r\n return 
signature_fields", "title": "" }, { "docid": "6cd0478bf7c2cea9e76455ff40a8f67f", "score": "0.4587273", "text": "def flatten_container_info(self, inner_ip):\n def iter_container(tg):\n for g in tg:\n for d in g['data']['containerStatuses']:\n c = {'name': d['name'],\n 'image': d['image'],\n 'status': d['status'].lower(),\n 'container_id': d['containerID']}\n yield c\n taskgroups = self.get_unit_info(inner_ip, fields='data,namespace').get('data') or []\n containers = sorted([i for i in iter_container(taskgroups)],\n key=lambda x: constants.DockerStatusOrdering.get(\n x['status'], constants.DockerStatusDefaultOrder))\n return containers", "title": "" }, { "docid": "e1dee9b7b5498c9877ec8ba5dbdf9fc9", "score": "0.45822173", "text": "def get_field_name_order(field_option_list):\n\n field_name_list = [None] * len(field_option_list)\n while not all(field_name_list):\n for idx, field_option in enumerate(field_option_list):\n if len(field_option) == 1:\n found_name = field_option.pop()\n field_name_list[idx] = found_name\n for field_set in field_option_list:\n field_set.discard(found_name)\n break\n\n return field_name_list", "title": "" }, { "docid": "335a0e40733d20c568f2211893f67242", "score": "0.4575407", "text": "def get_all_field_names(self):\r\n try:\r\n cache = self._name_map\r\n except AttributeError:\r\n cache = self.init_name_map()\r\n names = sorted(cache.keys())\r\n # Internal-only names end with \"+\" (symmetrical m2m related names being\r\n # the main example). Trim them.\r\n return [val for val in names if not val.endswith('+')]", "title": "" }, { "docid": "e70dd224a19862e060ab3b2ef1410db9", "score": "0.4568496", "text": "def test_schema_sql_field_list(self):\n sql_fields = BQTestSchema.get_sql_field_names()\n self.assertEqual('descr, timestamp, nested', sql_fields)\n\n sql_fields = BQTestSchema.get_sql_field_names(exclude_fields=['timestamp'])\n self.assertEqual('descr, nested', sql_fields)", "title": "" }, { "docid": "212711c27e5a6607162d499c7949a51a", "score": "0.45502537", "text": "def get_fields(self, field_name: str) -> Optional[PropertyValues]:\n for p in self.properties:\n if p.name == field_name:\n return p.values\n return None", "title": "" }, { "docid": "95b9ae9153f14cf90dae683977ade3e8", "score": "0.45465776", "text": "def list_containers():", "title": "" }, { "docid": "6d778c5d87a68079760c520f0f65fa8c", "score": "0.45192477", "text": "def list_names(self):\n return self.child_names", "title": "" }, { "docid": "9ff30b92abf478bba51988c5f5f550f4", "score": "0.45170188", "text": "def get_list_fields(self):\n return self.fields", "title": "" }, { "docid": "7386c2c08d3ec1ca914444f07634040d", "score": "0.45169434", "text": "def get_field_ids_by_name(self, name_regex):\n d = {k: v for k, v in self._get_id_to_name_dict().items() if name_regex.match(v) is not None}\n return d", "title": "" }, { "docid": "417fcc89d4af8adfbb5d16facc1a5543", "score": "0.45124117", "text": "def getContainerName(ctx, containerFlavor):\n return getattr(ctx.cf.ocp.containers, containerFlavor).name", "title": "" }, { "docid": "ce030cf1a66b95ea5ba9759c1b2a34f5", "score": "0.45090052", "text": "def _string_helper(self, field_name: path.Step) -> Sequence[str]:\n result = [\"{} {}\".format(str(self.node), str(field_name))]\n for k, v in self._children.items():\n recursive = v._string_helper(k) # pylint: disable=protected-access\n result.extend([\" {}\".format(x) for x in recursive])\n return result", "title": "" }, { "docid": "0762fbc266f3c0da9136fea60d69961c", "score": "0.44986236", "text": "def 
_list_containers(self):\n response = self._session.get(\"http+unix://%2Fvar%2Frun%2Fdocker.sock/containers/json?all=1\")\n if not response.ok:\n raise Exception(\"Couldn't get Containers list. Check if your docker daemon alive or not\")\n\n strio = self.stringIO(response.text)\n json_response = self.json.load(strio)\n lst_containers = [str(container[\"Names\"])[3:-2] for container in json_response]\n return lst_containers", "title": "" }, { "docid": "85b097ce3af3f1fcd437792f757f3e58", "score": "0.44954124", "text": "def get_list(self):\n\n if self.mode == 'history':\n container_list = self.dopq.history\n elif self.mode == 'enqueued':\n container_list = self.dopq.container_list\n else:\n raise ValueError('invalid mode of operation: {}'.format(self.mode))\n\n if len(container_list) > self.max_containers:\n container_list = container_list[:self.max_containers]\n\n return container_list", "title": "" }, { "docid": "9a2e65a55f6d8a915330cf30b44d59e3", "score": "0.44941473", "text": "def getFields(context, field_names=None):\n pairs = []\n if context.Schema(): # is archetype\n schema = context.Schema()\n fields = schema.fields()\n if field_names:\n for field_name in field_names:\n val = None\n pairs.append(schema[field_name].getName())\n val = schema[field_name].get(context)\n if isinstance(val, tuple) or isinstance(val, list):\n val = iterToTags(val)\n elif str(val).find('\\n') != -1:\n val = newlinesToTags(val)\n pairs.append(val)\n else:\n for field in fields:\n val = None\n pairs.append(field.getName())\n val = str(field.get(context))\n if val.find('\\n') != -1:\n val = newlinesToTags(val)\n pairs.append(val)\n return pairs", "title": "" } ]
3d86c1af870937aabf2ff3595bf01c6e
Return package metadata information.
[ { "docid": "5c54becc64e16a019dde1941f01c5892", "score": "0.0", "text": "def get_classifiers():\n return [\n 'Private :: Do Not Upload',\n 'Development Status :: 4 - Beta',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.7',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: The MIT License (MIT)',\n ]", "title": "" } ]
[ { "docid": "ba35cfa5e926725340a889cc98973d81", "score": "0.8002718", "text": "def get_package_metadata(self, id=None, name=None, version=None, architecture=None):\n url = self._package_url(id, name, version, architecture)\n return self._get_request(url)", "title": "" }, { "docid": "dfc47df194b466847bdda6c4beb22510", "score": "0.76922953", "text": "def get_metadata(distribution_name='imgclas'):\n\n pkg = pkg_resources.get_distribution(distribution_name)\n meta = {\n 'Name': None,\n 'Version': None,\n 'Summary': None,\n 'Home-page': None,\n 'Author': None,\n 'Author-email': None,\n 'License': None,\n }\n\n for line in pkg.get_metadata_lines(\"PKG-INFO\"):\n for par in meta:\n if line.startswith(par):\n _, value = line.split(\": \", 1)\n meta[par] = value\n\n # Update information with Docker info (provided as 'CONTAINER_*' env variables)\n r = re.compile(\"^CONTAINER_(.*?)$\")\n container_vars = list(filter(r.match, list(os.environ)))\n for var in container_vars:\n meta[var.capitalize()] = os.getenv(var)\n\n return meta", "title": "" }, { "docid": "0ac9aa610a51c65a9debacae3f4fb6f0", "score": "0.76084256", "text": "def getPackageInfo( self ):\n pkgData = {}\n\n pkgInfo = get_distribution('keggimporter').get_metadata('PKG-INFO')\n\n rawData = pkgInfo.split(\"\\n\")\n rawData.pop()\n\n reData = re.compile('^(.*?):\\s(.*)')\n for data in rawData:\n records = reData.search( data ) \n\n key = records.group(1)\n value = records.group(2)\n\n pkgData.update( { key: value } ) \n\n return pkgData", "title": "" }, { "docid": "68cf248a7e87a034c3b46db7dce258b3", "score": "0.7495511", "text": "def package_info(self) -> None:\n self.package_info_cmake()\n self.package_info_env()", "title": "" }, { "docid": "1007f0110f55beb346aa74b6bcd4880a", "score": "0.7289923", "text": "def package_info(self, pref):\n return self.for_package_file(pref, CONANINFO)", "title": "" }, { "docid": "6a7e3aa84b21d2936df412f0ad5b7aac", "score": "0.7280466", "text": "def get_metadata(self):\n pass", "title": "" }, { "docid": "ff36622adedfe06276943c2a74d2017f", "score": "0.72115207", "text": "def sdist_metadata(self):\n if self.is_wheel:\n raise TypeError(\"Requirement is not a source distribution!\")\n return self.pip_requirement.pkg_info()", "title": "" }, { "docid": "c1254a764867fcda711b4061940be3a8", "score": "0.71683055", "text": "def get_metadata(self):\n return self.metadata", "title": "" }, { "docid": "dd9932c4b48b7eba32c0650cbbd0c014", "score": "0.7132883", "text": "def metadata(self):\n return self.metadata_at_version(self.max_version)", "title": "" }, { "docid": "fe57db91318a21e3e84b585ebaeea23f", "score": "0.71302515", "text": "def getMetadata(self):\n return self._metadata", "title": "" }, { "docid": "a0102f653530de87a0e764b126b64a1b", "score": "0.7083335", "text": "def get_metadata(self):\n return self.metadata", "title": "" }, { "docid": "313d484fd58e8569b09855fa18f677d7", "score": "0.7056691", "text": "def metadata(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "0b7711f833b4a1fc8e95e6f563e74c1c", "score": "0.7032857", "text": "def metadata(self) -> Dict[str, Any]:\n pass", "title": "" }, { "docid": "0b7711f833b4a1fc8e95e6f563e74c1c", "score": "0.7032857", "text": "def metadata(self) -> Dict[str, Any]:\n pass", "title": "" }, { "docid": "1eaf5eed246b8d49b257ac5a678b0695", "score": "0.7015979", "text": "def get_metadata(session, name):\n resp = session.get(\"https://pypi.python.org/pypi/{}/json\".format(name))\n if resp.status_code == 200:\n 
return resp.json()\n return {}", "title": "" }, { "docid": "d938306b829603f77ccce0d944cb1c1c", "score": "0.6989091", "text": "def get_metadata(self):\n return self._metadata", "title": "" }, { "docid": "6bab4d88f7bf555ff963f51d1281f055", "score": "0.6965754", "text": "def get_metadata(self):\n return self.config.metadata", "title": "" }, { "docid": "58434e76703ecfd4c6a3b9449382b19c", "score": "0.6961348", "text": "def get_metadata(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/metadata\" % self.project_key)", "title": "" }, { "docid": "7b3059f61caf561f1c7c9e54676c6e2f", "score": "0.6959908", "text": "def _get_package_metadata(\n self, submitted_metadata, file_name, file_size, hashes, urls, contents\n ):\n\n def _get_filename_from_urls(submitted_metadata, urls):\n file_name = \"\"\n if not urls:\n logging.warning(f\"No URLs provided for: {submitted_metadata}\")\n for url in urls:\n _file_name = os.path.basename(url)\n if not file_name:\n file_name = _file_name\n else:\n if file_name != _file_name:\n logging.warning(\n f\"Received multiple URLs with different file names; will use the first URL (file name '{file_name}'): {submitted_metadata}\"\n )\n return file_name\n\n file_name_from_url = _get_filename_from_urls(submitted_metadata, urls)\n if not file_name:\n file_name = file_name_from_url\n\n now = str(datetime.utcnow())\n metadata = {\n \"type\": \"package\",\n \"package\": {\n \"version\": \"0.1\",\n \"file_name\": file_name,\n \"created_time\": now,\n \"updated_time\": now,\n \"size\": file_size,\n \"hashes\": hashes,\n \"contents\": contents or None,\n },\n \"_upload_status\": \"uploaded\",\n }\n return metadata", "title": "" }, { "docid": "3b265be06c1a51c782d13c4a662992f0", "score": "0.6948777", "text": "def metadata(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "3b265be06c1a51c782d13c4a662992f0", "score": "0.6948777", "text": "def metadata(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "93bf09352497e0598c30511875afee00", "score": "0.69407064", "text": "def get_package_metadata(\n self,\n package_name: str,\n package_version: str,\n repository_name: Optional[str] = None,\n ) -> PythonPackagePayload:\n if repository_name:\n repo_config = self._config.get_repository_configuration(repository_name)\n else:\n repo_config = self._config.default_repository_configuration\n\n repo = self._repo_factory.get_repository(repo_config)\n\n metadata = repo.show(package_name, package_version)\n\n return PythonPackagePayload(\n repository_base_url=repo_config.base_url,\n package_name=package_name,\n package_version=package_version,\n package_metadata=metadata,\n )", "title": "" }, { "docid": "b06810d8c09ac0aa1fc53ae4bea9e99f", "score": "0.6907203", "text": "def metadata(self) -> Optional[Any]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "b06810d8c09ac0aa1fc53ae4bea9e99f", "score": "0.6907203", "text": "def metadata(self) -> Optional[Any]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "4f55338024553b3bff3d71189a7c55bb", "score": "0.6883791", "text": "def get_metadata():\n message = {\n 'dasher_version': __version__,\n 'repo': 'https://github.com/lsst-sqre/ltd-dasher'\n }\n return jsonify(message), 200", "title": "" }, { "docid": "8a5ded6e33a74f60e3b56eff81dbdf82", "score": "0.6876241", "text": "def metadata(self):\n pass", "title": "" }, { "docid": "8a5ded6e33a74f60e3b56eff81dbdf82", "score": 
"0.6876241", "text": "def metadata(self):\n pass", "title": "" }, { "docid": "123ac441393e251d1eb9282dc67a2e23", "score": "0.68461096", "text": "def getPackageInfo(self, pid):\n info = self.core.files.getPackageInfo(pid)\n if not info:\n raise PackageDoesNotExists(pid)\n return info", "title": "" }, { "docid": "fb584ab32b0d36795f375e3605fd35fe", "score": "0.6830067", "text": "def metadata():\n return MetadataConfig", "title": "" }, { "docid": "6eed88201e21210965c066fb1d60dfb2", "score": "0.6826499", "text": "def get_meta_information():\n raise NotImplementedError()", "title": "" }, { "docid": "1b4dabdee30365ee52fd7bf2830f6ef5", "score": "0.6817423", "text": "def global_metadata():\n from ctapipe import __version__ as ctapipe_version\n from ctapipe_io_lst import __version__ as ctapipe_io_lst_version\n from .. import __version__ as lstchain_version\n\n metadata = MetaData()\n metadata.LSTCHAIN_VERSION = lstchain_version\n metadata.CTAPIPE_VERSION = ctapipe_version\n metadata.CTAPIPE_IO_LST_VERSION = ctapipe_io_lst_version\n metadata.CONTACT = \"LST Consortium\"\n\n return metadata", "title": "" }, { "docid": "6916c01dffbfac6104f7b478a6ac5b10", "score": "0.68164563", "text": "def get_metadata(name):", "title": "" }, { "docid": "6916c01dffbfac6104f7b478a6ac5b10", "score": "0.68164563", "text": "def get_metadata(name):", "title": "" }, { "docid": "781fe4aa2c8086a4b8b0685f5fb5801c", "score": "0.6803053", "text": "def metadata(self):\n raise NotImplementedError()", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.67808557", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.67808557", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.67808557", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "801021b776b04da66dda1cbc70f648f1", "score": "0.67808557", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "9d0e426fd621e83779148306521a2154", "score": "0.6772988", "text": "def info(self):\n return self._driver.GetMetadata()", "title": "" }, { "docid": "036fa730fe1c1a9fc4e36edc04999122", "score": "0.6763207", "text": "def metadata_dictionary(self) -> Dict[str, MetadataValue]:\n meta = self.metadata\n data: Dict[str, MetadataValue] = {\n # identify release\n \"name\": self.safe_name,\n \"version\": meta.version,\n # file content\n \"filetype\": self.filetype,\n \"pyversion\": self.python_version,\n # additional meta-data\n \"metadata_version\": meta.metadata_version,\n \"summary\": meta.summary,\n \"home_page\": meta.home_page,\n \"author\": meta.author,\n \"author_email\": meta.author_email,\n \"maintainer\": meta.maintainer,\n \"maintainer_email\": meta.maintainer_email,\n \"license\": meta.license,\n \"description\": meta.description,\n \"keywords\": meta.keywords,\n \"platform\": meta.platforms,\n \"classifiers\": meta.classifiers,\n \"download_url\": meta.download_url,\n \"supported_platform\": meta.supported_platforms,\n \"comment\": self.comment,\n \"sha256_digest\": self.sha2_digest,\n # PEP 314\n \"provides\": meta.provides,\n \"requires\": meta.requires,\n \"obsoletes\": meta.obsoletes,\n # Metadata 1.2\n \"project_urls\": meta.project_urls,\n \"provides_dist\": meta.provides_dist,\n \"obsoletes_dist\": meta.obsoletes_dist,\n \"requires_dist\": meta.requires_dist,\n \"requires_external\": meta.requires_external,\n \"requires_python\": 
meta.requires_python,\n # Metadata 2.1\n \"provides_extras\": meta.provides_extras,\n \"description_content_type\": meta.description_content_type,\n # Metadata 2.2\n \"dynamic\": meta.dynamic,\n }\n\n if self.gpg_signature is not None:\n data[\"gpg_signature\"] = self.gpg_signature\n\n # FIPS disables MD5 and Blake2, making the digest values None. Some package\n # repositories don't allow null values, so this only sends non-null values.\n # See also: https://github.com/pypa/twine/issues/775\n if self.md5_digest:\n data[\"md5_digest\"] = self.md5_digest\n\n if self.blake2_256_digest:\n data[\"blake2_256_digest\"] = self.blake2_256_digest\n\n return data", "title": "" }, { "docid": "832edcf06b78d2aa52937b3a0b67659f", "score": "0.6761121", "text": "def get_package_metadata(f: typing.BinaryIO,\n workspace_pkg_meta: dict = {},\n ) -> PackageMetadata:\n cargo_toml = tomllib.load(f)\n\n if \"package\" not in cargo_toml and \"workspace\" in cargo_toml:\n raise WorkspaceCargoTomlError(\n \"Specified directory seems to be a workspace root, please run \"\n \"pycargoebuild on one of its members instead: \"\n f\"{' '.join(cargo_toml['workspace']['members'])}\")\n\n pkg_meta = cargo_toml[\"package\"]\n _get_meta_key = functools.partial(get_meta_key,\n pkg_meta=pkg_meta,\n workspace_pkg_meta=workspace_pkg_meta)\n\n pkg_license = _get_meta_key(\"license\")\n if pkg_license is not None:\n pkg_license = cargo_to_spdx(pkg_license)\n\n pkg_version = _get_meta_key(\"version\")\n if pkg_version is None:\n raise ValueError(f\"No version found in {f.name}\")\n\n return PackageMetadata(\n name=pkg_meta[\"name\"],\n version=pkg_version,\n license=pkg_license,\n license_file=_get_meta_key(\"license-file\"),\n description=_get_meta_key(\"description\"),\n homepage=_get_meta_key(\"homepage\"))", "title": "" }, { "docid": "43cce22e0e8fd0700f5089403ce7ec38", "score": "0.6746312", "text": "def get_metadata(package_name, cache_dir=None, max_age=ONE_DAY):\n url = '{base_url}/{package_name}/json'.format(\n base_url=PYPI_SERVER, package_name=package_name)\n if cache_dir:\n metadata = get_cached_metadata(package_name, cache_dir, max_age)\n if metadata is not None:\n if metadata == {}:\n headers = email.message_from_string('\\n\\n')\n raise urllib.error.HTTPError(url, 404, 'Not Found (cached)',\n headers, StringIO())\n return metadata\n try:\n metadata = get_json(url)\n except urllib.error.HTTPError as e:\n if e.code == 404 and cache_dir:\n put_cached_metadata(package_name, cache_dir, {})\n raise\n if cache_dir:\n put_cached_metadata(package_name, cache_dir, metadata)\n return metadata", "title": "" }, { "docid": "2e27056fe7bc95a31b5f9354a91d3f28", "score": "0.67442584", "text": "def rknn_package_info():\n import pkg_resources\n toolkit = pkg_resources.working_set.by_key.get('rknn-toolkit', None)\n toolkit = pkg_resources.working_set.by_key.get('rknn-toolkit2', toolkit)\n if toolkit is None:\n return dict(name=None, version=None)\n else:\n return dict(name=toolkit.project_name, version=toolkit.version)", "title": "" }, { "docid": "d6fdacbbdaef58841093f2e9cc230265", "score": "0.6739671", "text": "def extract_interesting_information(metadata):\n info = metadata['info']\n return dict(version=info['version'],\n supports=extract_py_versions(info['classifiers']),\n sdist_url=extract_sdist_url(metadata))", "title": "" }, { "docid": "35c8411e074b5b92479bf09665f7f714", "score": "0.6736538", "text": "def get_metadata(self):\n api_call = self.api + \"/api/catalog/\" + self.catalog_id\n\n response = get_response(api_call)\n return 
json.load(response)", "title": "" }, { "docid": "b2df54058c20ded92130188b46ab1d32", "score": "0.6729516", "text": "def get_metadata(self):\n raise NotImplementedError(self.get_metadata)", "title": "" }, { "docid": "12e0cddb41202eac9708968055360b8c", "score": "0.67191714", "text": "def getMetadata(self):\n return self.jonesmeta", "title": "" }, { "docid": "722f168872a75b677f9d83722ddfc9c5", "score": "0.6719154", "text": "def Metadata(self):\n return self.get('Metadata')", "title": "" }, { "docid": "30f9c03c99fda600dde75c1608981b66", "score": "0.6705416", "text": "def metadata(self) -> Optional[str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "72c543ce036eabbc7b3f9792c5adb0a0", "score": "0.6690269", "text": "def getMetaDataDict(self):\n return self._meta_data", "title": "" }, { "docid": "dd768878be679a81d51219a42f09179b", "score": "0.6674757", "text": "def info(self):\n file = apt.apt_pkg.Config.FindFile(\"Dir::Cache::pkgcache\")\n return dict(\n timestamp = os.path.getmtime(file),\n values = [\n dict(name = \"installedsize\", desc = \"installed size\"),\n dict(name = \"packagesize\", desc = \"package size\")\n ])", "title": "" }, { "docid": "59e0d35b76afcb6e4168e5e731d9a749", "score": "0.6673888", "text": "def metadata(self):\n return self.file_source.metadata()", "title": "" }, { "docid": "c1d7e03021f66cb913849eec069dc2dd", "score": "0.6672617", "text": "def get_metadata(config):\n version = 'python-%s' % __version__\n ip_address, hostname = _get_hostname_and_ip(config)\n return SdkMetadata(version, hostname, ip_address)", "title": "" }, { "docid": "0a7e2f3469eaa3b2a0e39f631b778e93", "score": "0.6670984", "text": "def generate_metadata(self):\n if len(self.files) > 0:\n this_netcdf = self.files[0].file_location\n ds, df_summ = self.files[0].process_netcdf(netcdf=this_netcdf)\n self.license = ds.attrs['license']\n self.title = ds.attrs['title']\n self.creator = ds.attrs['creator_name']\n self.creator_email = ds.attrs['creator_email']\n self.institution = ds.attrs['institution']\n self.aknowledgements = ds.attrs['acknowledgement']\n self.feature_type = ds.attrs['featureType']\n self.summary = ds.attrs['summary']\n self.conventions = ds.attrs['Conventions']\n self.naming_authority = ds.attrs['naming_authority']\n if self.date is None:\n from datetime import datetime\n self.date = datetime.fromordinal(\n datetime.toordinal(datetime(\n year=self.year, month=1, day=1)\n ) + self.doy - 1\n )\n self.month = self.date.month\n self.day = self.date.day\n self.parse_files()\n self.save()\n slack.chat.post_message(\n '#mpalatower',\n self.slack(),\n username='Mpala Tower',\n icon_emoji=':package:')\n return self\n else:\n return \"No files found for %d, day of year %d\" \\\n % (self.year, self.doy)", "title": "" }, { "docid": "3836962bcb2d08f66f30d5971b5b5b98", "score": "0.66429526", "text": "def metadata(self):\r\n if self._metadata is None:\r\n from ._metadata import MetadataManager\r\n self._metadata = MetadataManager(gis=self._gis)\r\n return self._metadata", "title": "" }, { "docid": "e07437fa75e2f2fde49582ffd07d79dd", "score": "0.6639232", "text": "def metadata(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "e07437fa75e2f2fde49582ffd07d79dd", "score": "0.6639232", "text": "def metadata(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "5532678da1f9f0b68eae561323b14148", "score": "0.6638328", "text": "def cpp_info(self):\n return self.cpp.package", "title": "" 
}, { "docid": "10e8e8ac44bc646a901bda3f07df0fe8", "score": "0.6635488", "text": "def metadata(self):\n return self.properties.get(\"metadata\", None)", "title": "" }, { "docid": "110972e40e3fea17a4d2bbe71077718d", "score": "0.6622566", "text": "def get_metadata(self):\n return {\"file_url\": self.file_name, \"file_date\": self.file_date}", "title": "" }, { "docid": "83c70623b8a045c06ec71d9adc6c883c", "score": "0.6593214", "text": "def metadata(self) -> typing.Optional[OrderedDict]:\n return self._metadata", "title": "" }, { "docid": "d3860fd5b4d8c15e8482c5b4b6ddb3d0", "score": "0.6559086", "text": "def metadata(self) -> \"Metadata\":\n return self._metadata", "title": "" }, { "docid": "f4776f7ac12a82ee05fdc51eff7d0c73", "score": "0.65374756", "text": "def metadata(self):\n if self._metadata is None:\n from ._metadata import MetadataManager\n self._metadata = MetadataManager(gis=self._gis)\n return self._metadata", "title": "" }, { "docid": "a1000d0703702469bdef8f3b250c6913", "score": "0.6504106", "text": "def metadata(self):\n\n json_response = self.my_http_request('GET', '/metadata')\n diction = json.loads(json_response)\n return diction[\"name\"], diction[\"thumbnail\"], diction[\"vector\"], diction[\"video\"], diction[\"size\"]", "title": "" }, { "docid": "44c567e74857cc487f0122e205712e91", "score": "0.64919186", "text": "def metadata(self, name):\n return self.get_member(str(name)).metadata", "title": "" }, { "docid": "1a26f464e9a2d88110da87941ec84150", "score": "0.6489349", "text": "def info(self):\n return (self._title, self._version, self._descr)", "title": "" }, { "docid": "98be9a01b954a98ad4ef20a3c5d45cfc", "score": "0.64881223", "text": "def metadata(self):\n return self.db.metadata", "title": "" }, { "docid": "abe75cd8bae6a9e4cf34f9a16c5012e2", "score": "0.64858735", "text": "def get_package_info(self, pref, remote):\n pref = self._resolve_latest_pref(pref, remote)\n return self._call_remote(remote, \"get_package_info\", pref), pref", "title": "" }, { "docid": "66cd491c59f84623372a3ae377715738", "score": "0.64818406", "text": "def get_metadata_for(name):", "title": "" }, { "docid": "27a90178be0933965bdf6417d2077104", "score": "0.6446554", "text": "def get_metadata(root: str) -> Metadata:\n name = dataset_name(root)\n dataset = load_dataset(name)\n return Metadata(coordinates=dataset.coordinates, labels=dataset.labels)", "title": "" }, { "docid": "b974e3130720f51fd4a3dbafb54ea2d4", "score": "0.64415383", "text": "def metadata(self):\n return {}", "title": "" }, { "docid": "f895baa107fe0e546f9c66feb46f420b", "score": "0.6436223", "text": "def metadata(self) -> Metadata:\n return self._metadata", "title": "" }, { "docid": "f2c709a4f61c7e97e9b2ce9798ce4d92", "score": "0.6422315", "text": "def get_metadata_from_pack(pack_name):\n\n with open(find_pack_path(pack_name)[0], \"r\") as pack_metadata:\n pack_meta_file_content = json.loads(pack_metadata.read())\n\n return pack_meta_file_content", "title": "" }, { "docid": "e709a77b8134766aa38b74043bdf665a", "score": "0.6422189", "text": "def dist_metainfo_dict(dist):\n\n distmetadata = dist.get_metadata(\"PKG-INFO\")\n ret = Parser(policy=default).parsestr(distmetadata)\n return ret", "title": "" }, { "docid": "93f34a7e0c0001967fe8ecc29f3bb44e", "score": "0.64209324", "text": "def get_metadata():\n bd = config_drive()\n if bd:\n return bd.get_metadata()\n return {}", "title": "" }, { "docid": "1f333089fc8b35d099aa62ca67751ad1", "score": "0.6407901", "text": "def metadata(self) -> Optional[ArbitraryMetadataMapping]:\n return self._metadata", 
"title": "" }, { "docid": "77c29f8b92f2acfa2ef5cd264ae92d55", "score": "0.6402572", "text": "def about_dict(repo_root, package):\n package_init = os.path.join(repo_root, package, \"__init__.py\")\n spec = iutil.spec_from_file_location(\"package\", package_init)\n package = iutil.module_from_spec(spec)\n spec.loader.exec_module(package)\n\n return package.__dict__", "title": "" }, { "docid": "831de987a235ee68af5a2e06d32fa661", "score": "0.638527", "text": "def get_metadata(self):\n return self.variable_metadata", "title": "" }, { "docid": "4ea2558bad54c5de2e4ccfca695fe8d8", "score": "0.6373588", "text": "def metadata(zclient, module):\n url_path = \"settings/modules/\" + module\n return zclient._get(url_path, {})", "title": "" }, { "docid": "f23fe4b1d36ac241574346230763ca8a", "score": "0.6357783", "text": "def package(self):\n\t\treturn self.get('package')", "title": "" }, { "docid": "4c178dca1d0a34176f11fc3658cb8e5e", "score": "0.6349725", "text": "def get_metadata(self):\n raw_metadata = self.redis.get(self.key)\n\n if raw_metadata is None:\n self.logger.debug(\"No metadata found for: {} {}\".format(self.name, self.version))\n return None\n\n metadata = loads(raw_metadata)\n\n if Version.FILENAME not in metadata:\n self.logger.debug(\"Incomplete metadata for: {} {}\".format(self.name, self.version))\n return None\n\n return metadata", "title": "" }, { "docid": "88a26f1bdf7d478cbc3a27b55f7fc19e", "score": "0.6346251", "text": "def metadata(self):\r\n meta = {k: getattr(self, k) for k in self.valid_metadata}\r\n meta[\"filename\"] = self.filename\r\n meta[\"time_points\"] = tuple(self.time_points)\r\n meta.update(self.compression_params)\r\n\r\n # Ordered dictionary by keys is easiest to inspect\r\n return OrderedDict(sorted(meta.items(), key=lambda t: t[0]))", "title": "" }, { "docid": "b2ae1ae01f0267fab58f9391c9a5572f", "score": "0.63295174", "text": "def assert_basic_meta_data(self, package_path):\n _LOGGER.debug(\"Begin asserting metadata for %s\", package_path)\n\n raw_found_metadata = self._pkg_inspector.get_meta_data(package_path)\n _LOGGER.debug(\"Found raw metadata: %s\", str(raw_found_metadata))\n\n try:\n self._pkg_inspector.validate_meta_data(raw_found_metadata)\n except AssertionError as error:\n pytest.fail(\"Package at {} failed metadata validation: {}\".format(package_path, error))\n else:\n expected_metadata = {\n key: self._expected_values[key] for key in ['project_name', 'package_version']\n }\n found_metadata = {\n 'project_name': raw_found_metadata.project_name,\n 'package_version': raw_found_metadata.version,\n }\n testfixtures.compare(expected_metadata, found_metadata)\n\n _LOGGER.debug(\"Finished asserting metadata for %s\", package_path)", "title": "" }, { "docid": "fc7a534f45634c86dcdda48cafe3c5d8", "score": "0.63201094", "text": "def fetch_metadata():\n\tproducts_releases = fetch_products_and_releases()\n\t# print(pretty(products_releases))\n\tproducts_releases_versions, projects_tarballs = fetch_projects_versions_and_tarballs(products_releases)\n\t# print(pretty(products_releases_versions))\n\tprojects_tarballs = fetch_unreferenced_projects(projects_tarballs)\n\tprojects_tarballs = fetch_unreferenced_versions(projects_tarballs)\n\n\tprint(pretty(projects_tarballs))\n\tprint(pretty(products_releases_versions))\n\n\twith open('projects_tarballs.json', 'w') as outfile:\n\t json.dump(projects_tarballs, outfile, sort_keys=True,indent=2)\n\n\twith open('products_releases_versions.json', 'w') as outfile:\n\t json.dump(products_releases_versions, 
outfile,sort_keys=True,indent=2)", "title": "" }, { "docid": "0accc00d15254fd933a8c397c21bc547", "score": "0.63126266", "text": "def default_metadata(pkg_info):\n if pkg_info.is_dependency:\n return dict(_default_dep_metadata)\n\n if pkg_info.name in _shipped_metadata:\n metadata = dict(_default_pkg_metadata)\n metadata.update(_shipped_metadata.get(pkg_info.name))\n\n if not pkg_info.has_possible_overrides(simple=False):\n metadata[\"version\"] = \"Sublime %s\" % sublime.version()\n\n if pkg_info.name == \"User\":\n metadata[\"version\"] = \"Unversioned\"\n\n if pkg_info.name not in _closed_default_packages:\n metadata[\"url\"] = \"https://github.com/sublimehq/Packages\"\n\n return metadata\n\n return dict(_default_pkg_metadata)", "title": "" }, { "docid": "b9e11f4ceb1002b1dcddfab323ddc71e", "score": "0.63126", "text": "def metadata():\n\treturn {\n\t\t\"name\": \"Data Type\",\n\t\t\"description\": \"Defines a type of plug-in that defines a data type, so that different components are certain they can interact with each other via this data type.\",\n\t\t\"version\": 1,\n\t\t\"dependencies\": {},\n\n\t\t\"type\": { #This is a \"plug-in type\" plug-in.\n\t\t\t\"type_name\": \"data\",\n\t\t\t\"api\": datatype.data,\n\t\t\t\"validate_metadata\": validate_metadata\n\t\t}\n\t}", "title": "" }, { "docid": "dfa3f03046a1025b0aed0a8109d953a2", "score": "0.6308763", "text": "def parse_package_meta(package_path, package, require=True):\n # TODO: Allow passing in full model path and only require one argument\n # instead of path and package name. This lets us avoid passing in an awkward\n # empty string in spacy.load() if user supplies full model path.\n location = package_path / package / 'meta.json'\n if location.is_file():\n return read_json(location)\n elif require:\n raise IOError(\"Could not read meta.json from %s\" % location)\n else:\n return None", "title": "" }, { "docid": "1f3fb9e0a972db552788b8b81692123d", "score": "0.6305958", "text": "def get_metadata(data):\n return _read_spss(data, metadata_only = True)[1]", "title": "" }, { "docid": "1b3e3b4f39add93e562073fc235b1d56", "score": "0.6303926", "text": "def _read_metadata_element(self):\n if self.manifest is not None:\n metadata = self.manifest.xpath('/pkg:package/pkg:metadata', namespaces=ns)\n\n if metadata is not None:\n return metadata[0]", "title": "" }, { "docid": "e4cd6c4d38b31a60b45e5d4378a61fe3", "score": "0.6296391", "text": "def info(repo=False):\n if repo:\n return False\n return (\"pkg_info\", \"-aoQ\")", "title": "" }, { "docid": "7bafe305358beaf2759e470f28f97a32", "score": "0.62835026", "text": "def export_getCompatibleMetadata(self,metaDict):\n return fcDB.dmeta.getCompatibleMetadata(metaDict, self.getRemoteCredentials())", "title": "" }, { "docid": "ada907d6b390f9e7b4abcd8cf9b26dbc", "score": "0.6273457", "text": "def metadata(self) -> Optional['outputs.ServiceTemplateMetadata']:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "28e2471c6dedce5d119dd1e3efba9cba", "score": "0.62575465", "text": "def get_info(self):\n return self.common.get_info()", "title": "" }, { "docid": "6805d9231865395d1d2830bd4261f43a", "score": "0.6234828", "text": "def metadata(self):\n information = {'competition': self.competition,\n 'wordnet_version': self.info['wn_version'],\n 'answers_downloaded_from': self.info[\n 'answers_downloaded_from'],\n 'date_of_downloading': self.info[\n 'answers_downloaded_at'],\n 'paper': self.info['paper'],\n 'bibtex': self.info['bibtex']}\n\n # df = pd.DataFrame.from_dict({'categories': 
list(information.keys()),\n # 'values': list(information.values())})\n\n # display(df)\n wordnet_utils.print_dict(information)", "title": "" }, { "docid": "6db141588facaab15029599d2fca2f1d", "score": "0.62202054", "text": "def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "6db141588facaab15029599d2fca2f1d", "score": "0.62202054", "text": "def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "50621d403d46ad868502580266c8cb08", "score": "0.6218983", "text": "def GetMetadata(self, rpc, request, response):\n raise NotImplementedError", "title": "" }, { "docid": "50b9ac5cfee9a834121015d353b2ce21", "score": "0.62061596", "text": "def meta(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"meta\")", "title": "" }, { "docid": "9f3f733cf94e951aecf44bec44a9f109", "score": "0.6199301", "text": "def GetMetadata(self):\n basic_data = {\n 'spark_service': self.SERVICE_NAME,\n 'spark_svc_cloud': self.CLOUD,\n 'spark_cluster_id': self.cluster_id,\n 'spark_cluster_zone': getattr(self, 'zone', None) or 'unknown'\n }\n # TODO grab this information for user_managed clusters.\n if not self.user_managed:\n basic_data.update({'num_workers': str(self.spec.worker_group.vm_count),\n 'worker_machine_type':\n str(self.spec.worker_group.vm_spec.machine_type)})\n return basic_data", "title": "" }, { "docid": "531b6d11b6df849a0e3b204584ffbed0", "score": "0.6188202", "text": "def getInternalMetadata(self, **kwargs):\n return {'omeinfo': self._omeinfo}", "title": "" }, { "docid": "980b08b8218793f4694888b53954bdb7", "score": "0.61798024", "text": "def getInfo():", "title": "" } ]
4370a2db89aa376572fa763677d8f568
Add the hooks to the bot.
[ { "docid": "f6a523a74b100be16e1871ae1e1ba3f3", "score": "0.579434", "text": "def setup(bot: commands.Bot):\n bot.before_invoke(before_invoke)\n bot.after_invoke(after_invoke)", "title": "" } ]
[ { "docid": "42d07c0d20fbaab7a3ce210e4325d89b", "score": "0.7362344", "text": "def hook_events(self):\n for cmd in self.commands:\n self.core.add_command(cmd, self)\n for event in self.hooks:\n self.core.add_callback(event, self)\n for cmd in self.rawhooks:\n self.core.add_raw(cmd, self)", "title": "" }, { "docid": "5450bb7786ed53fe294cc35583a4b091", "score": "0.7053781", "text": "def set_game_hooks(hooks: GameHooks) -> None:\n global _game_hooks\n _game_hooks = hooks", "title": "" }, { "docid": "1d717911c38f804986b4721041fd10b5", "score": "0.69996136", "text": "def register_hooks(self, hooks):\n hooks = [h for h in hooks if h is not None]\n for h in hooks:\n assert isinstance(h, HookBase)\n # To avoid circular reference, hooks and trainer cannot own each other.\n # This normally does not matter, but will cause memory leak if the\n # involved objects contain __del__:\n # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/\n h.trainer = weakref.proxy(self)\n self._hooks.extend(hooks)", "title": "" }, { "docid": "1d717911c38f804986b4721041fd10b5", "score": "0.69996136", "text": "def register_hooks(self, hooks):\n hooks = [h for h in hooks if h is not None]\n for h in hooks:\n assert isinstance(h, HookBase)\n # To avoid circular reference, hooks and trainer cannot own each other.\n # This normally does not matter, but will cause memory leak if the\n # involved objects contain __del__:\n # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/\n h.trainer = weakref.proxy(self)\n self._hooks.extend(hooks)", "title": "" }, { "docid": "8a249d756642ea01dd9ad4457bff3712", "score": "0.6859483", "text": "def enable_hooks(self) -> None:\n self.hooks_enabled = True", "title": "" }, { "docid": "11a1126a6ab2f0b85fc66e57b16e028e", "score": "0.6821246", "text": "def putInHooks ():\n\n # Need to modify Leo's Kernel first\n # Overwrite existing Leo methods.\n g.funcToMethod(replaceTargetFileIfDifferent,leoAtFile.atFile)\n g.funcToMethod(massageComment,leoImport.leoImportCommands)\n\n # Add new methods used by this plugin to various classes.\n g.funcToMethod(openForRead,leoAtFile.atFile)\n g.funcToMethod(openForWrite,leoAtFile.atFile)\n g.funcToMethod(gotoLineNumberOpen,leoCommands.Commands)\n g.funcToMethod(applyLineNumberMappingIfAny, leoCommands.Commands)", "title": "" }, { "docid": "88d6493b3885f6278f8212ed6423e014", "score": "0.67838776", "text": "def prepare_hooks(self, hooks):\r\n for event in hooks:\r\n self.register_hook(event, hooks[event])", "title": "" }, { "docid": "88d6493b3885f6278f8212ed6423e014", "score": "0.67838776", "text": "def prepare_hooks(self, hooks):\r\n for event in hooks:\r\n self.register_hook(event, hooks[event])", "title": "" }, { "docid": "cc16bf846e0425e5913d8b44001ce900", "score": "0.6763753", "text": "def configure_hooks_feature(self):\n hooks_folder = getattr(config, \"HOOKS_FOLDER\", \"./hooks\")\n if not os.path.exists(hooks_folder):\n self.logger.info(\"Hooks folder '%s' doesn't exist, creating it\" % hooks_folder)\n os.makedirs(hooks_folder)\n self.hook_files = glob.glob(os.path.join(hooks_folder, \"*.py\"))", "title": "" }, { "docid": "8e754d61588c30b277f496a6f5fe68f2", "score": "0.6674685", "text": "def _init_hooks(self):\n self._hooks = Hooks()\n self._hooks.ready_to_run = self._init_hexrays_hooks\n self._hooks.hook()", "title": "" }, { "docid": "8e754d61588c30b277f496a6f5fe68f2", "score": "0.6674685", "text": "def _init_hooks(self):\n self._hooks = Hooks()\n self._hooks.ready_to_run = 
self._init_hexrays_hooks\n self._hooks.hook()", "title": "" }, { "docid": "0489a542c08e02331af5086e2981ff82", "score": "0.66713774", "text": "def hook(self):\n\n\t\tself.db.begin()\n\n\t\tret = self.db.get_hook()\n\t\texisting = [dict(_)['name'] for _ in ret]\n\n\t\t# Add hook modules\n\t\tif len(self.args.hook):\n\t\t\tfor h in self.args.hook:\n\t\t\t\th = h.strip()\n\t\t\t\tif h in existing:\n\t\t\t\t\tprint(\"Hook module '%s' already in hook list\" % h)\n\t\t\t\telse:\n\t\t\t\t\tself.db.add_hook(h)\n\t\t\t\t\tself.db.commit()\n\t\t\t\t\tprint(\"Added: %s\" % h)\n\n\t\t# List all hooks\n\t\telse:\n\t\t\tif len(existing):\n\t\t\t\tprint(\"Existing hooks:\")\n\t\t\t\texisting = sorted(existing)\n\n\t\t\t\tfor h in existing:\n\t\t\t\t\tprint(\"\\t%s\" % h)\n\n\t\t\telse:\n\t\t\t\tprint(\"No existing hooks\")", "title": "" }, { "docid": "b303d72b01b79b91713af6bf520ec161", "score": "0.66632324", "text": "def go_hook_all():\n global hooks\n priority = ''\n version = weechat.info_get('version_number', '') or 0\n # use high priority for hook to prevent conflict with other plugins/scripts\n # (WeeChat >= 0.3.4 only)\n if int(version) >= 0x00030400:\n priority = '2000|'\n for hook, value in HOOK_COMMAND_RUN.items():\n if hook not in hooks:\n hooks[hook] = weechat.hook_command_run(\n '%s%s' % (priority, value[0]),\n value[1], '')\n if 'modifier' not in hooks:\n hooks['modifier'] = weechat.hook_modifier(\n 'input_text_display_with_cursor', 'go_input_modifier', '')", "title": "" }, { "docid": "b14f7b51831e081ee793e34d442312a4", "score": "0.6640559", "text": "def register_hooks(self, hooks: List[Hook]):\n if not hasattr(self, '_hooks'):\n self._hooks: List[Hook] = []\n for hook in hooks:\n hook.register()\n self._hooks.append(hook)", "title": "" }, { "docid": "c29c6b427d21004ac609bfb2d25391cc", "score": "0.6605009", "text": "def hook_all():\n global hook_command_run, hooks\n priority = \"\"\n version = weechat.info_get(\"version_number\", \"\") or 0\n # use high priority for hook to prevent conflict with other plugins/scripts\n # (WeeChat >= 0.3.4 only)\n if int(version) >= 0x00030400:\n priority = \"2000|\"\n for hook, value in hook_command_run.items():\n if hook not in hooks:\n hooks[hook] = weechat.hook_command_run(\"%s%s\" % (priority, value[0]),\n value[1], \"\")\n if \"modifier\" not in hooks:\n hooks[\"modifier\"] = weechat.hook_modifier(\n \"input_text_display_with_cursor\", \"input_modifier\", \"\")", "title": "" }, { "docid": "28d97868f2cac67cd0e8cc6006ca1d2c", "score": "0.65615565", "text": "def add_hook(self, hook):\n if hook not in [type(me) for me in self.hooks]:\n self.__hooks += [hook()]", "title": "" }, { "docid": "0cb8b3979e2bf906d4dc8356ad14ebb7", "score": "0.6503424", "text": "def _setup_hooks(self): # pragma no cover\n pass", "title": "" }, { "docid": "fa0b1cadc33681909a5166075eac3202", "score": "0.6501873", "text": "def addhook(self, name, uname, function):\n self.server.addhook(name, \"%s:%s\" % (self.index, uname), function)\n self.hooks.append((name, \"%s:%s\" % (self.index, uname)))", "title": "" }, { "docid": "c5c7e9824dc521cf94e916cb68430537", "score": "0.64930224", "text": "def hooks_initializer(initializer):\n # Register default build hooks\n initializer.register_hook(hooks.add_versions_chooser)\n initializer.register_hook(hooks.add_warning)", "title": "" }, { "docid": "4344c7329eb0f2cf2e62da3a45bb6d00", "score": "0.6403244", "text": "def _register_required_hooks(self):\n\n packaging_services.register_hook(PackagingHook())", "title": "" }, { "docid": 
"1f736d2001be5931c63ce218fa4d8bb9", "score": "0.6390347", "text": "def register_hooks(self):\n\n self.on_pre_GET += on_list\n self.on_delete_item += on_delete\n self.on_insert += init_document_with_acl\n self.on_update += on_update", "title": "" }, { "docid": "fb89b8f5d17bf4cb987b08cf0ec8efdc", "score": "0.63517725", "text": "def pytest_addhooks(pluginmanager) -> None:\n from . import newhooks\n\n pluginmanager.add_hookspecs(newhooks)", "title": "" }, { "docid": "08edef69582cf0ea9d879b20c97d29c6", "score": "0.63485664", "text": "def init_hooks():\n raise NotImplementedError()", "title": "" }, { "docid": "988c259581ed00239a5e4b0bb9dfd80f", "score": "0.6284471", "text": "def run_hooks(self, config):\n for hook in self.setup_hooks:\n hook(config)", "title": "" }, { "docid": "ddfac105d4a5917c847228d02b558d0c", "score": "0.6270114", "text": "def register_hook(self, hook):\n self._hooks.append(hook)", "title": "" }, { "docid": "63d9dee33b36016b6d95e7ff7079755f", "score": "0.6270062", "text": "def pytest_addhooks(pluginmanager):\n from seekret.apitest.pytest_plugin import newhooks\n pluginmanager.add_hookspecs(newhooks)", "title": "" }, { "docid": "8b98798e5110a7ae6e8ac8286d29d7c3", "score": "0.6247726", "text": "def add_events(self):\n self.bot.event(self.on_ready)", "title": "" }, { "docid": "fed26d9ec1952a689b322a0a269bff07", "score": "0.6242092", "text": "def _addAEHooks():\n # Realflow uses the AEshapeHooks global variable as a convention for sharing AEshapeTemplate overrides,\n # so we will too, unless a more popular convention is found.\n pm.melGlobals.initVar('string[]', 'AEshapeHooks')\n hooks = list(pm.melGlobals['AEshapeHooks'])\n import mtoa.ui.ae.templates\n procName = utils.pyToMelProc(mtoa.ui.ae.templates.loadArnoldTemplate, [('string', 'nodeName')], useName=True)\n hooks.append(procName)\n pm.melGlobals['AEshapeHooks'] = hooks", "title": "" }, { "docid": "afbcf0e45ef1bcc1330e7d298d50d166", "score": "0.62211084", "text": "def add_hook(self, command, method):\n self.hooks[command] = method", "title": "" }, { "docid": "b24452a621b7bae3da58941d871f16db", "score": "0.6214256", "text": "def install_hooks(self, pipe):\n assert len(self.hooks) == 0\n text_encoder_targets = self._get_target_modules(pipe.text_encoder, \"lora_te\", [\"CLIPAttention\", \"CLIPMLP\"])\n unet_targets = self._get_target_modules(pipe.unet, \"lora_unet\", [\"Transformer2DModel\", \"Attention\"])\n for name, target_module in text_encoder_targets + unet_targets:\n hook = LoRAHook()\n hook.install(target_module)\n self.hooks[name] = hook\n # print(name)\n\n self.device = pipe.device\n self.dtype = pipe.unet.dtype", "title": "" }, { "docid": "f5be4d905626ef49be5a636ab0ea8ee5", "score": "0.6170191", "text": "def game_hooks() -> GameHooks:\n return _game_hooks", "title": "" }, { "docid": "1484d616169295d164f73b32366494ad", "score": "0.6154442", "text": "def run(self, hooks):\n if settings['disable_hooks']:\n return\n\n queue = []\n for value in hooks.values():\n value = seedlib.apply_template(value, self.values)\n queue.append(value)\n \n for command in queue:\n print ('info: running hook \"%s\"' % command)\n seedlib.run_pipe(command)", "title": "" }, { "docid": "482eb32d67492d3848f3b2001ca25120", "score": "0.61518055", "text": "def setup(bot):\n bot.add_cog(Events(bot))", "title": "" }, { "docid": "e40ea95a597c036314c0ac0b5a4bb00d", "score": "0.6124972", "text": "def startHook(self):\n # Empty ", "title": "" }, { "docid": "e5b43f0224a2a54a6a30b1fdb6d5b8f1", "score": "0.6113805", "text": "def set_bottle_hooks(self, app, 
shutdown, rollback):\n if hasattr(app, 'hook'):\n app.hook('after_request')(shutdown)", "title": "" }, { "docid": "925a2a709e21bed37d6e591bc6029891", "score": "0.611031", "text": "def install_hooks(config):\n config.on_create.extend([copy_environ, hide_cookie])\n config.on_create.insert(0, update_report)", "title": "" }, { "docid": "f6c60de55b36b553993bc477dfb188a7", "score": "0.607913", "text": "def add_hook(**_kwargs):\n hook = import_hook.create_hook(\n hook_name=__name__,\n parse_source=parse_source,\n )\n return hook", "title": "" }, { "docid": "b44b466b97d7a6660a00d78c539037c5", "score": "0.6075598", "text": "def add_hook(self, hook):\n hook_addr = hook.identifier\n \n if self.hooks.has_key(hook_addr):\n self.hooks[hook_addr].append(hook)\n logging.debug(\"Adding another hook for addr 0x%08x (name: %s)\" % \\\n (hook_addr,\n hook.name))\n else:\n self.hooks[hook_addr] = [hook]", "title": "" }, { "docid": "a8255d4c578bc301629d6436a109124a", "score": "0.60621554", "text": "async def initialize_code_hook_functions(self):\n for ability in await self.data_svc.locate('abilities'):\n for executor in ability.executors:\n if executor.code and executor.language:\n executor.HOOKS[executor.language] = self.generate_ability_execution_method", "title": "" }, { "docid": "8b80ce9f5a3f87e9bb9c1642bdcfb4a3", "score": "0.60540694", "text": "def setup(bot):\n bot.add_cog(EventLoops(bot))", "title": "" }, { "docid": "0c45dce46acba5ba5710436ac8ac26b6", "score": "0.6000275", "text": "def setup(bot):\n bot.add_cog(Logs(bot))", "title": "" }, { "docid": "36d5b30cf031b019fc2c3b8fed1a6dee", "score": "0.5998982", "text": "def setup(bot: Bot) -> None:\n bot.add_cog(Tags(bot))", "title": "" }, { "docid": "1944791d46eca2a6bbb32d2297613235", "score": "0.59902436", "text": "def assign_standard_hooks(obj, get, conf):\n hook_names = ['step_hook', 'post_process_hook',\n 'post_process_hook_final', 'pre_process_hook']\n for hook_name in hook_names:\n setattr(obj, hook_name, conf.get_function(get(hook_name, None)))", "title": "" }, { "docid": "c773fd5a7366f71b8bc48cf16391dd3a", "score": "0.59853834", "text": "def _register_hook(self, hook_name, func):\r\n self.hooks.setdefault(hook_name, []).append(func)", "title": "" }, { "docid": "a5a3262430aa75a2dc4230898591f488", "score": "0.59784955", "text": "def add_hook(self, level, location, hook, position=-1):\n\n if location not in self.levels[level].hooks:\n self.levels[level].hooks[location] = []\n\n if position == -1:\n position = len(self.levels[level].hooks[location])\n\n self.levels[level].hooks[location].insert(position, hook)", "title": "" }, { "docid": "cc99595a1169c0fa58e110dd5336a50f", "score": "0.59615684", "text": "def add_hook(**_kwargs):\n hook = import_hook.create_hook(\n hook_name=__name__,\n source_init=source_init,\n transform_source=transform_source,\n )\n return hook", "title": "" }, { "docid": "cc99595a1169c0fa58e110dd5336a50f", "score": "0.59615684", "text": "def add_hook(**_kwargs):\n hook = import_hook.create_hook(\n hook_name=__name__,\n source_init=source_init,\n transform_source=transform_source,\n )\n return hook", "title": "" }, { "docid": "42af3093cb3fc5de769600b2d9bcde98", "score": "0.5953135", "text": "def run_hooks(hooks, data):\n for hook_key, hook_class in hooks.items():\n hook = hook_class(data)\n log.info('Running {} hook'.format(hook_key))\n hook.run()", "title": "" }, { "docid": "b0a8b923b37eba8f732792eb53cb6b71", "score": "0.5936176", "text": "async def run_init_hooks(self) -> None:\n self.logger.info(\"Running init hooks\")\n for hook 
in self.init_hooks:\n self.logger.info(f\"Running init hook for plugin {hook.plugin}\")\n await hook.func() if inspect.iscoroutinefunction(hook.func) else hook.func()", "title": "" }, { "docid": "d65c171195ea46e096e3c1c55ae1c291", "score": "0.5925443", "text": "def on(self, hook):\n\n def decorator(handler):\n self.registry[hook].append(handler)\n\n return handler\n\n return decorator", "title": "" }, { "docid": "792e20dbb3796793cf5123be2589cedc", "score": "0.59254354", "text": "def register_hook(self, func):\r\n\r\n if func not in self.hooks:\r\n self.hooks.append(func)", "title": "" }, { "docid": "992e660201db15b027d78bbf73909ecd", "score": "0.5917185", "text": "def _before_application_run(self):\n\n for hook in self._get_hooks():\n hook.before_application_run()", "title": "" }, { "docid": "4098a25602a66288543c0be8a1720c0a", "score": "0.59140986", "text": "def setup(bot):\n bot.add_cog(Dictionary())", "title": "" }, { "docid": "128feab0e9e690fd164055f3dc6224ce", "score": "0.5912825", "text": "def _after_application_loaded(self):\n\n for hook in self._get_hooks():\n hook.after_application_loaded()", "title": "" }, { "docid": "7a3b2b4a6f0402b41ae4eff4cf4fc845", "score": "0.5884704", "text": "def on_start(self):\n self.add_bot(ScoutManager(bot_player=self))\n self.add_bot(BuildManager(bot_player=self))\n self.add_bot(GatherManager(bot_player=self))\n self.add_bot(DefenseManager(bot_player=self))\n self.init_request_board()", "title": "" }, { "docid": "2a582bbb4bf0a67821652519bd94f5ec", "score": "0.588175", "text": "def add_hook(cls, hook_type, hook_func):\r\n if hook_type not in cls._hooks_map:\r\n cls._hooks_map[hook_type] = []\r\n\r\n cls._hooks_map[hook_type].append(hook_func)", "title": "" }, { "docid": "200dd6ea332f6486cfb958496dd7eafe", "score": "0.5878965", "text": "async def setup(bot) -> None:\n await bot.add_cog(HotReload(bot))", "title": "" }, { "docid": "61cbfc3f4a2d622fbd0030e22de81d12", "score": "0.58777094", "text": "def setup(bot: Bot):\n bot.add_cog(EventoryCog(bot))\n log.info(\"loaded Eventory extension!\")", "title": "" }, { "docid": "269b00d45f4e69768306379771ce086d", "score": "0.5848767", "text": "def setup(bot: Bot) -> None:\n bot.add_cog(Utilities(bot))", "title": "" }, { "docid": "69a5ab4cf1d8372d05b8687ae7d4fdc2", "score": "0.58468145", "text": "def setup(bot):\n bot.add_cog(Tickets(bot))", "title": "" }, { "docid": "bae9f0da4d2e05b9bdea26aa011ab59a", "score": "0.58353835", "text": "def setup(bot: Bot) -> None:\n bot.add_cog(LemonStuff(bot))\n log.info(\"Cog loaded: LemonStuff\")", "title": "" }, { "docid": "9ca07bcea77b5998babc1f8f475dda35", "score": "0.5832757", "text": "def hooks(self):\n return self.__hooks", "title": "" }, { "docid": "b6d22ce1884a7faa025d158e62aa578e", "score": "0.5821258", "text": "def hook_dispatch(self) -> None:\n if sys.argv[2].strip().lower() == \"add\":\n self.add_hook(\n hook_state = sys.argv[3],\n cmd = \" \".join(sys.argv[4:]),\n )\n\n elif sys.argv[2].strip().lower() == \"del\":\n self.del_hook(\n hook_state = sys.argv[3],\n )", "title": "" }, { "docid": "e666c8c599a3a0a56b13e57e58b45c50", "score": "0.58076733", "text": "def _add_trachook(self, req, hookfile, current):\n base_url = self.config.get('trac', 'base_url')\n # try to append hook\n try:\n fp = open(hookfile, 'a')\n fp.writelines(os.linesep)\n if 'REPOS=\"$1\"' not in current:\n fp.writelines('REPOS=\"$1\"' + os.linesep)\n if 'REV=\"$2\"' not in current:\n fp.writelines('REV=\"$2\"' + os.linesep)\n if 'LOG=' not in current:\n fp.writelines('LOG=`/usr/bin/svnlook log -r $REV 
$REPOS`' \\\n + os.linesep)\n if 'AUTHOR=' not in current:\n fp.writelines('AUTHOR=`/usr/bin/svnlook author -r $REV ' + \\\n '$REPOS`' + os.linesep)\n if 'TRAC_ENV=' not in current:\n fp.writelines('TRAC_ENV=\"'+self.env.path+'\"' + os.linesep)\n if 'TRAC_URL=' not in current:\n fp.writelines('TRAC_URL=\"'+base_url+'\"' + os.linesep)\n fp.writelines('/usr/bin/python /usr/share/trac/contrib/' + \\\n 'trac-post-commit-hook \\\\' + os.linesep)\n fp.writelines('-p \"$TRAC_ENV\" \\\\' + os.linesep)\n fp.writelines('-r \"$REV\" \\\\' + os.linesep)\n fp.writelines('-u \"$AUTHOR\" \\\\' + os.linesep)\n fp.writelines('-m \"$LOG\" \\\\' + os.linesep)\n fp.writelines('-s \"$TRAC_URL\"' + os.linesep)\n fp.writelines(os.linesep)\n fp.close()\n except Exception:\n raise TracError(\"Can't write repository hook %s\" % hookfile)", "title": "" }, { "docid": "b1a651ece69c23f871ae87944408d516", "score": "0.5799101", "text": "def run_hooks(cls, hook_type, *args, **kwargs):\r\n hook_funcs = cls._hooks_map.get(hook_type) or []\r\n for hook_func in hook_funcs:\r\n hook_func(*args, **kwargs)", "title": "" }, { "docid": "4fcf31c68e866cf847c0a5373217c2b3", "score": "0.5785056", "text": "def register_default_hooks(self, *hooks: Hook, as_group: bool = True):\n self.hooks.extend(hooks)\n\n if as_group and self.group is not None:\n for g in get_group_by_name(self.group):\n if g is not self:\n g.register_hooks(*hooks, as_group=False)", "title": "" }, { "docid": "396772b6a60c6762461d747ec1f4b8dc", "score": "0.5783806", "text": "async def setup(bot) -> None:\n await bot.add_cog(Gambling(bot))", "title": "" }, { "docid": "f50c868844e933e05acf6e0224d81642", "score": "0.57818186", "text": "def install_custom_hook(self, hook_name):\n self._install_custom_hook(hook_name)", "title": "" }, { "docid": "e677a9f8d0ec6ba94293c47993d275df", "score": "0.5769153", "text": "def add_hook(self,\n hook_state: str,\n cmd: str,\n ) -> None:\n state = hook_state.strip().lower()\n\n if state not in self.valid_hook_states:\n print(f\"Invalid hook state: {hook_state}. 
Valid states are: {self.valid_hook_states}\")\n return\n\n # At this point we simply overwrite any existing command\n # and store it to the data file\n data = self.load()\n data[\"hooks\"][state] = cmd\n self.save(data)\n\n # If cmd is None this function as been called from within del_hook()\n if cmd is None:\n print(f\"Deleted hook command for state {state}\")\n else:\n print(f\"Added hook command for state {state}: {cmd}\")", "title": "" }, { "docid": "b8c18e6f43da52ca2d550b82ddba2845", "score": "0.5764374", "text": "def install_api_hooks(dbg, dll_api_hooks):\n # _pt_log(\"remain system dlls to install api hook: %d\" % (len(dll_api_hooks)))\n\n new_sys_dll = dbg.system_dlls[-1]\n dll_name = new_sys_dll.name.lower()\n\n for (dll_name, api_ctrl_list) in dll_api_hooks.items():\n if dbg.check_has_system_dll(dll_name):\n for api_ctrl in api_ctrl_list:\n dbg_set_api_bp(dbg, api_ctrl)\n del dll_api_hooks[dll_name]", "title": "" }, { "docid": "e63584e75c449d989e5334871a61d146", "score": "0.574739", "text": "def setup(bot):\n bot.add_cog(Messages(bot))", "title": "" }, { "docid": "ba6688fa7d65bc9590cd3596cc6eead6", "score": "0.5747233", "text": "def InstallHook(self):\r\n pass", "title": "" }, { "docid": "e17e625fb36c2298dbac9f5647e71522", "score": "0.5714983", "text": "def setup(bot: Bot) -> None:\n bot.add_cog(MessageLogCog(bot))", "title": "" }, { "docid": "efb0f516bd6a1e7fe91485da5245ead6", "score": "0.5709663", "text": "def registerhooks(entry=[], exit=[], *args, **kwargs):\n def wrap(f):\n _kwargs = dict(kwargs)\n _kwargs.update({'entry': entry, 'exit': exit})\n def wrapped_f(*args, **kwargs):\n return f(*args, **kwargs)\n wrapped_f.__hooks = _kwargs\n return wrapped_f\n return wrap", "title": "" }, { "docid": "044a2ad7b8e51eec0d32c615365ec6bc", "score": "0.570686", "text": "def setup(bot):\n bt.INFO('Loading Misc.py')\n bot.add_cog(Misc(bot))", "title": "" }, { "docid": "71fa5b3abe2be9c9c55ea45b9c82dd43", "score": "0.56982493", "text": "async def setup(self) -> None:\n if self.playing_title is not None:\n await self.change_presence(\n activity=discord.Game(name=self.playing_title),\n )\n\n self.tree.clear_commands(guild=None)\n for guild in self.guilds:\n self.tree.clear_commands(guild=guild)\n\n commands = registered_app_commands()\n for command in commands:\n self.tree.add_command(command)\n logger.info(f'registered {len(commands)} app commands')\n\n listeners = registered_listeners()\n for listener in listeners:\n self.add_listener(listener.func, listener.event)\n logger.info(f'registered {len(listeners)} listeners')\n\n if self._cmd_group_exts is not None:\n for ext in self._cmd_group_exts:\n self.tree.add_command(ext)\n await ext.post_init(self)\n logger.info(f'registered {ext.name} command group')\n\n await self.tree.sync()\n for guild in self.guilds:\n await self.tree.sync(guild=guild)", "title": "" }, { "docid": "fccb4dede5aa12489b6365d7dac5f30b", "score": "0.5684656", "text": "def setup(bot):\n bot.add_cog(LoadHelp(bot))", "title": "" }, { "docid": "59ace20514f392a89f07428e9134c5bc", "score": "0.5670448", "text": "def list_hooks(self) -> None:\n data = self.load()\n hooks = data.get(\"hooks\", {})\n\n print(\"Current hook command configuration:\")\n for state in self.valid_hook_states:\n cmd = hooks.get(state, None)\n print(f\" {state:6s} {cmd}\")", "title": "" }, { "docid": "c13e1b3cbfa89944d2a510fb35f6ed58", "score": "0.5668042", "text": "def on_bot_startup(bot):\n pass", "title": "" }, { "docid": "5a8ba5a4c77b793ba734b50ced6e7aaf", "score": "0.56633395", "text": "def 
register(name):\r\n global SUPPORTED_HOOKS\r\n if not name in SUPPORTED_HOOKS:\r\n SUPPORTED_HOOKS.append(name)", "title": "" }, { "docid": "5ad29e41de5a20f7deb3ed694f518194", "score": "0.5659891", "text": "def add_permissions_hook(self, hook: commands.CheckPredicate) -> None:\n self._permissions_hooks.append(hook)", "title": "" }, { "docid": "85d8761c3de9dfeea1cd5f99b7ec825c", "score": "0.56558925", "text": "def register_hook(self, event, hook):\r\n\r\n if isinstance(hook, collections.Callable):\r\n self.hooks[event].append(hook)\r\n elif hasattr(hook, '__iter__'):\r\n self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))", "title": "" }, { "docid": "3258ecf3f6e2c20cf1fd7f7cecca9cc6", "score": "0.5652265", "text": "def get_hooks():\n tensor_to_log = {'loss': loss}\n # tensor_to_log = {}\n logging_tensor_hook = tf.train.LoggingTensorHook(\n tensors=tensor_to_log,\n every_n_iter=2\n )\n return [logging_tensor_hook]", "title": "" }, { "docid": "55c7449a2267ac9fd64c1310ad51e382", "score": "0.56491655", "text": "def apply_hooks():\n for line in fileinput.input():\n std_input = line.split(\" \")\n oldrev, newrev, refname = [item.strip() for item in std_input]\n # Check for zero commit, check branch deletions\n # also, avoid new package additions\n if ZERO_COMMIT in (oldrev, newrev):\n continue\n # insert new hooks here\n package_start_build(oldrev, newrev, refname)\n # Pass in 'length' as an optional argument\n # default 'length' of feed is 499\n write_rss_feed(oldrev, newrev, refname, length=499)\n return", "title": "" }, { "docid": "f70c1c6821cc9de1cb8cab705ac6ee85", "score": "0.5649146", "text": "def setup(bot: Bot) -> None:\n bot.add_cog(Internal(bot))", "title": "" }, { "docid": "0f6a737d4f0384ad0b93c99010ebf635", "score": "0.5637375", "text": "def hook(self, server=None):\n\n if server is not None:\n logging.debug('Adding hook route to web server')\n server.add_route(\n Wrapper(callable=self.get_hook(),\n route=self.context.get('server.hook', '/hook')))\n\n if (self.context.get('server.binding') is not None\n and self.context.get('server.url') is not None):\n\n self.space.register(\n hook_url=self.context.get('server.url')\n + self.context.get('server.hook', '/hook'))", "title": "" }, { "docid": "1b50dc58fd7d07d03898d7d1205095a7", "score": "0.56359833", "text": "def setup(bot):\n\n bot.add_cog(Eval(bot))", "title": "" }, { "docid": "b683c47d5bdae345391754b51c5bac6f", "score": "0.5632748", "text": "def add_callback(self, hook, function, pre=False):\n if hook not in self._hooks:\n raise Exception('This program has no hook named \"%s\"' % hook)\n hook_def = self._hook_defs.get(hook, None)\n\n if (hook_def is None or not isinstance(hook_def, FunctionChain)):\n raise TypeError(\"Cannot add callback to hook '%s'; not a \"\n \"FunctionChain. (%s)\" % (hook, type(hook_def)))\n\n if pre:\n hook_def.insert(0, function)\n else:\n hook_def.append(function)\n\n # TODO: remove or resurrect\n #self._install_dep_callbacks(function)\n self._need_build = True", "title": "" }, { "docid": "d4499e1842dac2feaba1f51e52a73f48", "score": "0.56314707", "text": "def run_hooks():\n\n hookdir = '/usr/lib/oem-config/post-install'\n\n if os.path.isdir(hookdir):\n # Exclude hooks containing '.', so that *.dpkg-* et al are avoided.\n hooks = filter(lambda entry: '.' 
not in entry, os.listdir(hookdir))\n child_env = dict(os.environ)\n child_env['DEBIAN_FRONTEND'] = 'noninteractive'\n if 'DEBIAN_HAS_FRONTEND' in child_env:\n del child_env['DEBIAN_HAS_FRONTEND']\n for hookentry in hooks:\n hook = os.path.join(hookdir, hookentry)\n if os.access(hook, os.X_OK):\n # Errors are ignored at present, although this may change.\n subprocess.call([hook], env=child_env)", "title": "" }, { "docid": "1b864916beac1e7a3522c10bae5c0f74", "score": "0.56313306", "text": "def run_hook(db, hook_name, **kwargs):\n\n\t# Get all hooks in the DB\n\thooks = db.get_hook()\n\thooks = [dict(_)['name'] for _ in hooks]\n\n\t# Iteratue through the hooks\n\tfor hook in hooks:\n\t\t# Try importing the hook module and finding any functions that have used the ydl.hook decorate\n\t\t# which means it has a _hooks attribute indicating which hooks it accepts\n\t\ttry:\n\t\t\tz = importlib.import_module(hook)\n\t\t\tfor x in dir(z):\n\t\t\t\to = getattr(z, x)\n\t\t\t\tif hasattr(o, '_hooks') and hook_name in o._hooks:\n\t\t\t\t\to(hook_name, db, **kwargs)\n\t\t\t\tdel o\n\t\t\tdel z\n\t\texcept Exception as e:\n\t\t\ttraceback.print_exc()\n\t\t\t# Move along", "title": "" }, { "docid": "2cfa6dc8d943c488df97857dd75522ee", "score": "0.5624743", "text": "def setup(bot: Bot) -> None:\n bot.add_cog(Reminder(bot))\n log.info(\"Cog loaded: reminder\")", "title": "" }, { "docid": "e7df076bc40f988cd86f24cb3e5db89e", "score": "0.56241655", "text": "def setup(bot: Bot) -> None:\n bot.add_cog(SpookyEightBall())", "title": "" }, { "docid": "53fa260f6f97a677c52fabbf44b86b38", "score": "0.5622936", "text": "def add_commands(self, *args):\n for command in args:\n self._script += command + '\\n'", "title": "" }, { "docid": "7496c40c7d9e4b36c3df9cb62d2c5ad4", "score": "0.55818033", "text": "def run(self, hook, *args, **kwargs):\n\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n\n for logger in self._callbacks[hook]:\n logger['callback'](*args, **kwargs)", "title": "" }, { "docid": "9499c2213ce0272e0a32ffd4439f8e08", "score": "0.5580782", "text": "def _init_airflow_core_hooks(self):\n core_dummy_hooks = {\n \"generic\": \"Generic\",\n \"email\": \"Email\",\n \"mesos_framework-id\": \"Mesos Framework ID\",\n }\n for key, display in core_dummy_hooks.items():\n self._hooks_lazy_dict[key] = HookInfo(\n hook_class_name=None,\n connection_id_attribute_name=None,\n package_name=None,\n hook_name=display,\n connection_type=None,\n connection_testable=False,\n )\n for cls in [FSHook, PackageIndexHook]:\n package_name = cls.__module__\n hook_class_name = f\"{cls.__module__}.{cls.__name__}\"\n hook_info = self._import_hook(\n connection_type=None,\n provider_info=None,\n hook_class_name=hook_class_name,\n package_name=package_name,\n )\n self._hook_provider_dict[hook_info.connection_type] = HookClassProvider(\n hook_class_name=hook_class_name, package_name=package_name\n )\n self._hooks_lazy_dict[hook_info.connection_type] = hook_info", "title": "" }, { "docid": "4fd2b1eb01258336636e0bd8adc0178b", "score": "0.5579576", "text": "def _trigger_hooks(self, hook_dict, hook_name, **kwargs):\n for attached_callback in hook_dict[hook_name].attached_callbacks:\n try:\n if attached_callback.callback(**kwargs):\n if attached_callback.action == MMHookAct.LOAD_MODULE:\n # load the module!\n self.logger.debug('some hook returned true, '\n 'loading module {}'\n .format(attached_callback.argument))\n # module must accept same kwargs, this is mandatory\n # with this discovery event\n try:\n 
cb_arg = attached_callback.argument\n module_name = cb_arg.get_module_desc().arg_name\n self.load_module(module_name,\n **kwargs)\n except Exception as ex:\n self.logger.error('loading of module of class '\n '\"{}\" failed with: {}'\n .format(cb_arg.__name__,\n str(ex)))\n elif attached_callback.action == MMHookAct.UNLOAD_MODULE:\n # unload the attached module\n self.logger.debug('a hook required module '\n '{} to be unloaded'\n .format(attached_callback.argument))\n self.unload_module(attached_callback.argument)\n except Exception as ex:\n self.logger.error('failed to call function '\n '{} attached to \"{}\" with: {}'\n .format(attached_callback,\n hook_name,\n str(ex)))", "title": "" }, { "docid": "b19e4deca1d85266e102d958ffa41ff0", "score": "0.55784094", "text": "def setup(bot):\n bot.add_cog(ReminderCog(bot))", "title": "" }, { "docid": "8e2d79bb76fc224f12b869daf1739030", "score": "0.55750185", "text": "def runHook(self):\n self.run()", "title": "" }, { "docid": "a78408bd292126b7640c238bfb0c8ec8", "score": "0.55676615", "text": "async def setup(bot: Bot) -> None:\n await bot.add_cog(Patreon(bot))", "title": "" }, { "docid": "05f45412830a1b3a2b2c9b7bdb0d1cbb", "score": "0.556712", "text": "def hooks(self) -> MutableMapping[str, HookInfo | None]:\n self.initialize_providers_hooks()\n # When we return hooks here it will only be used to retrieve hook information\n return self._hooks_lazy_dict", "title": "" } ]
0678ed29cb4ed04785fd661416546558
Check if a path is a broken symlink.
[ { "docid": "202729acc69cd2b9313649493f56ef9f", "score": "0.870235", "text": "def is_symlink_broken(path):\n if os.path.islink(path):\n return not os.path.exists(os.readlink(path))\n else:\n raise TypeError('path={!r} is not a symbolic link'.format(path))", "title": "" } ]
[ { "docid": "71564a56fc163c8e7d254c575b623e30", "score": "0.7800918", "text": "def checklink(path):\n # mktemp is not racy because symlink creation will fail if the\n # file already exists\n name = tempfile.mktemp(dir=path)\n try:\n os.symlink(\".\", name)\n os.unlink(name)\n return True\n except (OSError, AttributeError):\n return False", "title": "" }, { "docid": "1301b1cb5da838eaea975a866ca6bb1f", "score": "0.7800738", "text": "def test_is_symlink(path: Path) -> None:\n assert (path / \"link\").is_symlink()", "title": "" }, { "docid": "b1c304a94d85d6653cded13e445a0b19", "score": "0.7560013", "text": "def is_symlink(self):\n try:\n return S_ISLNK(self.lstat().st_mode)\n except OSError as e:\n if e.errno not in (ENOENT, ENOTDIR):\n raise\n # Path doesn't exist\n return False", "title": "" }, { "docid": "45e1ac62ad49004dab7dc483330692cb", "score": "0.75018257", "text": "def is_symlink(self):\n return False", "title": "" }, { "docid": "896891d9194d8ad8e3c7b93d9f4735d6", "score": "0.7391739", "text": "def test_symlink_on_unknown_file(self):\n root = URI(self.baseurl)\n notexisting_path = root / 'ma' / 'moo'\n tee_path = root / 'helloworld'\n notexisting_path.symlink(tee_path)\n\n self.assertTrue(tee_path.islink())\n self.assertEqual(tee_path.readlink(), notexisting_path)\n self.assertTrue(not notexisting_path.exists())", "title": "" }, { "docid": "9543b5f7076123e979ef23a52736cca5", "score": "0.7297981", "text": "def test_symlink_missing_src(file, source):\n target = source.parent / \"symlink.lnk\"\n missing_source = source.parent / \"missing.txt\"\n try:\n file.symlink(str(missing_source), str(target))\n assert salt.utils.path.islink(str(target))\n finally:\n target.unlink()", "title": "" }, { "docid": "faaf23de79bf29e8868d7acf350b1324", "score": "0.72364473", "text": "def islink(path):\r\n return False", "title": "" }, { "docid": "117fd290d3a937d8daa06de391ab888b", "score": "0.7161739", "text": "def islink(self, path):\n path = ftputil.tool.as_unicode(path)\n try:\n lstat_result = self._host.lstat(\n path, _exception_for_missing_path=False)\n except ftputil.error.RootDirError:\n return False\n else:\n if lstat_result is None:\n # Non-existent path\n return False\n else:\n return stat.S_ISLNK(lstat_result.st_mode)", "title": "" }, { "docid": "7e466325a1eb6f6c43507803db038e87", "score": "0.7131687", "text": "def islink(path):\r\n try:\r\n st = os.lstat(path)\r\n except (os.error, AttributeError):\r\n return False\r\n return stat.S_ISLNK(st.st_mode)", "title": "" }, { "docid": "e1f68b0566a7c14c2badd11901266889", "score": "0.71089053", "text": "def is_symlink(self):\n if os.name == \"posix\": return os.path.islink(self.path)\n if os.name == \"nt\":\n import win32api\n import win32con\n num = win32con.FILE_ATTRIBUTE_REPARSE_POINT\n return bool(win32api.GetFileAttributes(self.path) & num)", "title": "" }, { "docid": "dac7484c1ede678b11d426cfd0dff2ef", "score": "0.71058583", "text": "def islink(path):\n try:\n st = os.lstat(path)\n except (OSError, AttributeError):\n return False\n return stat.S_ISLNK(st.st_mode)", "title": "" }, { "docid": "dac7484c1ede678b11d426cfd0dff2ef", "score": "0.71058583", "text": "def islink(path):\n try:\n st = os.lstat(path)\n except (OSError, AttributeError):\n return False\n return stat.S_ISLNK(st.st_mode)", "title": "" }, { "docid": "4c1aec9e1a0da95eb624cbc8cdd68e79", "score": "0.70764434", "text": "def test_symlink_exists_different(file, source):\n dif_source = source.parent / \"dif_source.txt\"\n target = source.parent / \"symlink.lnk\"\n 
target.symlink_to(dif_source)\n try:\n with pytest.raises(CommandExecutionError) as exc:\n file.symlink(str(source), str(target))\n assert \"Found existing symlink:\" in exc.value.message\n finally:\n target.unlink()", "title": "" }, { "docid": "8816e12f05b8fce70a4dc0a21726d931", "score": "0.70211464", "text": "def test_symlink_exists_different_force(file, source):\n dif_source = source.parent / \"dif_source.txt\"\n target = source.parent / \"symlink.lnk\"\n target.symlink_to(dif_source)\n try:\n file.symlink(str(source), str(target), force=True)\n assert salt.utils.path.readlink(str(target)) == str(source)\n finally:\n target.unlink()", "title": "" }, { "docid": "bb95c61abb8f5354e7e9d4d715e4ef7a", "score": "0.6982841", "text": "def test_readlink(path: Path) -> None:\n assert PurePath(\"file\") == (path / \"link\").readlink()", "title": "" }, { "docid": "f6c1e77dc34b5c401947222e0bbf911a", "score": "0.6982368", "text": "def _is_link(self, path):\n return os.path.islink(os.path.expanduser(path))", "title": "" }, { "docid": "8627bffa0d7e759f5d4710ef759afa77", "score": "0.69228035", "text": "def is_symlink(self):\n if self._is_symlink is None:\n self._is_symlink = os.path.islink(self.storage_url.object_name)\n return self._is_symlink", "title": "" }, { "docid": "fe21b7cecc2c9973a253737963706133", "score": "0.68756634", "text": "def test_constrain_symlinks_to_filesystem() -> None:\n path = root(\n {\n \"file\": \"text\",\n \"dir\": {\"link\": PurePath(\"..\", \"file\")},\n },\n innerpath=PurePath(\"dir\"),\n )\n # XXX Should we rewrite the error message to hide the 'dir/' prefix?\n with pytest.raises(FileNotFoundError, match=\"file not found: dir/file\"):\n assert (path / \"link\").read_text()", "title": "" }, { "docid": "923a01647ae572518a7cb698b01d2789", "score": "0.6867287", "text": "def test_symlink_exists_file(file, source):\n with pytest.helpers.temp_file(\"symlink.txt\", contents=\"Source content\") as target:\n with pytest.raises(CommandExecutionError) as exc:\n file.symlink(str(source), str(target))\n assert \"Existing path is not a symlink:\" in exc.value.message", "title": "" }, { "docid": "41bff0c324b7c71a826535c2a5479b5c", "score": "0.6856679", "text": "def test_symlink(file, source):\n target = source.parent / \"symlink.lnk\"\n try:\n file.symlink(str(source), str(target))\n assert salt.utils.path.islink(str(target))\n finally:\n target.unlink()", "title": "" }, { "docid": "1d7a46dd208c245d602979b6f8c24590", "score": "0.68321455", "text": "def test_symlink_exists_same(file, source):\n target = source.parent / \"symlink.lnk\"\n target.symlink_to(source)\n try:\n before_time = os.stat(str(target)).st_mtime\n ret = file.symlink(str(source), str(target))\n after_time = os.stat(str(target)).st_mtime\n assert before_time == after_time\n assert ret is True\n finally:\n target.unlink()", "title": "" }, { "docid": "73c3309bca6d764f2d429a94c01e2867", "score": "0.68014234", "text": "def is_symlink(\n linkfile: T.Union[str, pathlib.Path],\n src: T.Union[None, str, pathlib.Path] = None,\n) -> bool:\n linkfile = pathlib.Path(linkfile)\n if not linkfile.is_symlink():\n return False\n\n if src is None:\n return True\n\n return linkfile.resolve() == pathlib.Path(src).resolve()", "title": "" }, { "docid": "6a5673d6915d6847708663b71cf2eab7", "score": "0.67836535", "text": "def check_if_relative_link(link):\n\n if not link.startswith(\"http\"):\n if not link.startswith(\"/\"):\n return True\n return False", "title": "" }, { "docid": "1067f2b5ebb747c4d55bca1975463a6a", "score": "0.6782696", "text": 
"def _valid_link(self, link, fail_on_disk_check=True):\n\n if not link in self._links:\n raise Exception('link [{0}] not found in timeline!'.format(link))\n\n if not os.path.islink(self._links[link]['path']):\n msg = 'link [{0}] not found!'.format(self._links[link]['path'])\n if fail_on_disk_check:\n raise Exception(msg)\n self.logger.warning(msg)\n return False\n\n if not os.path.exists(self._links[link]['path']):\n msg = 'link [{0}] is broken!'.format(self._links[link]['path'])\n if fail_on_disk_check:\n raise Exception(msg)\n self.logger.warning(msg)\n return False\n\n return True", "title": "" }, { "docid": "968ba0654b4a586ad577e6ad741ee0ed", "score": "0.6718646", "text": "def is_symlink(self):\n return self.type == self.Type.SYMLINK", "title": "" }, { "docid": "f7331b2df6b96881496979b7a913c461", "score": "0.6693001", "text": "def is_symlink(self):\n if (\n not self.custom_fields\n or resource_util.SYMLINK_METADATA_KEY not in self.custom_fields\n ):\n return False\n return (\n self.custom_fields[resource_util.SYMLINK_METADATA_KEY].lower() == 'true'\n )", "title": "" }, { "docid": "81d65ef687eac4cb308d24311371b04b", "score": "0.66920704", "text": "def islink(self, path: AnyStr) -> bool:\n return self.filesystem.islink(path)", "title": "" }, { "docid": "0a1435142fcb2dce5b3f51f29d763b77", "score": "0.66706246", "text": "def islink (self) :\n try :\n return stat.S_ISLNK (self.mode)\n except OSError :\n return 0", "title": "" }, { "docid": "318e97be28a1d07940b9a69de3c8b1f6", "score": "0.6630757", "text": "def copy_if_symlink_fails(\n src: Union[str, Path],\n dst: Union[str, Path],\n backend_args: Optional[dict] = None,\n) -> bool:\n backend = get_file_backend(\n src, backend_args=backend_args, enable_singleton=True)\n return backend.copy_if_symlink_fails(src, dst)", "title": "" }, { "docid": "8c3050fd54816a14c87c867d2e9a3748", "score": "0.6584976", "text": "def test_through_symlink(self) -> None:\n link1 = os.path.join(self.tmp_dir, \"link1\")\n os.symlink(self.mount, link1)\n\n info1 = json.loads(self.eden.run_cmd(\"info\", link1))\n self.assertEqual(self.mount, info1[\"mount\"])\n\n # Create a non-normalized symlink pointing to the parent directory\n # of the mount\n link2 = os.path.join(self.tmp_dir, \"mounts_link\")\n os.symlink(self.mount + \"//..\", link2)\n mount_through_link2 = os.path.join(link2, self.repo_name)\n\n info2 = json.loads(self.eden.run_cmd(\"info\", mount_through_link2))\n self.assertEqual(self.mount, info2[\"mount\"])", "title": "" }, { "docid": "fc10782d2de274eef803bde25fbaf972", "score": "0.65710294", "text": "def test_symlink_exists_different_atomic(file, source):\n dif_source = source.parent / \"dif_source.txt\"\n target = source.parent / \"symlink.lnk\"\n target.symlink_to(dif_source)\n try:\n file.symlink(str(source), str(target), atomic=True)\n assert salt.utils.path.readlink(str(target)) == str(source)\n finally:\n target.unlink()", "title": "" }, { "docid": "741e573a30028ce18a81c363e3d5a5e9", "score": "0.6550941", "text": "def islink(s):\n\n try:\n import Carbon.File\n return Carbon.File.ResolveAliasFile(s, 0)[2]\n except:\n return False", "title": "" }, { "docid": "3b04b3104b1dd15775e4fb15a939ca49", "score": "0.6517931", "text": "def islink(pathname):\n from os import listdir\n from os.path import isdir, isfile\n import os.path\n pathname = pathname.rstrip(\"/\")\n if os.path.islink(pathname): return True\n elif isfile(pathname) and ishidden(pathname): return True\n elif isdir(pathname):\n entries = listdir(pathname)\n for entry in entries:\n if not 
islink(pathname+\"/\"+entry): return False\n return True\n else: return False", "title": "" }, { "docid": "2c17a63c3f070a9ace82b381e0997881", "score": "0.6510741", "text": "def check_file(filename,path):\n chksumX = chksum_file(\"%s/%s\" % (path,filename))\n (pathY,chksumY) = find_file(filename,path,chksumX)\n if chksumX == chksumY:\n\thardlink(filename,path,pathY)", "title": "" }, { "docid": "6924dff27d54d6cab5d242f412cc20a7", "score": "0.6508725", "text": "def silent_remove_symlink(path): # TODO: test\n result = {'error': ''}\n if symlink_exists(path):\n try:\n os.unlink(path)\n except OSError as e:\n result['error'] = str(e)\n return result", "title": "" }, { "docid": "595ab9271ea617ec1ff274f308897593", "score": "0.64662755", "text": "def has_valid_link(dotfile, dotfiles_dir):\n homefolder_path = get_homefolder_path(dotfile, dotfiles_dir)\n if os.path.islink(homefolder_path) and os.readlink(homefolder_path) == dotfile:\n return True\n\n return False", "title": "" }, { "docid": "b8b6fb9618f82b8429612697328e30ae", "score": "0.6455614", "text": "def is_valid_file(abs_path):\n if os.path.exists(abs_path) and os.path.isfile(abs_path) \\\n and not os.path.islink(abs_path):\n return True\n\n else: return False", "title": "" }, { "docid": "8a6353f544288001a6c15f9a0558223e", "score": "0.6414611", "text": "def ismount_raw(path):\r\n try:\r\n s1 = os.lstat(path)\r\n except os.error as err:\r\n if err.errno == errno.ENOENT:\r\n # It doesn't exist -- so not a mount point :-)\r\n return False\r\n raise\r\n\r\n if stat.S_ISLNK(s1.st_mode):\r\n # A symlink can never be a mount point\r\n return False\r\n\r\n s2 = os.lstat(os.path.join(path, '..'))\r\n dev1 = s1.st_dev\r\n dev2 = s2.st_dev\r\n if dev1 != dev2:\r\n # path/.. on a different device as path\r\n return True\r\n\r\n ino1 = s1.st_ino\r\n ino2 = s2.st_ino\r\n if ino1 == ino2:\r\n # path/.. 
is the same i-node as path\r\n return True\r\n\r\n return False", "title": "" }, { "docid": "8d851ba210920164362039bc52ea42fd", "score": "0.6394149", "text": "def is_linked(self) -> bool:\n return self.parent_path.is_symlink() \\\n and (not self.child_path.is_symlink()) \\\n and self.child_path.samefile(self.parent_path)", "title": "" }, { "docid": "6b635c87ed41938fbf0245d78d827c98", "score": "0.63874704", "text": "def symlinks_supported():\n with tempfile.TemporaryDirectory() as temp_dir:\n original_path = os.path.join(temp_dir, 'original')\n symlink_path = os.path.join(temp_dir, 'symlink')\n os.makedirs(original_path)\n try:\n os.symlink(original_path, symlink_path)\n supported = True\n except (OSError, NotImplementedError):\n supported = False\n return supported", "title": "" }, { "docid": "89bd6c13a93b82e506573bc794a24693", "score": "0.6330474", "text": "def check_path(data_pointer, log, msg):\n if not os.path.exists(data_pointer):\n log.debug(msg)\n return False\n else:\n return data_pointer", "title": "" }, { "docid": "13fb5713db8da8497f3a87fe901a9745", "score": "0.6315982", "text": "def test_symlink_target_relative_path(file, source):\n target = \"..{}symlink.lnk\".format(os.path.sep)\n with pytest.raises(SaltInvocationError) as exc:\n file.symlink(str(source), str(target))\n assert \"Link path must be absolute\" in exc.value.message", "title": "" }, { "docid": "0f87c41104cb5341d0b2c9ad58a4970e", "score": "0.6286066", "text": "def _resolve_symlinks_and_relative_paths(path):\n # type: pathlib.Path -> pathlib.Path\n try:\n path = path.resolve()\n except WindowsError: # TODO: check for more specific error\n pass\n except OSError as e:\n if e.errno == 2:\n pass\n else:\n raise\n\n return path", "title": "" }, { "docid": "d75a5874ba22708116a98d08093a24ae", "score": "0.6252006", "text": "def check_path(path):\n if(os.path.exists(path) == False):\n print_error_and_exit(\"Cannot find %s\" % path)", "title": "" }, { "docid": "3d007389a31ebd7fb16f28910d95422f", "score": "0.62242776", "text": "def symlink(self, target, source):\n if self.debug == True:\n logging.debug(\"in symlink\")\n raise FuseOSError(ENOENT)", "title": "" }, { "docid": "90387b542107e8c28fba24a7fd822dad", "score": "0.62149453", "text": "def path_ok(match_tuple: MatchTuple) -> bool:\n relative_path = match_tuple.link.split(\"#\")[0]\n full_path = os.path.join(os.path.dirname(str(match_tuple.source)), relative_path)\n return os.path.exists(full_path)", "title": "" }, { "docid": "c41d676430289b18beaa7f4c28d60dc6", "score": "0.6214474", "text": "def test_clear_broken_symlink(self):\n nonexistent_file_path = os.path.join(settings.STATIC_ROOT, 'nonexistent.txt')\n broken_symlink_path = os.path.join(settings.STATIC_ROOT, 'symlink.txt')\n os.symlink(nonexistent_file_path, broken_symlink_path)\n self.run_collectstatic(clear=True)\n self.assertFalse(os.path.lexists(broken_symlink_path))", "title": "" }, { "docid": "7db9930863d4dcb3dd9b4a03dbd9b6d5", "score": "0.6211496", "text": "def test_symlinks():\n\n config_file = \"study/profit_symlink.yaml\"\n config = BaseConfig.from_file(config_file)\n base_file = \"./study/run_000/mockup.in\"\n link_file = \"./study/run_000/some_subdir/symlink_link.txt\"\n try:\n run(f\"profit run {config_file}\", shell=True, timeout=TIMEOUT)\n with open(link_file, \"r\") as link:\n with open(base_file, \"r\") as base:\n link_data = link.read()\n base_data = base.read()\n assert link_data == base_data and not link_data.startswith(\"{\")\n finally:\n clean(config)\n run(f\"profit clean --all {config_file}\", 
shell=True, timeout=CLEAN_TIMEOUT)", "title": "" }, { "docid": "de9e9a27b97a6b79b9f1382fe5c6addc", "score": "0.6205026", "text": "def do_ismount(path):\n try:\n s1 = os.lstat(path)\n except os.error as err:\n if err.errno == errno.ENOENT:\n # It doesn't exist -- so not a mount point :-)\n return False\n else:\n raise GlusterFileSystemOSError(\n err.errno, '%s, os.lstat(\"%s\")' % (err.strerror, path))\n\n if stat.S_ISLNK(s1.st_mode):\n # A symlink can never be a mount point\n return False\n\n try:\n s2 = os.lstat(os.path.join(path, '..'))\n except os.error as err:\n raise GlusterFileSystemOSError(\n err.errno, '%s, os.lstat(\"%s\")' % (err.strerror,\n os.path.join(path, '..')))\n\n dev1 = s1.st_dev\n dev2 = s2.st_dev\n if dev1 != dev2:\n # path/.. on a different device as path\n return True\n\n ino1 = s1.st_ino\n ino2 = s2.st_ino\n if ino1 == ino2:\n # path/.. is the same i-node as path\n return True\n\n return False", "title": "" }, { "docid": "d97181c311338ad47abd8855991b9b9a", "score": "0.6189933", "text": "def test_broken_symlink(self):\n path = os.path.join(settings.STATIC_ROOT, 'test.txt')\n os.unlink(path)\n self.run_collectstatic()\n self.assertTrue(os.path.islink(path))", "title": "" }, { "docid": "a4525da4084481db3ff2dc0510d16515", "score": "0.6175588", "text": "def lexists(self, path: AnyStr) -> bool:\n return self.filesystem.exists(path, check_link=True)", "title": "" }, { "docid": "e869be7867b9731154d811f96ce03792", "score": "0.6169737", "text": "def check_if_link_in_use(filepath, link):\n\n if not \"previous\" in link:\n if not in_use_links.get(link):\n new_file_name = remove_extra_from_path(filepath)\n\n if new_file_name.startswith(\"\\\\\"):\n new_file_name = new_file_name.replace(\"\\\\\", \"/\")\n\n if new_file_name != link:\n in_use_links[link] = True", "title": "" }, { "docid": "e3a6e146797833743267c59619036c22", "score": "0.61650556", "text": "def ismount(path):\n try:\n s1 = os.lstat(path)\n except OSError:\n # It doesn't exist -- so not a mount point. :-)\n return False\n else:\n # A symlink can never be a mount point\n if stat.S_ISLNK(s1.st_mode):\n return False\n\n if isinstance(path, bytes):\n parent = join(path, b'..')\n else:\n parent = join(path, '..')\n try:\n s2 = os.lstat(parent)\n except OSError:\n return False\n\n dev1 = s1.st_dev\n dev2 = s2.st_dev\n if dev1 != dev2:\n return True # path/.. on a different device as path\n ino1 = s1.st_ino\n ino2 = s2.st_ino\n if ino1 == ino2:\n return True # path/.. 
is the same i-node as path\n return False", "title": "" }, { "docid": "2e371c24edcbab6afb4eab8ea6a95def", "score": "0.61448544", "text": "def check_path(path):\r\n if os.path.exists(path):\r\n return True\r\n return False", "title": "" }, { "docid": "6bfbb920868b15f86a5bd492a0366588", "score": "0.6144068", "text": "def test_check_permissions_and_broken_symlinks(self, filesystem_world_writable_whitelist, filesystem_ignore_patterns):\n stack = [] # Used for storing directories to parse\n starting_dir = \"/\"\n failed = False\n\n # Helper functions\n is_dir = lambda x: stat.S_ISDIR( x.st_mode)\n is_lnk = lambda x: stat.S_ISLNK(x.st_mode)\n is_sock = lambda x: stat.S_ISSOCK(x.st_mode)\n is_chr = lambda x: stat.S_ISCHR(x.st_mode)\n is_blk = lambda x: stat.S_ISBLK(x.st_mode)\n is_reg = lambda x: stat.S_ISREG(x.st_mode)\n is_world_writable = lambda x: stat.S_IWOTH & x.st_mode\n\n # Start it up\n stack.append(starting_dir)\n while len(stack) > 0:\n # Get actual directory\n directory = stack.pop()\n # Parse through it\n for entry in os.listdir(directory):\n # Stat it\n path = \"%s/%s\" % (directory, entry)\n if path.startswith(\"//\"): # Strip beginning // because of root dir\n path = path[1:]\n\n # Skip this file/dir, has the wrong pattern\n if any([fnmatch.fnmatch(path, p) for p in filesystem_ignore_patterns]):\n sys.stdout.write(\"[ignoring] %s\\n\" % (path,))\n continue\n\n info = os.lstat(path)\n if is_lnk(info):\n # Check if it's broken or not\n if not Test.Shell.exists_in_path(os.readlink(path), os.path.abspath(directory)):\n sys.stderr.write(\"[broken-symlink] %s -> %s\\n\" % (path, os.readlink(path)))\n failed = True\n else:\n if is_dir(info):\n stack.append(path)\n\n # FIXME - determine whether file is truly writable (parent dir\n # may not be readable)\n # Check its permissions, if wrong, append it\n if is_world_writable(info):\n # If the path isn't on the whitelist ... 
we found a failure\n if not any([fnmatch.fnmatch(path, p) for p in filesystem_world_writable_whitelist]):\n sys.stderr.write(\"[world-writable] %s %s\\n\" % (self.fmt_mode_str(info.st_mode), path))\n failed = True\n if failed:\n Test.Fail(msg=\"Test failed\")", "title": "" }, { "docid": "8ffe407236011d5ddfea62710e51f2d8", "score": "0.6135178", "text": "def is_device_link_fail(self):\n return self._tag == 'device_link_fail'", "title": "" }, { "docid": "3d9a697a78f12fdb87286c0b5c3558e3", "score": "0.6132719", "text": "def test_symlink_file5(self):\n src = os.path.join(self.rootdir,\"non-existing-file\") \n target = os.path.join(self.targetdir,os.path.basename(src))\n with self.assertRaises(transfer.TransferError):\n transfer.SymlinkAgent(src,target).transfer()", "title": "" }, { "docid": "771b570903a84e5ab0839ea57aff428e", "score": "0.6085972", "text": "def silent_symlink(src, dst):\n try:\n os.symlink(src, dst)\n except OSError:\n pass", "title": "" }, { "docid": "b346d5a2d9646b6ff0a8844a820c0b6e", "score": "0.60828763", "text": "def readlink(path):\n if not path:\n return path\n\n if path[0] == \"~\":\n path = os.path.expanduser(path)\n\n return run_bash_cmd([\"readlink\", \"-f\", path]) or path", "title": "" }, { "docid": "bdc8a241fd5bbbd50d166cdb6622d9a4", "score": "0.6063484", "text": "def _is_valid_path(path):\r\n try:\r\n urlresolvers.resolve(path)\r\n return True\r\n except urlresolvers.Resolver404:\r\n return False", "title": "" }, { "docid": "a8bae97837e4ff75dd6ca0ac4d853a0f", "score": "0.6037345", "text": "def exists(path, followlinks=True):\n if path is None:\n raise Tools.exceptions.Value(\"Path is not passed in system.fs.exists\")\n found = False\n try:\n st = os.lstat(path)\n found = True\n except (OSError, AttributeError):\n pass\n if found and followlinks and stat.S_ISLNK(st.st_mode):\n if MyEnv.debug:\n Tools.log(\"path %s exists\" % str(path.encode(\"utf-8\")))\n linkpath = os.readlink(path)\n if linkpath[0] != \"/\":\n linkpath = os.path.join(Tools.path_parent(path), linkpath)\n return Tools.exists(linkpath)\n if found:\n return True\n # Tools.log('path %s does not exist' % str(path.encode(\"utf-8\")))\n return False", "title": "" }, { "docid": "dd3c3cb6566686327f09cc836e98684d", "score": "0.60363925", "text": "def check_path(p):\n if not os.path.exists(p):\n msg = 'Error: Path \"{}\" does not exist.'.format(p)\n print_help(msg)", "title": "" }, { "docid": "b6931c31cb4ea78fdd2dcb5957104c1b", "score": "0.6035318", "text": "def check_path(path):\n if (not os.path.isfile(path)):\n raise Exception('The path specified does not point to a file')", "title": "" }, { "docid": "bfdbda55742febd9b0ba06e3c8969c13", "score": "0.6016378", "text": "def symlinks_test(dirname):\n\n links = []\n\n for (path, dirs, files) in os.walk(dirname):\n if path.find('/.git') > -1:\n continue\n\n # search for directory links\n for d in dirs:\n dname = os.path.join(path, d)\n if os.path.islink(dname):\n links.append(os.path.relpath(dname, dirname)+'/')\n\n # search for file links\n for f in files:\n fname = os.path.join(path, f)\n if os.path.islink(fname):\n links.append(os.path.relpath(fname, dirname))\n\n if links:\n return (FAIL, '\\n'.join(links))\n else:\n return (PASS, \"None\")", "title": "" }, { "docid": "a97ceeb3e2a1ef2d4f8add3a4d41507d", "score": "0.60025454", "text": "def test_force_symlink_overwrite(tmpdir):\n src = join(str(tmpdir), \"src\")\n dst = join(str(tmpdir), \"dst\")\n\n with open(src, \"w\") as f:\n f.write(\"Correct.\")\n\n with open(dst, \"w\") as f:\n f.write(\"Wrong.\")\n\n 
utils.force_symlink(src, dst)\n assert os.path.islink(dst)\n\n with open(dst, \"r\") as f:\n assert \"Correct\" in f.read()", "title": "" }, { "docid": "5fbf806aa189307c157cd564dbcd1a8d", "score": "0.59952956", "text": "def fix_all_git_symlinked(topdir):\n # Determine whether or not symlinks need fixing (they don’t if installing\n # from a .tar.gz file)\n with io.open(topdir + r'\\nikola\\data\\symlink-test-link.txt', 'r', encoding='utf-8-sig') as f:\n text = f.read()\n if text.startswith(\"NIKOLA_SYMLINKS=OK\"):\n return -1\n with io.open(topdir + r'\\nikola\\data\\symlinked.txt', 'r', encoding='utf-8-sig') as f:\n text = f.read()\n # expect each line a relpath from git or zip root,\n # smoke test relpaths are relative to git root\n if text.startswith('.'):\n raise Exception(r'Bad data in \\nikola\\data\\symlinked.txt')\n relnames = text.split('\\n')\n relnames = [name.strip().replace('/', '\\\\') for name in relnames]\n relnames = [name for name in relnames if name]\n\n failures = 0\n for name in relnames:\n # build dst path and do some basic validation\n dst = os.path.join(topdir, name)\n # don't access files outside topdir\n if not is_file_into_dir(dst, topdir):\n continue\n if os.path.isdir(dst):\n # assume the file was de-symlinked\n continue\n\n # build src path and do some basic validation\n with io.open(os.path.join(topdir, dst), 'r', encoding='utf-8-sig') as f:\n text = f.read()\n dst_dir = os.path.dirname(dst)\n try:\n src = os.path.normpath(os.path.join(dst_dir, text))\n if not os.path.exists(src):\n # assume the file was de-symlinked before\n continue\n # don't access files outside topdir\n if not is_file_into_dir(src, topdir):\n continue\n except Exception:\n # assume the file was de-symlinked before\n continue\n\n # copy src to dst\n try:\n if os.path.isdir(src):\n os.unlink(dst)\n shutil.copytree(src, dst)\n else:\n shutil.copy2(src, dst)\n except Exception:\n failures += 1\n print(\"*** copy failed for\")\n print(\"\\t src:\", src)\n print(\"\\t dst:\", dst)\n\n return failures", "title": "" }, { "docid": "727d2ea9be0ae6be316b42e299d3ad3b", "score": "0.599236", "text": "def test_hardlink(self):\n # Prevent errors from being caught\n self.tar.errorlevel = 1\n\n self.tar.extract(\"0-REGTYPE\", dirname())\n try:\n # Extract 1-LNKTYPE which is a hardlink to 0-REGTYPE\n self.tar.extract(\"1-LNKTYPE\", dirname())\n except EnvironmentError, e:\n import errno\n if e.errno == errno.ENOENT:\n self.fail(\"hardlink not extracted properly\")", "title": "" }, { "docid": "e9745941b98c710c4c4ec6d21f02e9e1", "score": "0.59648645", "text": "def test_symlink_folder7(self):\n src = self.rootdir\n target = os.path.join(self.targetdir,os.path.basename(src))\n with mock.patch.object(\n transfer.os,\n 'symlink',\n side_effect=OSError(\"Mocked error\")):\n with self.assertRaises(transfer.SymlinkError):\n transfer.SymlinkAgent(src,target).transfer()", "title": "" }, { "docid": "01c8991464103ff5c306a31023d7fce3", "score": "0.5945459", "text": "def can_symlink_to(self, outfile_name, symlink_candidate):\n if symlink_candidate == outfile_name:\n # Symlinking to ourself is *not* ok.\n return False\n\n if symlink_candidate in self._db.transaction_keys():\n # Don't try to symlink if our target file is being updated\n # itself; we can't tell whether we're 'equivalent' to it\n # or not if it's currently being modified!\n return False\n\n symlink_mtime_map = self._db.get(symlink_candidate)\n if symlink_mtime_map is None:\n # We don't have up-to-date-ness information on the candidate.\n return False\n\n # 
outfile_name is currently in a transaction (changed_files()\n # returned false), so we have to get its mtime info that way.\n outfile_mtime_map = self._db.get_transaction(outfile_name)\n\n # Get a copy of mtime_maps that do not include the output file.\n pruned_outfile_mtime_map = outfile_mtime_map.copy()\n pruned_outfile_mtime_map.pop(outfile_name)\n pruned_symlink_mtime_map = symlink_mtime_map.copy()\n pruned_symlink_mtime_map.pop(symlink_candidate)\n\n if (frozenset(pruned_outfile_mtime_map.keys()) !=\n frozenset(pruned_symlink_mtime_map.keys())):\n # This means symlink_candidate has different deps that we\n # do (common case).\n return False\n\n for (k, v) in pruned_outfile_mtime_map.iteritems():\n # This means symlink_candidate has the same deps as us,\n # but those deps aren't up to date.\n # This holds because infile_map has the *current* mtimes of\n # the input files, and if the (db-based) values of\n # symlink_candidate don't match, that means it's out of date.\n symlink_v = pruned_symlink_mtime_map.get(k, (None, None, None))\n if not file_info_equal(v, symlink_v):\n return False\n\n if not file_info_equal(get_file_info(symlink_candidate),\n symlink_mtime_map[symlink_candidate]):\n # This finishes off the 'other_outfile is up-to-date' check.\n # We tested all the input files are up-to-date above, now we\n # need to test the output file too. Note we never bother to\n # do the crc check here; we assume that as a generated file,\n # other_outfile will not suffer from the\n # 'git-changed-my-mtime' problem.\n return False\n\n return True", "title": "" }, { "docid": "1e9b2fc97f39ea65d0aff639dfc352fb", "score": "0.5944636", "text": "def verify_path(self, path):\n if Path(path).exists():\n return True\n return False", "title": "" }, { "docid": "28f897359724e00998034cf686012bf2", "score": "0.59438217", "text": "def check_path(path):\n assert isinstance(path, basestring), \"Expected string or unicode, found %s.\" % type(path)\n try:\n open(path, \"r\")\n except IOError:\n raise IOError('File does not exist: %s' % path)\n return path", "title": "" }, { "docid": "1d2dcd5fddfe2a01bb5a15519baa67b1", "score": "0.59393996", "text": "def test_symlink_folder8(self):\n src = self.rootdir\n target = self.targetdir\n with mock.patch.object(\n transfer.os.path,\n 'exists',\n side_effect=Exception(\"Mocked error\")):\n with self.assertRaises(Exception):\n transfer.SymlinkAgent(src,target).transfer()", "title": "" }, { "docid": "a04cd212041a2c05af0b7a22f90a585d", "score": "0.5926641", "text": "def is_wsl2_path(path: p.PurePath) -> bool:\n return re.match(r\"^/mnt/[a-z]/\", path.as_posix()) is not None", "title": "" }, { "docid": "72eaf06e2133216050eaec901507e2aa", "score": "0.59235525", "text": "def S_ISLNK(mode):\n return S_IFMT(mode) == S_IFLNK", "title": "" }, { "docid": "2b55905af587ce30f9e7fd3e568d64b0", "score": "0.59146184", "text": "def is_malformed_path(self):\n return self._tag == 'malformed_path'", "title": "" }, { "docid": "b32a7f3b0b52be5499eccabc3f8c8143", "score": "0.59071994", "text": "def is_link_ok(link: str) -> bool:\n return not any([re.search(exc, link) for exc in excludes])", "title": "" }, { "docid": "e1dfb6e5ef886289956a028ef3bd3d34", "score": "0.5889858", "text": "def is_safe_path(basedir, path, follow_symlinks=True):\n if basedir is None:\n return True\n if follow_symlinks:\n return os.path.realpath(path).startswith(basedir.encode('utf-8'))\n return os.path.abspath(path).startswith(basedir.encode('utf-8'))", "title": "" }, { "docid": "d5ffdb8f73ffd102df61f9778afce65d", "score": 
"0.5889097", "text": "def valid_permlink(permlink, allow_empty=False):\n if not permlink:\n assert allow_empty, 'permlink cannot be blank'\n return \"\"\n assert isinstance(permlink, str), 'permlink must be string'\n assert len(permlink) <= 256, \"invalid permlink length\"\n return permlink", "title": "" }, { "docid": "d5ffdb8f73ffd102df61f9778afce65d", "score": "0.5889097", "text": "def valid_permlink(permlink, allow_empty=False):\n if not permlink:\n assert allow_empty, 'permlink cannot be blank'\n return \"\"\n assert isinstance(permlink, str), 'permlink must be string'\n assert len(permlink) <= 256, \"invalid permlink length\"\n return permlink", "title": "" }, { "docid": "7413b59212368171f2e2be09dfcd62d8", "score": "0.58867145", "text": "def check_links(params):\n\n\tfor s in _file_links:\n\t\t#print \"check:\", s, cfg.out_dir\n\t\tout_file = os.path.join(cfg.out_dir, s)\n\t\tif not os.path.exists(out_file):\n\t \t\twarning(\"%s: invalid link to '%s'\" % (_file_links[s], s))", "title": "" }, { "docid": "e62c55844b430153e153df0db262b290", "score": "0.588417", "text": "def _resolve_link(path):\r\n paths_seen = []\r\n while islink(path):\r\n if path in paths_seen:\r\n # Already seen this path, so we must have a symlink loop\r\n return None\r\n paths_seen.append(path)\r\n # Resolve where the link points to\r\n resolved = os.readlink(path)\r\n if not isabs(resolved):\r\n dir = dirname(path)\r\n path = normpath(join(dir, resolved))\r\n else:\r\n path = normpath(resolved)\r\n return path", "title": "" }, { "docid": "c8b85174b6188a7a19df4eff61790b94", "score": "0.58817095", "text": "def hardlink(source, link_name):\n try:\n os.link(source, link_name)\n except FileExistsError:\n # It's possible that the user created a different file with the same name as the\n # one we're trying to download. 
Thus we need to check if the inode is different\n # and raise an error in this case.\n source_stat = os.stat(source)\n dest_stat = os.stat(link_name)\n # Check device first because different drives can have the same inode number\n if source_stat.st_dev != dest_stat.st_dev or source_stat.st_ino != dest_stat.st_ino:\n raise", "title": "" }, { "docid": "962858e7a2194d210b9dfedd9b981e87", "score": "0.5866979", "text": "def _force_symlink(\n root: Path, target: Union[str, PurePath], link_to: Union[str, Path]\n) -> None:\n current_symlink = root.joinpath(target)\n try:\n current_symlink.unlink()\n except OSError:\n pass\n try:\n current_symlink.symlink_to(link_to)\n except Exception:\n pass", "title": "" }, { "docid": "adf4744cb862d5219816f94cf0ef9f77", "score": "0.58613175", "text": "def _process_symlink(self, target, path): #aka target_path, dotfile_path\n\n if not os.path.lexists(target):\n self._error = True\n print('\\tmissing target: {}'.format(target))\n return ('DOTFILE NOT FOUND', None)\n\n if not os.path.lexists(path):\n self._changed = True\n if self.args.test:\n return ('missing', None)\n else:\n create_dotfile_link(target, path)\n return ('missing', 'synced')\n\n existing_realpath = os.path.realpath(path)\n if existing_realpath != os.path.realpath(target):\n if self.args.debug:\n print('\\tCurrent target path: {}'.format(existing_realpath))\n status = 'unmanaged' if os.path.lexists(existing_realpath) else 'broken'\n\n if self.args.verbose:\n pass # XXX: TODO: ...\n\n if status != 'broken' and not self.args.force:\n return (status, 'skipping')\n self._changed = True\n if self.args.test:\n return (status, None)\n\n #import pdb; pdb.set_trace()\n remove_any(path)\n create_dotfile_link(target, path)\n return (status, 'removed; synced')\n\n return ('ok', None)", "title": "" }, { "docid": "0f124a12bcf057e1e114461e076db3da", "score": "0.58529246", "text": "def _checkLink(link):\n try:\n link = link.attrs['href']\n except KeyError:\n return -1\n if link[0] == '/':\n link = '://'.join(list(urllib.parse.urlparse(self.address)[0:2])) + link\n elif re.match(r'^mailto|^#', link):\n return None\n good = ['200', '206', '301', '302']\n try:\n status, response = self.http.request(link)\n if status['status'] not in good:\n self.badlinks.append((status['status'], link))\n else:\n self.goodlinks.append(link)\n except httplib2.ServerNotFoundError:\n self.other.append((\"httplib2.ServerNotFoundError\", link))\n except httplib2.ssl.SSLError:\n self.other.append((\"httplib2.ssl.SSLError\", link))\n except ConnectionResetError:\n self.other.append((\"ConnectionResetError\", link))\n except UnicodeError:\n self.other.append((\"UnicodeError\", link))", "title": "" }, { "docid": "af98442fe78a61c211571253d3a076e5", "score": "0.58304495", "text": "def internal_link_checker(filepath, html_str):\n\n # Flag to determine if internal link is broken\n internal_link_error = False\n\n # \n\n problems = []\n relative_links = []\n\n # find all links\n for prefix in [\"href\", \"src\"]:\n\n links = re.findall(\n f\"{prefix}\\s?=\\s?[\\\"']([{allowed_in_link}]+)[\\\"']\", html_str)\n\n # check if link has a dest\n for link in links:\n\n # Check if link is relative path\n is_relative = check_if_relative_link(link)\n\n # Add to relative links list if relative\n if is_relative:\n if not link in relative_links:\n relative_links.append(link)\n\n # Get correct path\n link = get_correct_link(link)\n\n # Check if link is in use\n check_if_link_in_use(filepath, link)\n\n if link in links_list:\n if links_list[link]:\n if link 
not in problems:\n problems.append(link) \n else:\n if internal_link_test(link):\n problems.append(link)\n links_list[link] = True\n\n if not internal_link_error:\n internal_link_error = True\n else:\n links_list[link] = False\n\n return problems, relative_links, internal_link_error", "title": "" }, { "docid": "8a8c415ec1777ae102d937832bd39339", "score": "0.5820183", "text": "def symlink (self, src, dst):\n if self.system ('/bin/ln -sf %s %s'%(src, dst)):\n raise OSError, 'can\\'t symlink [%s] as [%s]' % (src, dst)", "title": "" }, { "docid": "bf5255caff8fa0c01af5e9a0e3b0a7b8", "score": "0.58178663", "text": "def test_symlink_file6(self):\n src = self.rootdir\n target = os.path.join(self.targetdir,\"target-file\")\n open(target,'w').close()\n with mock.patch.object(\n transfer.os,\n 'unlink',\n side_effect=OSError(\"Mocked error\")):\n with self.assertRaises(transfer.SymlinkError):\n transfer.SymlinkAgent(src,target).transfer()", "title": "" }, { "docid": "9b019bad4cd351d9787fe08c2bc3f228", "score": "0.57966596", "text": "def symlink(self, req, link, parent, name):\n self.reply_err(req, EROFS)", "title": "" }, { "docid": "b9684816cb8583b1d44b66d7cd04ca40", "score": "0.5782404", "text": "def test_force_link(tmpdir):\n src = join(str(tmpdir), \"src\")\n dst = join(str(tmpdir), \"dst\")\n\n with open(src, \"w\") as f:\n f.write(\"Not empty.\")\n\n utils.force_link(src, dst)\n assert os.path.isfile(dst)\n assert not os.path.islink(dst)", "title": "" }, { "docid": "6419b2fcb183ad206a852387f3bc8d86", "score": "0.57806474", "text": "def test_scan_files(self):\n self.assertFalse(fdf.valid_path('some thing wrong'))\n result = fdf.scan_files(join(getcwd(), 'test'))\n self.assertNotIn(join(getcwd(), 'test/symlink/symlink'), result)", "title": "" }, { "docid": "e2fcb55acc8c1275ebfe5f75e72876a0", "score": "0.57789207", "text": "def test_non_html_links_are_path_sensitive(self):\n validator = self._create_validator(\n \"\"\"Use [this CSV](data.csv) for the exercise.\"\"\")\n self.assertFalse(validator._validate_links())", "title": "" }, { "docid": "169daa1a343f03131a9d00db91a8b596", "score": "0.5773865", "text": "def test_symlink_folder4(self):\n src = os.path.join(self.rootdir) \n target = os.path.join(self.targetdir)\n with mock.patch('taca.utils.transfer.os.path') as mockobj:\n mockobj.ismount.return_value = False\n mockobj.isfile.return_value = False\n mockobj.islink.return_value = False\n mockobj.isdir.return_value = False\n with self.assertRaises(transfer.SymlinkError):\n transfer.SymlinkAgent(src,target).transfer()", "title": "" }, { "docid": "7a3cd10319fcfb6990ceba408de225a7", "score": "0.5773567", "text": "def is_file_link(self, link):\n return link.startswith(\"###file\") or link.startswith('/repository') or \\\n (link.startswith('../') and '/files/' in link)", "title": "" }, { "docid": "78c2fa09a69a0c70db6567fbff768f2d", "score": "0.57712406", "text": "def provide_link(dest, src):\n if not os.path.exists(dest):\n # strip trailing /'s just in case it exists\n os.symlink(src, dest.rstrip(\"/\"))", "title": "" }, { "docid": "f5141a7e126b929ce6f8259580dbeb5c", "score": "0.5764898", "text": "def filepath_validator(path):\n if isinstance(path, str):\n path = Path(path)\n return path.exists() and path.isfile()", "title": "" }, { "docid": "308f791745e59acf2880a0ec1831b152", "score": "0.5761583", "text": "def test_symlink_lock(self):\n\n # Get an image object and tests its manual lock mechanism.\n api_obj = self.get_img_api_obj()\n img = api_obj.img\n\n # Verify a lock file is created.\n img.lock()\n 
lfpath = os.path.join(img.imgdir, \"lock\")\n self.assertTrue(os.path.exists(lfpath))\n img.unlock()\n os.remove(lfpath)\n\n # Make lock file a symlink by pointing it to a random file .\n tmp_file = os.path.join(img.imgdir, \"test_symlink\")\n fo = open(tmp_file, 'wb+')\n fo.close()\n os.symlink(tmp_file, lfpath)\n\n # Verify that both pkg install and refresh generate an error\n # if the lock file is a symlink.\n self.pkg(\"install foo\", su_wrap=True, exit=1)\n self.assertTrue(\"contains a symlink\" in self.errout)\n\n self.pkg(\"refresh --full\", su_wrap=True, exit=1)\n self.assertTrue(\"contains a symlink\" in self.errout)", "title": "" }, { "docid": "18658f8e04412069e3a03d80b360b5b3", "score": "0.57601094", "text": "def check_path_existence(fpath: str) -> bool:\n return os.path.exists(fpath)", "title": "" }, { "docid": "31411b5398f85370a62fa13f46f581a5", "score": "0.5740772", "text": "def validate_path(self, path):\n if not os.path.exists(path):\n raise IOError('Path {} does not exist'.format(path))\n return True", "title": "" }, { "docid": "042985e4ccc6c626626e50eb20ef3c8a", "score": "0.5739684", "text": "def ismount(path):\r\n try:\r\n s1 = os.lstat(path)\r\n s2 = os.lstat(join(path, '..'))\r\n except os.error:\r\n return False # It doesn't exist -- so not a mount point :-)\r\n dev1 = s1.st_dev\r\n dev2 = s2.st_dev\r\n if dev1 != dev2:\r\n return True # path/.. on a different device as path\r\n ino1 = s1.st_ino\r\n ino2 = s2.st_ino\r\n if ino1 == ino2:\r\n return True # path/.. is the same i-node as path\r\n return False", "title": "" } ]
54c87c218016afc006e423f7687dcaee
Returns a boolean indicating whether this type is an 'interface' type that is implemented in Gecko. At the moment, this returns true for all interface types that are not types from the TypedArray spec.
[ { "docid": "50186e06c1db50f28ee3b3f7d75a25e9", "score": "0.700691", "text": "def isGeckoInterface(self):\n return self.isInterface() and not self.isSpiderMonkeyInterface()", "title": "" } ]
[ { "docid": "3fd0af121674f7ce21b6fec67acc48c9", "score": "0.75389546", "text": "def isSpiderMonkeyInterface(self):\n return self.isInterface() and (self.isArrayBuffer() or \\\n self.isArrayBufferView() or \\\n self.isTypedArray())", "title": "" }, { "docid": "f22ccea57db2c6f27555c3c7019e94e2", "score": "0.74693924", "text": "def is_interface(self):\n\n return True", "title": "" }, { "docid": "f22ccea57db2c6f27555c3c7019e94e2", "score": "0.74693924", "text": "def is_interface(self):\n\n return True", "title": "" }, { "docid": "8df4e30ccecf6df007a1799056144bd7", "score": "0.7385049", "text": "def is_interface(self):\n return True", "title": "" }, { "docid": "8df4e30ccecf6df007a1799056144bd7", "score": "0.7385049", "text": "def is_interface(self):\n return True", "title": "" }, { "docid": "8df4e30ccecf6df007a1799056144bd7", "score": "0.7385049", "text": "def is_interface(self):\n return True", "title": "" }, { "docid": "8df4e30ccecf6df007a1799056144bd7", "score": "0.7385049", "text": "def is_interface(self):\n return True", "title": "" }, { "docid": "8df4e30ccecf6df007a1799056144bd7", "score": "0.7385049", "text": "def is_interface(self):\n return True", "title": "" }, { "docid": "8df4e30ccecf6df007a1799056144bd7", "score": "0.7385049", "text": "def is_interface(self):\n return True", "title": "" }, { "docid": "8df4e30ccecf6df007a1799056144bd7", "score": "0.7385049", "text": "def is_interface(self):\n return True", "title": "" }, { "docid": "8df4e30ccecf6df007a1799056144bd7", "score": "0.7385049", "text": "def is_interface(self):\n return True", "title": "" }, { "docid": "b10dfb26a4192cdf8ba42575e9069538", "score": "0.6441149", "text": "def has_interface(self, name):\n if not self.all_interfaces:\n self.find_interfaces()\n return name in self.all_interfaces", "title": "" }, { "docid": "45b6ef24706b35f4b5afbd0e1d5f38d2", "score": "0.5977207", "text": "def obj_has_interface(obj, *ifaces):\r\n for iface in ifaces:\r\n if iface.providedBy(obj):\r\n return True\r\n return False", "title": "" }, { "docid": "533ee492d61d1c5bc1503a56ef9018b8", "score": "0.5907879", "text": "def is_implemented(self):\n try:\n self.coxeter_diagram()\n return True\n except Exception:\n return False", "title": "" }, { "docid": "a118f27e95bb4e2efdd0e2efeaa97b85", "score": "0.5775072", "text": "def InterfaceType(self):\n return self._get_attribute('interfaceType')", "title": "" }, { "docid": "c6611878a3ca087c3654d231659f4be6", "score": "0.5677758", "text": "def hasImplementation(self):\n return False", "title": "" }, { "docid": "56bb5520d6d5bbc20c414ff8f77589ec", "score": "0.5661154", "text": "def is_interface_available(interface_name):\n\n try:\n interface = NXCLI('show run interface %s' %interface_name)\n return True\n except:\n return False", "title": "" }, { "docid": "de489f3501d8d96a1747af4e3fa8365a", "score": "0.56517386", "text": "def is_available(self, hub, interface_name):\n \n result = False\n\n names_dict = self._names[hub.interface_type]\n if interface_name in names_dict: result = True\n\n return result", "title": "" }, { "docid": "6e398ccc621d69c564f8f847747c78c4", "score": "0.5499881", "text": "def is_ipython() -> bool:\n\n try:\n __IPYTHON__ # type: ignore[name-defined] # pylint: disable=pointless-statement\n return True\n except NameError:\n return False", "title": "" }, { "docid": "2b1f1e38910f5021d929657b36f4107b", "score": "0.5479125", "text": "def interface_type(self) -> Optional[str]:\n return pulumi.get(self, \"interface_type\")", "title": "" }, { "docid": "76782b80ec61eb9faf8d19425d74ec52", "score": 
"0.5415978", "text": "def has_web_interface(self) -> bool:\n for port in self._open_ports:\n if port in WEB_PORTS:\n return True\n return False", "title": "" }, { "docid": "68a8c3def7d14b06861b675d1393a489", "score": "0.5412653", "text": "def _is_ipython() -> bool:\n try:\n import IPython # pylint: disable=g-import-not-at-top\n ipy = IPython.get_ipython()\n if ipy is None:\n return False\n except ImportError:\n return False\n\n return True", "title": "" }, { "docid": "9b8e18a44978fd4eb1e209c2c287ec51", "score": "0.54086965", "text": "def Interfaces(self):\n return self._get_attribute('interfaces')", "title": "" }, { "docid": "a3fea24ba206c82a91dda73264705530", "score": "0.5395206", "text": "def interface(self) -> \"RegisterArray.Interface\":\n\n return self._interface", "title": "" }, { "docid": "3e9d2e76a21ccc26f741935f1d57e547", "score": "0.538823", "text": "def _fallback_hasinterface(self, conn, ident, obj, typename):\r\n # It's very difficult to get an arbitrary `typename` here.\r\n dot = typename.rfind('.')\r\n module_name = typename[:dot]\r\n class_name = typename[dot+1:]\r\n try:\r\n module = __import__(module_name)\r\n except ImportError: #pragma no cover\r\n return False\r\n module = sys.modules[module_name]\r\n try:\r\n cls = getattr(module, class_name)\r\n except AttributeError: #pragma no cover\r\n return False\r\n return obj_has_interface(obj, cls)", "title": "" }, { "docid": "cac6848b7d97d0e550c1e19e3a5b2931", "score": "0.53677785", "text": "def has_name(self, hub, interface_name):\n \n result = False\n \n if not self.is_available(hub, interface_name): return result\n\n interface_cls_name = self.get_cls_name(hub, interface_name) \n if interface_cls_name in hub.get_interface_map(): result = True\n \n return result", "title": "" }, { "docid": "14c87943a24ff4af02ded3128e5f9cfb", "score": "0.53670704", "text": "def validate_interface(self, interface):\n url = 'http://0.0.0.0:8181/api/kytos/topology/v3/interfaces'\n headers = {'Content-Type': 'application/json'}\n current_interfaces = requests.get(url, headers=headers).json()\n\n return interface in [current_interfaces['interfaces'][key]['id'] for key in current_interfaces['interfaces']]", "title": "" }, { "docid": "2b8c14d1778a807ad3477e673e5f52ae", "score": "0.5335405", "text": "def is_using_ipython():\n try:\n __IPYTHON__ # pylint: disable=undefined-variable, pointless-statement\n return True\n except NameError:\n return False", "title": "" }, { "docid": "877ebdf13c62ad5cc4e9d351404c0b5a", "score": "0.53146756", "text": "def connected(self):\n return self._interface is not None", "title": "" }, { "docid": "3b65645c6a7e2b0dde1897ad823f21a9", "score": "0.53040075", "text": "def interface(self) -> \"Register.Interface\":\n\n return self._interface", "title": "" }, { "docid": "f26c30c251276dcf6c03b41f175a6d71", "score": "0.52766734", "text": "def is_enumerated_type(self):\n return self.enumerated_values is not None", "title": "" }, { "docid": "8b0df933c507721b9942727b22ad436a", "score": "0.5256926", "text": "def implements(self, calculation_type: \"CalculationType\") -> bool:", "title": "" }, { "docid": "f2518bc585dd4a9de207fd4517c48cbc", "score": "0.5226839", "text": "def _is_intf_deconfigured(self, interface):\n _logger.debug('%s\\n%s', where_am_i(), pformat(interface, indent=4))\n for deconf in self.vnic_info.get('deconfig', ()):\n if deconf in (interface['IFACE'], interface['VNIC'], interface['ADDR']):\n return True\n return False", "title": "" }, { "docid": "f5f354b72d13a9226af14d6c81f81035", "score": "0.5220707", "text": "def 
interface_profile_has_metrics(profile):\n for m in profile.metrics:\n if m.is_active and m.metric_type.name in (\n \"Interface | Load | In\",\n \"Interface | Load | Out\"\n ):\n return True\n return False", "title": "" }, { "docid": "fda1a9bf8244ba8f27d6fdf5af8308a1", "score": "0.5209826", "text": "def has_alibi_witness (self):\n if not self.alibi:\n return False\n\n if self.alibi.witness == -1:\n return False\n\n return True", "title": "" }, { "docid": "6517bec04735935d4ee0ff5369e567b2", "score": "0.51612246", "text": "def interfaces(self):\n return self._view_by_kind(Database._Kind.INTERFACE)", "title": "" }, { "docid": "9361244c8e0054f3dfbc19e65431e661", "score": "0.515429", "text": "def is_notebook():\n try:\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config: # pragma: no cover\n return False\n except ImportError:\n return False\n except AttributeError:\n return False\n return True", "title": "" }, { "docid": "59bb25ccb55103b904b985f4bc55107c", "score": "0.5145271", "text": "def isinstance(self, obj: Any) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "003fb219083b8ab7fd5ffbadc5bf29be", "score": "0.5139457", "text": "def using_ipython() -> bool:\n return hasattr(builtins, '__IPYTHON__')", "title": "" }, { "docid": "538031a7ad259ac07679a53e8f09cde4", "score": "0.51392484", "text": "def interface_exists(interface):\n return os.path.exists(\"/sys/class/net/%s\" % interface)", "title": "" }, { "docid": "aee1f9d0829c76e4f029811214eaaff8", "score": "0.51390105", "text": "def test_implemented_by_subinterface(self):\n self.assertTrue(IFoo.implemented_by(IFooBar))", "title": "" }, { "docid": "8c36c1320b613f243935dd850a8b2ded", "score": "0.513061", "text": "def implements_attribute_manager(obj):\n return all([hasattr(obj, n) for n in\n ['keys', '__getitem__', '__contains__', '__iter__']])", "title": "" }, { "docid": "b62457039b5b99728ebd277fe83408b8", "score": "0.5127941", "text": "def interfaces(self):\n return self._interfaces", "title": "" }, { "docid": "7ab9332932fc0556505ed054e4534e36", "score": "0.5107286", "text": "def implements_dataset(obj):\n return all([hasattr(obj, n) for n in\n ['attrs', 'keys', 'items',\n '__getitem__', '__contains__', '__iter__']])", "title": "" }, { "docid": "08bc9a476b5488ef4062c42d2385795a", "score": "0.50944895", "text": "def is_type(self):\n return True", "title": "" }, { "docid": "ab35b1572f9c98cfe710abd8db4719b5", "score": "0.50910896", "text": "def interface_exists(interface):\n return os.path.exists(\"/sys/class/net/\" + interface)", "title": "" }, { "docid": "3abfdcdaa155732f67dec65ff27eb22a", "score": "0.5085412", "text": "def _currently_in_ipython():\n try:\n __IPYTHON__ # pylint: disable=undefined-variable\n return True\n except NameError:\n return False", "title": "" }, { "docid": "2c118e3dfce2fd85243a6f79eb006fb1", "score": "0.50795716", "text": "def _was_registered(m, interface):\n \n for item in m.call_args_list:\n if item[0][-1] == interface:\n return True\n \n return False", "title": "" }, { "docid": "d88ac3ffa427897baf9ab47042c839d8", "score": "0.5077435", "text": "def interface(self):\n return self.__interface", "title": "" }, { "docid": "4ec85e70ccdb9bd315392ad5fd59f147", "score": "0.50658995", "text": "def in_ipython():\n try:\n shell = get_ipython().__class__.__name__\n if shell == \"ZMQInteractiveShell\":\n return True # Jupyter notebook or 
qtconsole\n elif shell == \"TerminalInteractiveShell\":\n return False # Terminal running IPython\n else:\n return False # Other type (?)\n except NameError:\n return False # Probably standard Python interpreter", "title": "" }, { "docid": "cefc0b8f212634a96a065179fee635a8", "score": "0.50610983", "text": "def is_abstract(obj: typing.Type) -> bool:\n try:\n return ABC in obj.__bases__\n except AttributeError:\n return False", "title": "" }, { "docid": "d9eca26713378f7517471cf61b8928f5", "score": "0.50561816", "text": "def GetInterfaces(self):\n return self._interfaces", "title": "" }, { "docid": "289f245efa808a9a32ad5962d51e5c8d", "score": "0.5047994", "text": "def get_interface_class() -> Interface:\n interface = Office.get_interface() # gets str name of interface class\n if not interface:\n raise ValueError('Should be run as macro using XLWings or PyUno.'\n 'Neither could be detected.')\n return getattr(Office, interface)", "title": "" }, { "docid": "52ebfab792e76b181e3ad009385342d2", "score": "0.5029029", "text": "def is_this_run_in_ipython():\n try:\n __IPYTHON__\n return True\n except NameError:\n return False", "title": "" }, { "docid": "e5e8dbc9fc4e755ad5723910adda3862", "score": "0.50271434", "text": "def interface(self):\n return self._interface", "title": "" }, { "docid": "565ddff6ab951dd08895c4b3ac15d578", "score": "0.501947", "text": "def is_mpsse_interface(self, interface: int) -> bool:\n if not self.has_mpsse:\n return False\n if self.device_version == 0x0800 and interface > 2:\n return False\n if self.device_version == 0x3600 and interface > 2:\n return False\n return True", "title": "" }, { "docid": "018e794216fecc1b79414a808c496be8", "score": "0.49981865", "text": "def _is_intf_excluded(self, interface):\n _logger.debug('%s\\n%s', where_am_i(), pformat(interface, indent=4))\n for excl in self.vnic_info.get('exclude', ()):\n if excl in (interface['IFACE'], interface['VNIC'], interface['ADDR']):\n return True\n return False", "title": "" }, { "docid": "dc45a0530a6ce88744047b14cb31fae3", "score": "0.4976998", "text": "def get_interface() -> str or None:\n # test for Python Uno\n try:\n XSCRIPTCONTEXT # if this variable exists, PyUno is being used.\n except NameError:\n pass\n else:\n return 'Uno'\n\n if xw is not None:\n return 'XW'\n\n # otherwise, return None / False", "title": "" }, { "docid": "5f5dd9da15c84c05dd10c6e4f2bf8554", "score": "0.4964717", "text": "def _in_ipython():\n\n try:\n # See https://stackoverflow.com/questions/15411967\n shell = get_ipython().__class__.__name__\n return bool(shell == \"TerminalInteractiveShell\")\n except NameError:\n return False", "title": "" }, { "docid": "4ce46daaa6dd2bbf0905ee3bc8dc17a1", "score": "0.49643916", "text": "def interface_mode(self):\n return self._interface_mode.value if self._interface_mode else None", "title": "" }, { "docid": "7dd29c00eb8a74ef6f6863fa100b1b37", "score": "0.49566942", "text": "def isnotebook(self):\n try:\n shell = get_ipython().__class__.__name__\n if (shell == 'ZMQInteractiveShell' or shell == 'TerminalInteractiveShell'):\n return True # Jupyter notebook or IPython\n else:\n return False # Other type (?)\n except NameError:\n return False", "title": "" }, { "docid": "0494fee01a6c84336625999ec71e9e88", "score": "0.49517235", "text": "def exposed_interfaces(self):\n raise exceptions.NotImplementedError()", "title": "" }, { "docid": 
"9370580bbbb45adf1a90aee162216377", "score": "0.49513036", "text": "def __eq__(self, other):\n if not isinstance(other, Interface):\n return False\n\n return self.to_dict() == other.to_dict()", "title": "" }, { "docid": "3e95ddf88ba69823cfcf93bffd8f262b", "score": "0.49503726", "text": "def interface(self):\n return self._if.interface()", "title": "" }, { "docid": "2642419c8ab4cb9c915374e4d5b11405", "score": "0.49493083", "text": "def has_dax_datatypes(intf):\n xnat_datatypes = intf.inspect.datatypes()\n for dax_datatype in DAX_SETTINGS.get_xsitype_include():\n if dax_datatype not in xnat_datatypes:\n return False\n return True", "title": "" }, { "docid": "c2485db54dc47373ad9a3a24581a01ce", "score": "0.49441043", "text": "def is_integer(self) -> bool:\n return isinstance(self, IntegerTyp)", "title": "" }, { "docid": "ce814aee006c85f85177fbe1b80a91d5", "score": "0.49327436", "text": "def getInterface(self):\n return self.__interface", "title": "" }, { "docid": "eb9bafa5ebb16cf0c63b3a9320dde068", "score": "0.49253488", "text": "def initial_interface_checks(iface_name):\n if not interface.is_valid_interface(iface_name):\n print 'Error: %s is not a valid interface.' % iface_name\n return False\n\n return True", "title": "" }, { "docid": "e54278cd2bd316da7523d00f43e4cbd5", "score": "0.4919012", "text": "def test_interface_glue(self):\r\n iface = self.create(items.InterfaceItem, UML.Interface)\r\n impl = self.create(items.ImplementationItem)\r\n\r\n glued = self.allow(impl, impl.head, iface)\r\n self.assertTrue(glued)", "title": "" }, { "docid": "56aeeefcd366178ea0ba238a08926672", "score": "0.49182153", "text": "def verify_ospf_interface_type(device, interface, interface_type, \r\n max_time=60, check_interval=10):\r\n timeout = Timeout(max_time, check_interval)\r\n while timeout.iterate():\r\n out = None\r\n try:\r\n out = device.parse('show ospf interface extensive')\r\n except SchemaEmptyParserError:\r\n timeout.sleep()\r\n continue\r\n for ospf_interface in out.q.get_values('ospf-interface'):\r\n intf = ospf_interface.get('interface-name', None)\r\n intf_type = ospf_interface.get('interface-type', None)\r\n if intf == interface and intf_type == interface_type:\r\n return True\r\n timeout.sleep()\r\n return False", "title": "" }, { "docid": "d4d5ffb882554298c0be64514283fa9f", "score": "0.48915172", "text": "def isframe(object):\n return isinstance(object, types.FrameType)", "title": "" }, { "docid": "d72ec5d1dcac59b3959fbe6302505c3c", "score": "0.48835492", "text": "def is_image(self):\n if self.axes is None:\n return True\n return len(self.axes) == 0", "title": "" }, { "docid": "607beb0c4acf51ab0e4a475909c1bf4f", "score": "0.48734248", "text": "def enterInterfaceDeclaration(self, ctx:JavaParserLabeled.InterfaceDeclarationContext):\n \n self.__current_class_or_interface = ctx.IDENTIFIER().getText()\n self.__classes.append(ctx.IDENTIFIER().getText())\n self.__methods_of_classes[self.__current_class_or_interface] = []\n self.__class_interface[ctx.IDENTIFIER().getText()] = True\n\n if ctx.EXTENDS() is not None and ctx.typeList().getText() in self.__classes:\n self.__parent_and_child_classes[ctx.IDENTIFIER().getText()] = ctx.typeList().getText()", "title": "" }, { "docid": "d32a13afe4db6cc3251dcfeb92b8725f", "score": "0.48706833", "text": "def ProtocolInterface(self):\n return self._get_attribute('protocolInterface')", "title": "" }, { "docid": "fbe526509239c5808b11866ccc137425", "score": "0.48549366", "text": "def is_known_type(cls, ioc_type: str) -> bool:\n return ioc_type in IoCType.__members__ 
and ioc_type != \"unknown\"", "title": "" }, { "docid": "b3c71b64bcf1f098b63928f0c2fbc4bc", "score": "0.48411354", "text": "def is_simulator(self) -> bool:\n return isinstance(self._backend, Simulator)", "title": "" }, { "docid": "e18c61d197fa747c4617a6582d110ba0", "score": "0.48401403", "text": "def is_elemental(self):\n return self._is_elemental", "title": "" }, { "docid": "f61697810a3675c5564cf631af3d2d91", "score": "0.48228928", "text": "def interfaces():\n pass", "title": "" }, { "docid": "913c32a9fad13671240a3accbca77d37", "score": "0.48065004", "text": "def SupportsPlugins(cls):\n return cls._plugin_classes is not None", "title": "" }, { "docid": "a1ed8f865cc32f16b7caa6a7baaebbcb", "score": "0.4798193", "text": "def __ne__(self, other):\n if not isinstance(other, Interface):\n return True\n\n return self.to_dict() != other.to_dict()", "title": "" }, { "docid": "8f3f206bf554e28eac92333360608cdf", "score": "0.47953272", "text": "def in_ipynb():\n try:\n if \"terminal\" in str(type(get_ipython())):\n return False\n else:\n return True\n except NameError:\n return False", "title": "" }, { "docid": "89148e8b9fea47ebc8679ffdb438721b", "score": "0.47926775", "text": "def get_interfaces(self):\n return self._interfaces", "title": "" }, { "docid": "e5c5decf9179fb87c752b8e5fdac1663", "score": "0.47859254", "text": "def is_supported(self) -> bool:\n pass", "title": "" }, { "docid": "67585469ad50e8769ef5e4346dcb255d", "score": "0.47641656", "text": "def isnotebook():\r\n try:\r\n shell = get_ipython().__class__.__name__\r\n if shell == 'ZMQInteractiveShell':\r\n return True # Jupyter notebook or qtconsole\r\n elif shell == 'TerminalInteractiveShell':\r\n return False # Terminal running IPython\r\n else:\r\n return False # Other type (?)\r\n except NameError:\r\n return False # Probably standard Python interpreter\r", "title": "" }, { "docid": "25a503865f67cf1555d23e56fe98cf76", "score": "0.47600353", "text": "def is_simulator(self) -> bool:\n return isinstance(self._backend, OT3Simulator)", "title": "" }, { "docid": "8a5c5cd0af7c197859e68fa667250759", "score": "0.47472557", "text": "def test_interface(self):\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat64_add_del_interface(\n is_add=1, flags=flags, sw_if_index=self.pg0.sw_if_index\n )\n self.vapi.nat64_add_del_interface(\n is_add=1, flags=0, sw_if_index=self.pg1.sw_if_index\n )\n\n interfaces = self.vapi.nat64_interface_dump()\n self.assertEqual(len(interfaces), 2)\n pg0_found = False\n pg1_found = False\n for intf in interfaces:\n if intf.sw_if_index == self.pg0.sw_if_index:\n self.assertEqual(intf.flags, self.config_flags.NAT_IS_INSIDE)\n pg0_found = True\n elif intf.sw_if_index == self.pg1.sw_if_index:\n self.assertEqual(intf.flags, self.config_flags.NAT_IS_OUTSIDE)\n pg1_found = True\n self.assertTrue(pg0_found)\n self.assertTrue(pg1_found)\n\n features = self.vapi.cli(\"show interface features pg0\")\n self.assertIn(\"nat64-in2out\", features)\n features = self.vapi.cli(\"show interface features pg1\")\n self.assertIn(\"nat64-out2in\", features)\n\n self.vapi.nat64_add_del_interface(\n is_add=0, flags=flags, sw_if_index=self.pg0.sw_if_index\n )\n self.vapi.nat64_add_del_interface(\n is_add=0, flags=flags, sw_if_index=self.pg1.sw_if_index\n )\n\n interfaces = self.vapi.nat64_interface_dump()\n self.assertEqual(len(interfaces), 0)", "title": "" }, { "docid": "28b4220da7617062876632280a83bcb1", "score": "0.47413203", "text": "def has_nat(self):\n return self.typeof in self", "title": "" }, { "docid": 
"843d7ba37f9c30d53aeab8fdf39117e2", "score": "0.47389", "text": "def check_if_interface_exists(device_name, interface_type, interface_number):\n with ncs.maapi.single_write_trans('admin', 'python', groups=['ncsadmin']) as t:\n root = ncs.maagic.get_root(t)\n device = root.devices.device[device_name]\n print type(device.interface[interface_type])\n if interface_number in device.interface[interface_type]:\n print(\"Interface is on the device!\")\n else:\n print(\"Interface is not on the device!\")", "title": "" }, { "docid": "252c601cfbd7392ad20a08385be7058d", "score": "0.47366798", "text": "def has_donor_cbginterface(self,cbg):\n if hasattr(cbg,\"_CBGinterface3p\") and cbg._CBGinterface3p:\n return True\n else:\n return False", "title": "" }, { "docid": "b79bc03e384ef302826e112ba6ed98c1", "score": "0.47240645", "text": "def test_subclass_provides_interface(self):\n class IFooBarSubclass(self.FooBarHasFooBarBaz):\n\n def provides_interface(self, interface):\n return interface.implemented_by(IFooBaz)\n\n obj = IFooBarSubclass()\n with self.assertRaises(TypeError):\n IFooBar(obj)\n foobaz = IFooBaz(obj)\n foobaz.baz()\n self.assertEqual(obj.foo, 3)", "title": "" }, { "docid": "d6eef8e9daccf5af22fc3e60aa0db7da", "score": "0.4722961", "text": "def any_extension_types(self) -> bool:\n return False # any(block.is_extension for block in self.blocks)", "title": "" }, { "docid": "5c6e122479958a19399bf44fd93c3b6c", "score": "0.47229505", "text": "def is_interface_connected_to_source(self):\n source_ip = self.get_tree_id()[0]\n for network in self.get_all_interface_networks():\n if ipaddress.ip_address(source_ip) in ipaddress.ip_network(network):\n return True\n return False", "title": "" }, { "docid": "bd25262cd7cec55d00e342d756b3109e", "score": "0.47206524", "text": "def isa(self, other):\r\n\r\n m = self\r\n while m and m is not other:\r\n m = m.inherits\r\n return bool(m)", "title": "" }, { "docid": "f3547b097146dd27e26dae98c79f66fe", "score": "0.47147197", "text": "def declare_interface(ea):\n kind = (get_struct_val(ea, 'go_type0.kind') & 0x1f)\n if kind != TYPEKIND_VALS['kindInterface'][0]:\n msg = 'type at 0x%s not an interface' % ea\n LOG.error(msg)\n raise Exception(msg)\n\n module_idx = resolve_moduledata_idx(ea)\n type_size = get_ida_struc_size('go_type0')\n tflag = (get_struct_val(ea, 'go_type0.tflag') & 0x7)\n\n iface_name = 'anonymous'\n if tflag & TYPE_TFLAGS['named']:\n iface_name = normalize_name(get_type_name(ea), ea,\n prepend_type_info=False)\n if tflag & TYPE_TFLAGS['extraStar']:\n iface_name = iface_name[1:]\n LOG.info('declare_interface: declaring iface %s at 0x%x',\n iface_name, ea)\n\n iface_ea = ea + type_size\n iface_mhdr_ea = iface_ea + PTRSIZE\n mhdr_ea = get_struct_val(iface_mhdr_ea, 'go_slice.array')\n mhdr_len = get_struct_val(iface_mhdr_ea, 'go_slice.len')\n iface_methods = []\n for i in range(0, mhdr_len):\n mhdr_entry_ea = mhdr_ea + (i * PTRSIZE)\n imethod_struc_name = 'go_type%d_metadata_imethod' % module_idx\n imethod_struc_size = get_ida_struc_size(imethod_struc_name)\n name_offset = nameoff(\n get_struct_val(mhdr_entry_ea,\n '%s.name' % imethod_struc_name),\n module_idx)\n imethod_name = get_raw_name_str(name_offset)\n LOG.info('found interface method %s.%s()',\n iface_name, imethod_name)\n ida_bytes.del_items(mhdr_entry_ea, imethod_struc_size)\n ida_bytes.create_struct(mhdr_entry_ea,\n imethod_struc_size,\n ida_struct.get_struc_id(imethod_struc_name))\n iface_methods.append((\n normalize_name(imethod_name, None,\n prepend_type_info=False),\n PTRSIZE,\n 
FF_PTR|FF_DATA|off_flag(),\n OFF_CURRENT_SEGMENT, None, None\n ))\n\n itab_fields = [\n ('inter', PTRSIZE, FF_PTR|FF_DATA|off_flag(),\n OFF_CURRENT_SEGMENT, None, None),\n ('_type', PTRSIZE, FF_PTR|FF_DATA|off_flag(),\n OFF_CURRENT_SEGMENT, None, None),\n ('hash', 4, FF_DWORD|FF_DATA, None, None, None),\n ('_unused', 4, FF_DWORD|FF_DATA, None, None, None)\n ] + iface_methods\n LOG.debug('Creating new itab for %s', iface_name)\n create_and_populate_struct('go_itab__%s' % iface_name,\n itab_fields)\n\n LOG.debug('Creating new iface for %s', iface_name)\n interface_tinfo = ida_typeinf.tinfo_t()\n ida_typeinf.parse_decl(interface_tinfo,\n idaapi.cvar.idati,\n 'go_itab__%s *;' % iface_name,\n ida_typeinf.PT_TYP)\n iface_fields = [\n ('tab', PTRSIZE, FF_PTR|FF_DATA|off_flag(), OFF_CURRENT_SEGMENT,\n None, None),\n ('data', PTRSIZE, FF_PTR|FF_DATA|off_flag(), OFF_CURRENT_SEGMENT,\n None, None)\n ]\n iface_tinfos = [interface_tinfo, None]\n iface_name = 'go_iface__%s' % iface_name\n create_and_populate_struct(iface_name,\n iface_fields, None,\n iface_tinfos)\n return iface_name", "title": "" }, { "docid": "c6ee064dbb5293432396b13c5e23c308", "score": "0.47141168", "text": "def inherited_interface(self):\n raise exceptions.NotImplementedError()", "title": "" }, { "docid": "0d6f031d8e91e7018033fd8f3d7e0118", "score": "0.47140905", "text": "def is_array(obj):\n return hasattr(obj, 'dtype')", "title": "" }, { "docid": "d25fcdb091462da09938c70988e017d7", "score": "0.47132087", "text": "def isnotebook():\n try:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell':\n return True # Jupyter notebook or qtconsole\n elif shell == 'TerminalInteractiveShell':\n return False # Terminal running IPython\n else:\n return False # Other type (?)\n except NameError:\n return False", "title": "" }, { "docid": "9b3021caa56b8e96d786105815561d54", "score": "0.47128734", "text": "def are_supported(config: configuration.NamespaceConfig,\n installer: Optional[interfaces.Installer]) -> bool:\n for enh in enabled_enhancements(config):\n if not isinstance(installer, enh[\"class\"]):\n return False\n return True", "title": "" } ]
260a4b8f6a111255d751b34ee4804b99
Return an annual probability given a return period $$AEP = 1 - \exp(-1/ARI)$$
[ { "docid": "76ed3955f395457f23e1a92d5ee923b0", "score": "0.78453267", "text": "def probability(ari):\n aep = 1.0 - np.exp(-1.0/ari)\n return aep", "title": "" } ]
[ { "docid": "a969b34f946ea4cab04180a858e705ce", "score": "0.66628295", "text": "def probability(e, en, t):\n if t == 0:\n return 0\n else:\n return exp((e-en)/t)", "title": "" }, { "docid": "6d171e23a9d819ff00ddad221521a2ea", "score": "0.64231294", "text": "def annuity(tot_sum: float, required_return: float, period: int) -> float:\n v = 1 / (1 + required_return)\n a_t = (1 - v ** period) / required_return\n return tot_sum / a_t", "title": "" }, { "docid": "e54c4f992753aed19aae3a0fbf9e69b6", "score": "0.63084", "text": "def _prob(self, rate):\n return 1 - m.exp(-rate * self.timestep_len)", "title": "" }, { "docid": "07232e7572226eacc7c8ad4d764030eb", "score": "0.6257997", "text": "def annualized_return(df,periodicity=252):\n # convert return series to numpy array (in case Pandas series is provided)\n df = np.asarray(df)\n # how many years of returns data is provided in df\n difference_in_years = len(df)/periodicity\n # starting net asset value / NAV (assumed to be 1) and cumulative returns (r) over time period provided in returns data\n start_NAV=1.0\n r = np.nancumprod(df+start_NAV)\n # end NAV based on final cumulative return\n end_NAV=r[-1]\n # determine annualized return\n AnnualReturn = end_NAV**(1 / difference_in_years) - 1\n return AnnualReturn", "title": "" }, { "docid": "154922c23546a6280747801269452348", "score": "0.60345423", "text": "def annualized_return(returns, start_date, end_date):\n total_return = ((1 + returns).cumprod())[-1]\n t = (end_date - start_date).days / 365.25\n ar = (total_return ** (1 / t)) - 1\n return ar", "title": "" }, { "docid": "66b4443c3b33f1455b678df725351107", "score": "0.60135186", "text": "def probability(self, v, a):\n return self.effective_energy(v, a).exp()", "title": "" }, { "docid": "d93567a5d1ddf14025996e3afb62cbb8", "score": "0.59987706", "text": "def ev_log_p(eta):\n return ev_log_h(eta) + ev_t(eta) @ eta - a(eta)", "title": "" }, { "docid": "cc13a260398e24edc6261513dec9992a", "score": "0.5968521", "text": "def annualized_returns(ser):\n return ser.mean() * DAYS_IN_YEAR", "title": "" }, { "docid": "41c09486a9eefe1f4ecd2286d6e97b57", "score": "0.5949281", "text": "def get_annualized_percent_change_per_year(diff, years):\n return math.exp(math.log(diff) / years) - 1", "title": "" }, { "docid": "eccfd5ead284dde79de143510d846be1", "score": "0.59371513", "text": "def probability(self):\n product=self.features.dot(self.w)\n #print(product)\n expr=np.exp(-product)\n \n return 1/(1+expr)", "title": "" }, { "docid": "17ad259a90ab4b35af7af1020d840805", "score": "0.5879564", "text": "def avg_rate(p):\n return (p[-1][0]-p[0][0])/(p[-1][1]-p[0][1])", "title": "" }, { "docid": "e88279a38ba012acd7789ced66e89ea0", "score": "0.58563554", "text": "def getfreq(pianonum):\n \n exponential = 2 ** ((pianonum - 49) / 12)\n return exponential * 440", "title": "" }, { "docid": "d08118845d5b36ce5bd753f63e3d3b10", "score": "0.58353317", "text": "def annualise(num):\n anld = float(num) * 12 * 100\n return anld", "title": "" }, { "docid": "017db4fe9f5b64e04d26169fa1596b1a", "score": "0.5829257", "text": "def Probability(o):\n return o / (o + 1)", "title": "" }, { "docid": "8d6e15058f1871d5a04c9c8ec9906aed", "score": "0.5813514", "text": "def ioniz_prob(ergs):\n prob = np.exp(0.05*ergs - 8.0)\n return prob", "title": "" }, { "docid": "f28da29699709c641d11b78e12aa222a", "score": "0.576968", "text": "def dbl_logistic_model ( p, x ):\n return p[0] + p[1]* ( 1./(1+np.exp(p[2]*(365.*x-p[3]))) + \\\n 1./(1+np.exp(p[4]*(365.*x-p[5]))) - 1 )", "title": "" }, { "docid": 
"8983ec782f85690e786fb0777161d298", "score": "0.57667994", "text": "def probability(self, observation, next_state, action):\n if self.sample(next_state, action) == observation:\n return 1.0 - self.epsilon\n else:\n return self.epsilon", "title": "" }, { "docid": "cf2ed7994e322867aa727db1a84dc57f", "score": "0.5764005", "text": "def _annualizationFactor(lifetime, r=4.0):\n if r==0: return lifetime\n return (1-(1+(r/100.0))**-lifetime)/(r/100.0)", "title": "" }, { "docid": "7998a972bd9d384b08964c1ef376da06", "score": "0.576322", "text": "def _eta(data):\r\n # if len(data) <= 1: return 0\r\n ldata = len(list(data))\r\n if ldata <= 1: return 0\r\n _exp = exp(1)\r\n counts = Counter()\r\n for d in data:\r\n counts[d] += 1\r\n # probs = [float(c) / len(data) for c in counts.values()]\r\n probs = [float(c) / ldata for c in counts.values()]\r\n probs = [p for p in probs if p > 0.]\r\n ent = 0\r\n for p in probs:\r\n if p > 0.:\r\n ent -= p * log(p, _exp)\r\n return ent", "title": "" }, { "docid": "1bb053fe634110591f254c113c674cd4", "score": "0.5735856", "text": "def payback_period(self, proforma):\r\n capex = abs(proforma.loc['CAPEX Year', :].sum())\r\n\r\n first_opt_year = min(self.opt_years)\r\n yearlynetbenefit = proforma.loc[pd.Period(year=first_opt_year, freq='y'), :].sum()\r\n\r\n return capex/yearlynetbenefit", "title": "" }, { "docid": "ba1bd10268b1aa5df2ab5497a287b4f8", "score": "0.5728151", "text": "def prob(self, Pn):\n if self.norm==\"Scargle\": return exp(-Pn)\n if self.norm==\"HorneBaliunas\": return (1.-2.*Pn/(self.N-1.))**((self.N-3.)/2.)\n if self.norm==\"Cumming\": return (1.+2.*Pn/(self.N-3.))**(-(self.N-3.)/2.)", "title": "" }, { "docid": "ba1bd10268b1aa5df2ab5497a287b4f8", "score": "0.5728151", "text": "def prob(self, Pn):\n if self.norm==\"Scargle\": return exp(-Pn)\n if self.norm==\"HorneBaliunas\": return (1.-2.*Pn/(self.N-1.))**((self.N-3.)/2.)\n if self.norm==\"Cumming\": return (1.+2.*Pn/(self.N-3.))**(-(self.N-3.)/2.)", "title": "" }, { "docid": "36452380e66393425596e4644ba5f147", "score": "0.57024795", "text": "def airdensity(p,t):\n Re = 287.04 \n rho = p / ( Re * t ) \n return rho", "title": "" }, { "docid": "cede57d7caad42c6c13cfe71a4d66b9f", "score": "0.56989205", "text": "def amplitude_calc(x, p, beta=1, alpha=0):\n return (1+alpha**2)/beta/2 * x**2 + alpha*x*p + beta/2*p**2", "title": "" }, { "docid": "baea0c4c34222f3ca14644480b1365ab", "score": "0.5695328", "text": "def find_aps(r,kappain):\n return -2.*np.log(r) - kappain**2./(np.exp(1.)*r**2.)", "title": "" }, { "docid": "2e175a0a8d31194ee97285b667f9f17c", "score": "0.5669334", "text": "def P(self):\n\t\tP = 2*pi * sqrt( (self.a()*AU)**3. / (G*(self.mCent+self.mass)*mSun) )/day\n\t\treturn P", "title": "" }, { "docid": "f3b5dd9bb01f02b96740daacd3610301", "score": "0.5666913", "text": "def P(self):\n\t\tP = 2*pi * sqrt( (self.a*AU)**3. 
/ (G*(self.M+self.m)*mSun) )/(24*3600.)\n\t\treturn P", "title": "" }, { "docid": "2031ab84851620cb2d48a6d0123baee0", "score": "0.5654314", "text": "def _calculate_aprior_probabilities(aposterior_probabilities: np.array) -> np.array:\n return aposterior_probabilities.mean(axis=0)", "title": "" }, { "docid": "6c64cde34a2700c76d76fc56c57511d9", "score": "0.5640836", "text": "def get_a(x):\n\n return np.exp(x)", "title": "" }, { "docid": "b5526b4851786a92173c9ec8c0b34e99", "score": "0.56370974", "text": "def p(self, x: np.array) -> float:\n p = - x[0]**2 / 200.0 - 0.5 * (x[1] - self.b * x[0]**2 +\n 100.0 * self.b)**2\n p_sum = - 0.5 * np.sum(x[2:-1]**2)\n p = p + p_sum\n return np.exp(p)", "title": "" }, { "docid": "7512fe5873ab477ff6210d31de27da47", "score": "0.5598786", "text": "def aire(R):\n return pi * (R ** 2)", "title": "" }, { "docid": "e4a3541d9cae87df9cc65e10c53d9c51", "score": "0.5589556", "text": "def eta_ideal(self):\n return 1. - (self.engine_in.pr_ovr ** ((1 - self.engine_in.ambient.kappa_air ) /\n self.engine_in.ambient.kappa_air))", "title": "" }, { "docid": "e1d890d991df68637d2ed572b546f57f", "score": "0.5578697", "text": "def adiabatic_lapse_rate_from_t(SA, t, p):\n\n return -gibbs(n0, n1, n1, SA, t, p) / (gibbs(n0, n2, n0, SA, t, p))", "title": "" }, { "docid": "441f4390671151a658ae641b017047d6", "score": "0.556664", "text": "def ppmv2pa(x, p):\n return x * p / (1e6 + x)", "title": "" }, { "docid": "d27ca1e003b53ece5ab9f1f0e8f2ce93", "score": "0.55485916", "text": "def f_pseudo_periodique(beta, omega, A, B, t):\n return math.exp(-beta*t)*(A*math.cos(omega*t)+B*math.sin(omega*t))", "title": "" }, { "docid": "06cd4e2088c2c1d43960776b6e1a579e", "score": "0.5546343", "text": "def annualized_return(self) -> float:\n return self.__annualized_return", "title": "" }, { "docid": "fe98470dccf2e8ced11da6af1252fce1", "score": "0.5527254", "text": "def getProbability(self):\n return self.p", "title": "" }, { "docid": "daeb75883fd9d23198eaf7361e39c953", "score": "0.5521143", "text": "def p_alpha(p,p0,th,s):\n a11,a12,a13,a14,a15,a16,a21,a22,a23,a24,a25,a26,b11,b12,b13,b21,b22,b23=eosben07_const()\n #\n b1i=1/(b11+b12*th+b13*s)\n a1=(a11+th*(a12+a14*th+a15*s)+s*(a13+a16*s))*b1i\n a2=(a21+th*(a22+a24*th+a25*s)+s*(a23+a26*s))*b1i\n b2=(b21+b22*th+b23*s)*b1i\n #\n r=b2*(p-p0)+(a2-a1*b2)*ma.log((a1+p)/(a1+p0))\n #\n return r", "title": "" }, { "docid": "5e3d6309e5ae8993e438d8b810b110c3", "score": "0.55092055", "text": "def proton_distribution_probability(product, substrate, precursor):\n global BASIC_AA\n p = substrate[1]\n q = substrate[2]\n p1 = product[1]\n p2 = p - p1\n q1 = product[2]\n q2 = q - q1\n p0 = precursor[1]\n nu = len(substrate[0])\n nu1 = len(product[0])\n #nu = sum(aa in BASIC_AA for aa in substrate[0])\n #nu1 = sum(aa in BASIC_AA for aa in product[0])\n nu2 = nu - nu1\n # Distribution over initial charging on product sequence:\n # Hypergeometric with population nu, successes nu1, trials p0\n XA1 = hypergeom_pmf_iterator(N=nu, k=nu1, n=p0)\n XA1_0 = max(0, p0-nu2)\n PTR = iterate_over_ptr_probabilities(p1=p1, q1=q1, p=p, q=q, p0=p0)\n PTR_0 = p1+q1+1\n kmin = max(XA1_0, PTR_0)\n kmax = min(nu1, p0-p2-q2+1)\n # Moving past the \"null\" summands\n for i in range(max(0, kmin - PTR_0 - 1)):\n PTR.next()\n for i in range(max(0, kmin - XA1_0 - 1)):\n XA1.next()\n pr = 0.\n for i in range(kmin, kmax+1):\n A, B = PTR.next()\n C = XA1.next()\n pr += (float(A)*float(nu2)/nu + float(nu1)/nu)*B*C\n #pr = sum((float(A)*float(nu2)/nu + float(nu1)/nu)*B*C for i, (A, B), C in 
enumerate(zip(PTR, XA1)) if i + kmin < kmax)\n return pr", "title": "" }, { "docid": "d1a724016faa87efe811cac38a976021", "score": "0.55043864", "text": "def avg_amp(self, params):\n amps = params[:self.no_ants*2:2]\n if self.logamp:\n amps = np.exp(amps)\n return np.mean(amps) - 1", "title": "" }, { "docid": "66fe6cf3be98ef6d29b947bf9083ca27", "score": "0.54913193", "text": "def compute_period(a, M, m):\n return np.sqrt(4 * np.pi **2 / (takahe.constants.G * (M+m) * takahe.constants.SOLAR_MASS) * (a * takahe.constants.SOLAR_RADIUS)**3) / (60 * 60 * 24)", "title": "" }, { "docid": "e5a5607d44f9bb2f5301283876b8f61d", "score": "0.5488452", "text": "def prob(self, at, log = False):", "title": "" }, { "docid": "7ca10d9bd1a5946e2d0dff1c986e69c1", "score": "0.54698384", "text": "def exp_decay(p,x):\n a, b, c = p\n\n return a + b * np.exp(-x / c)", "title": "" }, { "docid": "8f906cdb8b7cc43327c26bd266d0974a", "score": "0.5467352", "text": "def get_annual_eps_value(eps_data: dict):\n years = list(set([int(k.split('-')[0]) for (k, v) in eps_data.items()]))\n max_year = max(years)\n three_years_ago = max_year - 3\n filter_years = sorted([str(x) for x in years if max_year >= x >= three_years_ago], reverse=True)\n values = list()\n for year in filter_years:\n values.append(round(sum([v for (k, v) in eps_data.items() if year in k]), 2))\n return dict(zip(filter_years, values))", "title": "" }, { "docid": "71d8b77d14e51ab8e133db3f50a7fe87", "score": "0.5463908", "text": "def prob(self, Pn):\n return (1.-2.*Pn/(self.N-1.))**((self.N-3.)/2.)", "title": "" }, { "docid": "5878d8693907bf81216b28f335fdc419", "score": "0.5459113", "text": "def rate(e):\n d = np.abs(e - e[-1])\n p = []\n for i in range(len(d)-1):\n p.append(np.log(d[i+1])/np.log(d[i]))\n return p", "title": "" }, { "docid": "7be049e111a0f8923aebf37b90cef533", "score": "0.5458327", "text": "def paretovariate_impl(alpha):\n # Jain, pg. 495\n u = 1.0 - _random()\n return 1.0 / u ** (1.0/alpha)", "title": "" }, { "docid": "6aa974bfaae301fa79ac0fbeabb3bb21", "score": "0.54536045", "text": "def EP(d):\n return 4*d*(5/6)**d + 1 - (5/6)**d", "title": "" }, { "docid": "46e76d91ba872e7607db1518181831cf", "score": "0.54457945", "text": "def calc_alpha(epsilon):\n\n if (epsilon == 0):\n return np.inf\n\n\n return 0.5 * np.log( (1. 
- epsilon)/epsilon )", "title": "" }, { "docid": "0c4c2f4d23858b4b4c3cc0ad0d940f8d", "score": "0.5436166", "text": "def beamfrac(FWHM, length, angle):\n height_of_sample = length * np.sin(np.radians(angle))\n beam_sd = FWHM / 2 / np.sqrt(2 * np.log(2))\n probability = 2.0 * (\n stats.norm.cdf(height_of_sample / 2.0 / beam_sd) - 0.5\n )\n return probability", "title": "" }, { "docid": "d0f6ccb38864d16aea5f119c73c0b0fb", "score": "0.54260474", "text": "def theta(self, *args, eps=0.001):\n # Fix the random generator seed for MonteCarlo pricing\n st = np.random.get_state()\n price1 = self.price(*args)\n np.random.set_state(st)\n self.term -= eps\n price2 = self.price(*args)\n self.term += eps\n return (price2 - price1) / (365 * eps)", "title": "" }, { "docid": "bad795ccd1af697171146833dd55aaa9", "score": "0.5425807", "text": "def _calculate_PAYE(period_income, period):\r\n predicted_annual_income = period * period_income\r\n \r\n income_tax = _calculate_income_tax(predicted_annual_income)\r\n acc_levy = _calculate_ACC_levy(predicted_annual_income)\r\n \r\n total_tax_paid = income_tax + acc_levy\r\n \r\n return round(total_tax_paid / period, 2)", "title": "" }, { "docid": "e00a47c7e5892e10021e3d495160bb4d", "score": "0.5419779", "text": "def e_var(partition):\n P = np.log(partition)\n S = len(partition)\n X = 0\n for x in P:\n X += (x - np.mean(P))**2/S\n evar = 1 - 2/math.pi*np.arctan(X) \n return(evar)", "title": "" }, { "docid": "55297a2ff4086c2bf1acf53482780e23", "score": "0.5414374", "text": "def expected_earnings(mao):\r\n return (.5 - (mao - ((mao ** 2) / 2)))", "title": "" }, { "docid": "244d6a5bac71aaacecf03ad1089d25dc", "score": "0.5397943", "text": "def calculate_probability(x, mean, stdev):\n exponent = np.exp(-1.0 * ((x-mean)**2 / (2*(stdev**2))))\n return (1/(np.sqrt(2.0 * np.pi) * stdev)) * exponent", "title": "" }, { "docid": "2dac13767a930f12ba0d31b9db65a986", "score": "0.5397827", "text": "def empirical_prob(self, *items, delta = 0):\n assert (len(items) in [1, 2])\n if (len(items) == 1):\n denominator = self.total_counts + delta * self.num_vocab ** 2\n else:\n denominator = self.total_edges + delta * self.num_possible_pairs\n return self.empirical_freq(*items, delta = delta) / denominator", "title": "" }, { "docid": "647fe4d57e4a1de86bbc9cd34a971c9c", "score": "0.5396929", "text": "def soilmoisture( self, evaporative_fraction ):\n\t\t\tresult = (exp((evaporative_fraction-1.2836)/0.4213))/0.511\n\t\t\treturn result", "title": "" }, { "docid": "0a486069b20a599edb89c810c19a2d7b", "score": "0.53961486", "text": "def gaussian_moment(p):\n if p % 2 == 0:\n return factorial2(p-1)\n else:\n return 0", "title": "" }, { "docid": "49ab846f51909ca0b71e08b8f97013e3", "score": "0.53892523", "text": "def logit2probability(log_odds):\n # convert log odds to odds\n odds = math.exp(log_odds)\n # return probability\n return odds / float(1 + odds)", "title": "" }, { "docid": "8170543ac5ff1d2bba1fd16b24f8d220", "score": "0.5387339", "text": "def decay(t, p):\n return p[0] * np.exp(-t / p[1]) + p[2]", "title": "" }, { "docid": "40958abdc2ac4eefc3a72a6573351082", "score": "0.53814656", "text": "def _approx_ap(airf,pres):\n pvap = pres * (1-airf)/(airf*_EPSW + 1-airf)\n v = numpy.log(pvap/_PTPE)/_BVI\n x = maths4.lamb2(v,_AVI/_BVI)\n temp = _TTP/x\n dhum = (pres-pvap + _EPSW*pvap) / (_RDRY*temp)\n return temp, dhum", "title": "" }, { "docid": "cba8b19e85284fbfe2a59ea77809b5ed", "score": "0.53794813", "text": "def startprob(self):\n return np.exp(self._log_startprob)", "title": "" }, { "docid": 
"008acf3c978605d22a56d6110134eaba", "score": "0.5377723", "text": "def get_exp_probabilities(self, fluxes, inst_params):\n prob = 1 - np.exp(-fluxes / inst_params[\"scale\"])\n\n return prob", "title": "" }, { "docid": "3c14c31873659ea1950017763c7e4e2b", "score": "0.5364507", "text": "def get_ev(a: np.ndarray) -> Param:\n\n return a / np.sum(a),", "title": "" }, { "docid": "54881653f7ce52de6c69bcc2f8abd6f2", "score": "0.53623134", "text": "def topp(v):\n return np.exp(v) / np.sum(np.exp(v))", "title": "" }, { "docid": "eb044c957e5b8785a3ac8b0bf516416f", "score": "0.5361167", "text": "def probability(self, next_state, state, action):\n if self.sample(state, action) == next_state:\n return 1.0 - self.epsilon\n else:\n return self.epsilon", "title": "" }, { "docid": "d9c6ca6447b8cce60ebe738b1842c361", "score": "0.5350514", "text": "def test_getAngularDistribution_PWave(self):\n egrid = self.RRR1PRes.generateEnergyGrid()\n eStep = 1\n J = 5.5 # for this test data\n S = 4.5 # for this test data\n from numericalFunctions import angularMomentumCoupling as nf_amc\n self.assertAlmostEqual(\n nf_amc.z_coefficient(2, int(2 * J), 2, int(2 * J), int(2 * S), 4) / nf_amc.z_coefficient(2, int(2 * J), 2,\n int(2 * J),\n int(2 * S), 0),\n math.sqrt(0.33090909)) # nf_amc functions use the 2J trick\n for E in egrid[::eStep]:\n B = self.RRR1PRes.getAngularDistribution(E)\n self.assertAllWithinXPercent([x / B[0] for x in B], [1.0, 0.0, 0.33090909])", "title": "" }, { "docid": "54012fa6ca93fc00bc7a9e27ee1fe249", "score": "0.5335799", "text": "def offer_prob(year, month, sex, race, referral, precomputed_emp_dist=None):\n if precomputed_emp_dist is not None:\n emp = precomputed_emp_dist[year][month - 1][race.name][sex.name]\n else:\n emp = employment_dist(year, month, sex, race)\n\n # quick-and-dirty mapping to race keys\n if not isinstance(race, str):\n if race.name == 'aian':\n race = 'nativeamerican'\n elif race.name == 'white':\n # b/c white and hispanic are merged together\n # TODO find dataset that separates these?\n race = random.choice(['hispanic', 'white'])\n elif race.name in ['chinese', 'japanese', 'api']:\n race = 'asian'\n elif race.name == 'black':\n race = 'black'\n\n # mean of two random races. 
this is a sloppy guess,\n # since the actual evidence may not work this way\n elif race.name in ['other', 'two', 'three_plus']:\n races = list(p_offer['offered']['race'].keys())\n op1 = _offer_prob(sex, random.choice(races), referral, emp)\n op2 = _offer_prob(sex, random.choice(races), referral, emp)\n return (op1 + op2)/2\n\n return _offer_prob(sex, race, referral, emp)", "title": "" }, { "docid": "f7550b5825aea3cfb202d2a09abfeabe", "score": "0.53341776", "text": "def ampsAtPeriod(period):\n checkPeriod(period, \"ampsAtPeriod::period\")\n return startingSequence[period-1][1]", "title": "" }, { "docid": "8680f547542f4aa8920e1df44de4b6b9", "score": "0.5333341", "text": "def AIC(self):\n RSS = self.getRMSE()\n k = len(self.variables)\n n = len(self.exp_data)\n AIC = 2 * k + n * (log(2 * pi * RSS/n) + 1)\n return AIC", "title": "" }, { "docid": "879fc398a4991981587ecdfa58f50ded", "score": "0.5328824", "text": "def prediction_interval(self, xp=None, yp=None, alpha=0.05):\n if not (xp is None) ^ (yp is None):\n raise ValueError('Only one of [xp, yp] must be specified.')\n\n if xp is not None:\n return self._y_pi(xp, alpha=alpha)\n if yp is not None:\n return self._x_pi(yp, alpha=alpha)", "title": "" }, { "docid": "243d4ce62d5695a4a1cde42e12efba40", "score": "0.5327332", "text": "def alpha_wrt_t_exact(SA, t, p):\n\n return gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p)", "title": "" }, { "docid": "dbaeb65c4918cd305cc1d2de1739b203", "score": "0.53269297", "text": "def probability():\n f1 = AffineTransform(0, 0, 0, 0.16, 0, 0)\n f2 = AffineTransform(0.85, 0.04, -0.04, 0.85, 0, 1.60)\n f3 = AffineTransform(0.20, -0.26, 0.23, 0.22, 0, 1.60)\n f4 = AffineTransform(-0.15, 0.28, 0.26, 0.24, 0, 0.44)\n functions = [f1, f2, f3, f4]\n p1 = 0.01\n p2 = 0.85\n p3 = 0.07\n p4 = 0.07\n p_cumulative = np.array([[0, p1],\n [1, p1+p2],\n [2, p1+p2+p3],\n [3, p1+p2+p3+p4]])\n r = np.random.random()\n for j, p in p_cumulative:\n if r < p:\n return functions[int(j)]", "title": "" }, { "docid": "7a47e37d6559b69bf1b357b7e8a0d017", "score": "0.5326326", "text": "def exponential_var(p,x):\n \n \n plt.subplot(311)\n plt.semilogy(x, exp_decay(np.array([p[0[0]],p[1[3]],p[2[3]]])), x)\n plt.savefig('KEG_exponential_var.pdf', bbox_inches='tight', dpi=300)", "title": "" }, { "docid": "90e6a4c6840c0695d1f98535c676f6f3", "score": "0.5323668", "text": "def date_of_annual_after(title, eff_date):\n publication_date = date(eff_date.year, publication_month(title), 1)\n if eff_date <= publication_date:\n return publication_date\n else:\n return publication_date.replace(year=eff_date.year + 1)", "title": "" }, { "docid": "2b026ba3820417246be18a6149607298", "score": "0.53191286", "text": "def cronbach_alpha(data):\n n_items = data.shape[0]\n\n valid_mask = data != INVALID_RESPONSE\n \n item_variance = np.var(data, axis=1, ddof=1, where=valid_mask).sum()\n people_variance = (n_items * n_items\n * np.mean(data, axis=0, where=valid_mask).var(ddof=1))\n \n return (n_items / (n_items - 1) \n * (1 - item_variance / people_variance))", "title": "" }, { "docid": "7e73ca74ce81005f48c4c98ed9cd9001", "score": "0.53163207", "text": "def calc_extreme_value(params,distribution,nyear,datafraction=1.0):\n probability=1.0/(nyear*365.0*datafraction)\n \n x=np.arange(0,10000,10)\n pdf=distribution.pdf(x,*params)\n pos=np.where((pdf>=probability) & (pdf>0))[0]\n \n if len(pos)==0:return 0\n pos=pos[-1]\n\n x=np.arange(x[pos]-10,x[pos]+10,0.1)\n pdf=distribution.pdf(x,*params)\n pos=np.where((pdf<=probability) &(pdf>0))[0]\n\n if 
x[pos[0]]<0:return 0\n return x[pos[0]]", "title": "" }, { "docid": "a87e4e5986c8905599dc2a722d7a32cd", "score": "0.5314459", "text": "def period(r):\n m = 0\n d = 1\n a = a0 = floor(r)\n p = []\n\n while a != 2*a0:\n m = round(d*a - m)\n d = round((r*r - m*m)/d)\n a = round(floor((a0 + m)/d))\n p.append(a)\n return (a0, p)", "title": "" }, { "docid": "2330a22f92cb5bc3db086c0c7218a8cb", "score": "0.5306773", "text": "def calculateAnom(var):\n ### Import modules\n import numpy as np\n \n mean = np.nanmean(var,axis=0)\n anom = var - mean\n \n print('Completed: Calculate anomalies for polar cap!')\n return anom", "title": "" }, { "docid": "c6a555f2566874f2dfe5d796711f788f", "score": "0.53059053", "text": "def exponential_annealing(init_value, min_value, decay_ratio):\n result = init_value\n\n while True:\n result = max(min_value, result * decay_ratio)\n yield result", "title": "" }, { "docid": "ccaab3a5cf7e0e80c93134e6ef44d7e9", "score": "0.53049403", "text": "def per(soup):\r\n\r\n period = soup.find(id=\"planet_period_0\").text\r\n\r\n period = float(period.split(\" \")[0])\r\n \r\n return period", "title": "" }, { "docid": "80192f78f06ba42dced0be35570c30c2", "score": "0.5300492", "text": "def calculate_probability(self, value, mean, std_dev):\n # Calculates the exponent.\n exponent = math.exp(-(math.pow(value-mean,2)/(2*math.pow(std_dev,2))))\n # Returns the probability.\n return (1 / (math.sqrt(2*math.pi) * std_dev)) * exponent", "title": "" }, { "docid": "d683d54fb547bb0d5ffb95227395954c", "score": "0.52974075", "text": "def getFrequency(self):\n if self.period: return 1.0/self.period\n return 0", "title": "" }, { "docid": "8af254d526217ba25a3ddee9e94c8a83", "score": "0.52971166", "text": "def annual_eval(out_dir, pts):\n dgp_value = np.genfromtxt(f\"{out_dir}dgp_{pts}.out\", delimiter=\" \", usecols=3)\n fdgpe = np.zeros(3) # DGP > 0.35, 0.4, 0.45\n for i in range(len(dgp_value)):\n for j in range(3):\n if dgp_value[i] > (0.35 + j * 0.05):\n fdgpe[j] = fdgpe[j] + 1\n for j in range(3):\n fdgpe[j] = fdgpe[j] * 5.43 / 1304\n np.savetxt(f\"{out_dir}fDGPe_{pts}.out\", fdgpe, delimiter=\" \", fmt=\"%1.3f\")", "title": "" }, { "docid": "db93ccc3207dc8aa7a31f25624b10f80", "score": "0.52957386", "text": "def zolotarev_pdf_exponentiated(self, x, alpha):\n denominator = pow(sinc(alpha * x), alpha) \\\n * pow(sinc((1. - alpha) * x), (1. 
- alpha))\n numerator = sinc(x)\n return numerator / denominator", "title": "" }, { "docid": "5133c303c323d063c9da40af348b07b2", "score": "0.5292389", "text": "def bar2psia(a):\n return (a + 1.013) * 14.50377", "title": "" }, { "docid": "433b64525c26c03fe98d5aacf62f0afd", "score": "0.5291426", "text": "def p_from_am(sma, ms):\n a = sma * u.au.to(u.m)\n ms = ms * u.Msun.to(u.kg)\n G = c.G.value\n p = np.sqrt((4 * np.pi ** 2 * a ** 3) / (G * ms))\n return p * u.second.to(u.day)", "title": "" }, { "docid": "b7ea2e4a7c9d0289f55632f817962b64", "score": "0.52898544", "text": "def damping_ratio(self):\n return self._eta", "title": "" }, { "docid": "60f281fb55f9ec9866f8938ebd39e617", "score": "0.5287896", "text": "def mean(p):\n\n v = 0.0\n for i, pi in zip(_I, p):\n v += i*pi\n\n return v", "title": "" }, { "docid": "8d5dacdd4a7189d99c9572027701b844", "score": "0.52864736", "text": "def get_probsrsa_vectorized(number, exp_rate):\r\n return (np.power(exp_rate, number))*(np.exp(-exp_rate))/(scipy.special.factorial(number))", "title": "" }, { "docid": "5b25b7c54d9e8dc6830baa497956688d", "score": "0.52847904", "text": "def arc_length(Eps, alpha):\n return Eps * alpha", "title": "" }, { "docid": "b5fc16f5816f57878f83370c9daf880f", "score": "0.5284486", "text": "def int_eta0(self,p,T,muB,muQ,muS,**kwargs):\n Ep = np.sqrt(p**2.+self.M(T,muB,muQ,muS,**kwargs)**2.) # pole energy\n ffq = self.fq(Ep,T,muB,muQ,muS) # distribution function\n return 1./(15.*T)*self.dg*(4.*pi*p**2.)/((2.*pi)**3)*(p**4.)/(Ep**2.)*ffq*(1-self.BEFD*ffq)/(2*self.w(T,muB,muQ,muS,**kwargs))", "title": "" }, { "docid": "5f95cf88f90172d89ed05f46bff261c0", "score": "0.52806705", "text": "def period(x, y, ofac=4, hifac):\n n = len(x)\n (wi, wpi, wpr, wr, px, py) = ([0]*n, [0]*n, [0]*n, [0]*n, [0]*n)\n nout = int(.5*ofac*hifac*n)\n if np < nout:\n px.np.ndarray.resize(nout)\n py.np.ndarray.resize(nout)\n ave = np.avearge(y)\n var = np.var(y)\n if var == 0:\n print(\"Zero variacne in period\")\n return\n xmax = xmin = x[0]\n for j in range(0, n):\n if x[j] > xmax:\n xmax = x[j]\n if x[j] < xmin:\n xmin = x[j]\n xdif = xmax - xmin\n xave = .5*(xmax+xmin)\n pymax =0\n pnow = 1/(xdif*ofac) # starting frequency\n for j in range(0, n): # initialize values for the trigonometric recurrences at each data point\n arg = 2*np.pi*(x[j]-ave)*pnow\n wpr[j] = -2*np.sqrt(np.sin(.5*arg))\n wpi[j] = np.sin(arg)\n wr[j] = np.cos(arg)\n wi[j] = wpi[j]\n for i in range(0, nout):\n px[i] = pnow\n sumsh = sumc = 0\n for j in range(0,n):\n c = wr[j]\n s = wi[j]\n sumsh += s*c\n sumc += (c-s)*(c+s)\n wtau = .5*np.arctan(p2*sumsh, sumc])\n swtau = np.sin(wtau)\n cwtau = np.cos(wtau)\n sums = sumc= sumsy = sumcy = 0\n for j in range(0, n): # loop over the data again to get the periiodogram value\n s = wi[j]\n c = wr[j]\n ss = s * cwtau - c*swtau\n cc = c*cwtau + s*swtau\n sums += ss*ss\n sumc += cc*cc\n yy = y[j] - ave\n sumsy += yy*ss\n sumcy += yy*cc\n wtemp = wr[j]\n wr[j] = (wtemp*wpr[j] - wr[j] * wpi[j]) + wr[j] # trigonometric recurrences\n wi[j] = (wi[j] * wpr[j] + wtemp * wpi[j]) + wi[j]\n py[i] = .5*(sumcy * sumcy/sumc + sumsy * sumsy/sums)/var\n if py[i] >= pymax:\n jmax = i\n pymax = py[jmax]\n pnow += 1/(ofac*xdif)\n expy = np.exp(-pymax)\n effm = 2*nout/ofac\n prob = effm * expy\n if prob > .01:\n prob = 1 - np.power(1 - expy, effm)\n return px, py, prob", "title": "" }, { "docid": "73fa5608009670c1ebc32a92411e499a", "score": "0.5279629", "text": "def _probability_of_title_with_length(self, length):\n sigma = 6\n return np.exp(-1 * ((length - 
self.avg_length) ** 2) / (2 * (sigma ** 2))) / (math.sqrt(2 * np.pi) * sigma)", "title": "" }, { "docid": "f4d435eac6074cd3164079f5ca5870ba", "score": "0.5273136", "text": "def frac_position(eta,eta_distribution):\n pass", "title": "" }, { "docid": "a337c988c1ebbf9f39e6dda243a101f1", "score": "0.52646464", "text": "def apo(price, period_slow = 26, period_fast = 12, ma = \"sma\"):\n if ma.lower() not in [\"sma\", \"ema\"]:\n raise ValueError(\"param 'ma' needs to be 'ema' or 'sma'\")\n\n return apo_calc(price, period_slow, period_fast, ma.lower())", "title": "" }, { "docid": "7dea0661caea45ea6b8f392fa9985659", "score": "0.5260097", "text": "def rand_exp(rate):\n \n return -log(random()) / rate", "title": "" }, { "docid": "c0621abd8797937864d627e54d54a7c8", "score": "0.5259038", "text": "def discounted_payback_period(self, proforma):\r\n payback_period = self.payback_period(proforma) # This is simply (capex/yearlynetbenefit)\r\n\r\n if not self.npv_discount_rate:\r\n dr = self.npv_discount_rate*.01\r\n discounted_pp = np.log(1/(1-(dr*payback_period)))/np.log(1+dr)\r\n else:\r\n # if discount_rate = 0, then the equation simplifies to np.log(1)/np.log(1)-- which is undefined, so we return nan\r\n discounted_pp = 'nan'\r\n\r\n return discounted_pp", "title": "" }, { "docid": "6c08eac2e3e543e690b820889ca1868d", "score": "0.5257021", "text": "def ann_return1_year(self) -> float:\n return self.__ann_return1_year", "title": "" } ]
a2553f674f9dde5c403aedc77dda2d72
Gets the metadata for the copyright.
[ { "docid": "add6e0375766c1ae82f03796afa040ba", "score": "0.8633044", "text": "def get_copyright_metadata(self):\n pass", "title": "" } ]
[ { "docid": "24f84680cf596560ecdac39cce9e64bc", "score": "0.8069426", "text": "def get_copyright_registration_metadata(self):\n pass", "title": "" }, { "docid": "9b439f8061f824cb732d55672fc3eaff", "score": "0.7513539", "text": "def copyright():\n # pull and print the meta-data\n return print(meta.header)", "title": "" }, { "docid": "9b439f8061f824cb732d55672fc3eaff", "score": "0.7513539", "text": "def copyright():\n # pull and print the meta-data\n return print(meta.header)", "title": "" }, { "docid": "9ce9dfaa695a5f317033c21935fab989", "score": "0.72402924", "text": "def copyright(self):\n return self._copyright", "title": "" }, { "docid": "69c61a3fb10cf9c01b25ed094a4ef720", "score": "0.7208793", "text": "def copyright(self) -> str:\n return pulumi.get(self, \"copyright\")", "title": "" }, { "docid": "69c61a3fb10cf9c01b25ed094a4ef720", "score": "0.7208793", "text": "def copyright(self) -> str:\n return pulumi.get(self, \"copyright\")", "title": "" }, { "docid": "69acb209ae4f2b9afe7528869611d6af", "score": "0.702879", "text": "def get_copyright(self):\n return self._payload.get_copyright()", "title": "" }, { "docid": "f60a01cb4b03c7e41fdce6f74baa2074", "score": "0.7024526", "text": "def copyright():\n return print(meta.header)", "title": "" }, { "docid": "dc89c32813693811b9f0d3f971f18ea0", "score": "0.698063", "text": "def _copyright(self):\n return self.copyright", "title": "" }, { "docid": "2160cef48267207c30a1b6b4184ac02f", "score": "0.69552654", "text": "def getCopyright(self):\n return self.copyright", "title": "" }, { "docid": "6d8b1679f609c6d0124c424dcbf59974", "score": "0.69459635", "text": "def get_metadata(self, citation):\n pass", "title": "" }, { "docid": "bad2ebb93b92f073ae4061987b3d7f07", "score": "0.6943443", "text": "def GetCopyright(self):", "title": "" }, { "docid": "ec88284d34c75931c56892c679c7a4bb", "score": "0.6834667", "text": "def feed_copyright(self):\n return _siteinfo().copyright", "title": "" }, { "docid": "671c3475d5dbaafeb2cd46f5c7684610", "score": "0.6762198", "text": "def meta_copyright(cls, text, time=0.0):\n return cls._textmeta(mm.kCopyRight, text, time)", "title": "" }, { "docid": "97a01f19b6c844cfe4b5f92954152630", "score": "0.6670829", "text": "def item_copyright(self):\n return _siteinfo().copyright", "title": "" }, { "docid": "9c6a1486503376ff18f1a8044bc275f8", "score": "0.66507226", "text": "def get_metadata(self, soup):\n metadata = {}\n meta = soup.find_all('meta')\n for m in meta:\n if m.has_attr('name') and m.get('name').startswith('dcterms'):\n metadata[m.get('name')] = m.get('content')\n return metadata", "title": "" }, { "docid": "83dbd6adadea9e223f97c4bc546b48d0", "score": "0.6647738", "text": "def copyright():\n # generate the message\n return print(meta.copyright)", "title": "" }, { "docid": "d59e15d78633fd7875488fb65753797d", "score": "0.6624673", "text": "def metadata(self):\n # identifiers\n dump = self.download()\n if dump['totalItems'] == 0:\n return 0\n\n google_books_data = dump['items'][0]['volumeInfo']\n authors = \"\"\n for author in google_books_data['authors']:\n authors = author + \",\" + authors\n authors = authors[0:-1]\n # i=0\n # for val in meta_data.keys():\n # t = google_books_data[kickstr[i]]\n # if val == 'cover':\n # t = google_books_data['imageLinks']['thumbnail']\n # elif val == 'published_date':\n # t=\n # meta_data[val] = t\n # i+=1\n meta_data = {\n 'google_url' : \"\",\n 'description' : \"\",\n 'tags' : \"\",\n 'cover' : \"\",\n 'published_date': \"\",\n 'publisher' : \"\",\n 'authors' : \"\",\n 'page_count' : 
\"\",\n 'title' : \"\"\n }\n try:\n meta_data['google_url'] = google_books_data['canonicalVolumeLink']\n except:\n meta_data['google_url'] = None\n try:\n meta_data['description'] = google_books_data['description']\n except:\n meta_data['description'] = None\n try:\n meta_data['tags'] = google_books_data['categories']\n except:\n meta_data['tags'] = None\n try:\n meta_data['cover'] = google_books_data['imageLinks']['thumbnail']\n except:\n meta_data['cover'] = None\n try:\n meta_data['published_date'] = parser.parse(google_books_data['publishedDate'])\n except:\n meta_data['published_date'] = None\n try:\n meta_data['publisher'] = google_books_data['publisher']\n except:\n meta_data['publisher'] = None\n try:\n meta_data['authors'] = authors\n except:\n meta_data['authors'] = None\n try:\n meta_data['page_count'] = google_books_data['pageCount']\n except:\n meta_data['page_count'] = None\n try:\n meta_data['title'] = google_books_data['title']\n except:\n meta_data['title'] = \"Title\"\n print(meta_data)\n print(google_books_data)\n return meta_data", "title": "" }, { "docid": "a4f7eb2f8c6a2c093a771176339fc58d", "score": "0.6615866", "text": "def get_metadata(self):\n data = {\n \"license_code\": self.license_code,\n \"version\": self.version,\n \"title_english\": self.title_english,\n }\n if self.jurisdiction_code:\n data[\"jurisdiction\"] = self.jurisdiction_code\n\n data[\"permits_derivative_works\"] = self.permits_derivative_works\n data[\"permits_reproduction\"] = self.permits_reproduction\n data[\"permits_distribution\"] = self.permits_distribution\n data[\"permits_sharing\"] = self.permits_sharing\n data[\"requires_share_alike\"] = self.requires_share_alike\n data[\"requires_notice\"] = self.requires_notice\n data[\"requires_attribution\"] = self.requires_attribution\n data[\"requires_source_code\"] = self.requires_source_code\n data[\"prohibits_commercial_use\"] = self.prohibits_commercial_use\n data[\n \"prohibits_high_income_nation_use\"\n ] = self.prohibits_high_income_nation_use\n\n data[\"translations\"] = {}\n for lc in self.legal_codes.order_by(\"language_code\"):\n language_code = lc.language_code\n with active_translation(lc.get_translation_object()):\n data[\"translations\"][language_code] = {\n \"license\": lc.license_url,\n \"deed\": lc.deed_url,\n \"title\": gettext(self.title_english),\n }\n\n return data", "title": "" }, { "docid": "930ea34840c9e82253cc2b16322d01cf", "score": "0.65950376", "text": "def copyright(self):\n\n return self.tagsToFind[self.COPYRIGHT_TAG].strip('\\x20\\x00')", "title": "" }, { "docid": "7be0226f9ced814809bed0f04b879888", "score": "0.6556654", "text": "def copyright(self, format=\"json\"):\n\n endpoint = \"copyright\"\n\n return self._make_request(endpoint, {\"format\": format})", "title": "" }, { "docid": "6d0fe9427617aca6967e17a2b63681dc", "score": "0.6530905", "text": "def copyright_str(self) -> str:\n with self.path.open() as f:\n match = re.search(self.copyright_re, f.read(1000))\n return match.group() if match is not None else ''", "title": "" }, { "docid": "f11dac91b5ae360193af2698e24cbfa6", "score": "0.64764947", "text": "def get_metadata(self):\n return self.content[\"metadata\"]", "title": "" }, { "docid": "5a02524fe6ba0d458c650a2c566f23b7", "score": "0.63898975", "text": "def getMetadata():", "title": "" }, { "docid": "ddf409025c90dbd123a7ac7132e8c2ce", "score": "0.633487", "text": "def get_copyright_registration(self):\n return self._payload.get_copyright_registration()", "title": "" }, { "docid": 
"5cc7229c6196b75042c80dd2fb36e5ab", "score": "0.6308308", "text": "def copyrights(self) -> List[CopyrightObject]:\n return [CopyrightObject(item) for item in self._json_dict['copyrights']]", "title": "" }, { "docid": "5cc7229c6196b75042c80dd2fb36e5ab", "score": "0.6308308", "text": "def copyrights(self) -> List[CopyrightObject]:\n return [CopyrightObject(item) for item in self._json_dict['copyrights']]", "title": "" }, { "docid": "5cc7229c6196b75042c80dd2fb36e5ab", "score": "0.6308308", "text": "def copyrights(self) -> List[CopyrightObject]:\n return [CopyrightObject(item) for item in self._json_dict['copyrights']]", "title": "" }, { "docid": "bb0d25b59c21e2cbc2ce757c420006cc", "score": "0.6302299", "text": "def get_metadata():\n here = os.path.abspath(os.path.dirname(__file__))\n init_path = os.path.join(here, 'src', 'serde_ext', '__init__.py')\n readme_path = os.path.join(here, 'README.rst')\n\n with io.open(init_path, encoding='utf-8') as f:\n about_text = f.read()\n\n metadata = {\n key: re.search(r'__' + key + r'__ = [\"\\'](.*?)[\"\\']', about_text).group(1)\n for key in ('title', 'version', 'url', 'author', 'author_email', 'license', 'description')\n }\n metadata['name'] = metadata.pop('title')\n\n with io.open(readme_path, encoding='utf-8') as f:\n metadata['long_description'] = f.read()\n\n return metadata", "title": "" }, { "docid": "78d4d24772433b15772260ee08b53245", "score": "0.62911433", "text": "def get_metadata(html, text, filename):\n blog, post = filename.split(\"_\")\n url = \"http://\" + blog + \".hypotheses.org/\" + str(post)\n title = get_title(html, filename)\n author = get_author(html)\n date = get_date(html)\n licence = get_licence(html)\n language = check_language(text)\n category = get_category(text, title)\n numwords = get_numwords(text)\n metadata = [filename, language, author, numwords, category, date, licence, blog, post, url, title]\n return metadata", "title": "" }, { "docid": "70d5607b649ee929aa19bf87e13a4a9d", "score": "0.6261161", "text": "def get_metadata(self, **kwargs):\n return self.get_entry(\n METADATA_URI, desired_class=gdata.docs.data.Metadata, **kwargs)", "title": "" }, { "docid": "e92c69e3e660e418673713ba774eaf56", "score": "0.62316877", "text": "def get_license():\n data_license = {}\n data_license[\"name\"] = \"cc-by-4.0\"\n data_license[\"url\"] = \"https://creativecommons.org/licenses/by/4.0/\"\n data_license[\n \"description\"] = \"The content can be shared and adapted but you must\\\n give appropriate credit and cannot restrict access to others.\"\n return data_license", "title": "" }, { "docid": "db49b6089ece2e54a555d92c706d8067", "score": "0.6197543", "text": "def get_metadata(self):\n api_call = self._get_url(\"catalog\")\n\n response = get_response(api_call)\n return json.load(response)", "title": "" }, { "docid": "6916c01dffbfac6104f7b478a6ac5b10", "score": "0.619042", "text": "def get_metadata(name):", "title": "" }, { "docid": "661b5544077ce492c145dac1c9cebc98", "score": "0.6173343", "text": "def getMetadata(content):", "title": "" }, { "docid": "b4dea74a7d52e3b49307d8736c3d2022", "score": "0.616945", "text": "def meta(data):\n return data[1]['metadata']", "title": "" }, { "docid": "2a09a6ed670672426b8d110a4b474f1b", "score": "0.6166778", "text": "def _GetMetadata(self, version):\n raw_json = None\n version_base = os.path.join(self.gs_base, version)\n with self.misc_cache.Lookup(\n (self.board, version, constants.METADATA_JSON)) as ref:\n if ref.Exists(lock=True):\n raw_json = osutils.ReadFile(ref.path)\n else:\n raw_json = 
self.gs_ctx.Cat(\n os.path.join(version_base, constants.METADATA_JSON)).output\n ref.AssignText(raw_json)\n\n return json.loads(raw_json)", "title": "" }, { "docid": "f3aab84491c21d1127d87bfaa6edee3d", "score": "0.6166168", "text": "def metadata(self):\r\n url = \"{url}/info/metadata\".format(url=self._url)\r\n params = {\"f\": \"json\"}\r\n return self._con.get(url, params, token=self._token)", "title": "" }, { "docid": "75b4e254ee8240f4ac8da787b98023aa", "score": "0.61653906", "text": "def get_metadata(self, identifier):", "title": "" }, { "docid": "ecd4d1e1ec210a4543fda79dfb14e941", "score": "0.61573976", "text": "def get_dataObject_modac_meta(data_object_path):\n #data_object_path = encode_path(data_object_path)\n auth = authenticate_modac()\n\n get_response = requests.get(data_object_path, auth = auth)\n if get_response.status_code != 200:\n print(\"Error downloading from modac.cancer.gov\", data_object_path)\n raise Exception(\"Response code: {0}, Response message: {1}\".format(get_response.status_code, get_response.text))\n\n metadata_dic = json.loads(get_response.text)\n self_metadata = metadata_dic['metadataEntries']['selfMetadataEntries']['systemMetadataEntries']\n self_dic = {}\n for pair in self_metadata:\n self_dic[pair['attribute']] = pair['value'] \n\n return self_dic", "title": "" }, { "docid": "3f5dd041ea3c0bdcc8389558c52fb93f", "score": "0.6153275", "text": "def get_metadata(self):\n\t\treturn self._metadata", "title": "" }, { "docid": "3f5dd041ea3c0bdcc8389558c52fb93f", "score": "0.6153275", "text": "def get_metadata(self):\n\t\treturn self._metadata", "title": "" }, { "docid": "3f5dd041ea3c0bdcc8389558c52fb93f", "score": "0.6153275", "text": "def get_metadata(self):\n\t\treturn self._metadata", "title": "" }, { "docid": "3f5dd041ea3c0bdcc8389558c52fb93f", "score": "0.6153275", "text": "def get_metadata(self):\n\t\treturn self._metadata", "title": "" }, { "docid": "4c1a5d4d20aca43089c913959ba5b359", "score": "0.6152495", "text": "def get_metadata(self) -> Mapping[int, Any]:\n return self._call(CredentialManagement.CMD.GET_CREDS_METADATA)", "title": "" }, { "docid": "2791defc294d703d6056962a5e568286", "score": "0.61457705", "text": "def extract_metadata(self) -> DocumentMetadata:\n raise NotImplementedError", "title": "" }, { "docid": "b6717d0b3afb11db4705a85098f55754", "score": "0.6125887", "text": "def get_metadata():\n try:\n metadata = cache.getDataset(os.environ.get('GCS_BUCKET'),\n os.environ.get('METADATA_FILENAME'))\n except Exception as err:\n logging.error(err)\n return 'Internal server error: {}'.format(err), 500\n\n def generate_response(data: bytes):\n next_row = b'['\n for row in data.splitlines():\n yield next_row\n next_row = row + b','\n yield next_row.rstrip(b',') + b']'\n headers = Headers()\n headers.add('Content-Disposition', 'attachment',\n filename=os.environ.get('METADATA_FILENAME'))\n headers.add('Vary', 'Accept-Encoding')\n return Response(generate_response(metadata), mimetype='application/json',\n headers=headers)", "title": "" }, { "docid": "6d19e00e8e2c4b71131d2c0a0c3d7852", "score": "0.61157686", "text": "def __md(self):\n p_l = self.__basepl('metadata')\n p_l['content'] = 'metadata'\n return RCRequest(self.url, p_l, 'metadata').execute()[0]", "title": "" }, { "docid": "6677f88ea288b5f1cd71ffa7b6c7309d", "score": "0.61138695", "text": "def metadata(self):\n self._parse_metadata_dates()\n if self._single_result:\n return self._metadata[0]\n else:\n return self._metadata", "title": "" }, { "docid": "f7f1b36e9ad5f893583292d517e8d016", "score": 
"0.6051656", "text": "def copyright(self, plexus, **kwds):\n # show the copyright note\n plexus.info.log(hello.meta.copyright)\n # all done\n return", "title": "" }, { "docid": "d73e78de7590fd2b5f905a784502060e", "score": "0.60199094", "text": "def metadata(self):\n return self._metadata", "title": "" }, { "docid": "fe079f4baf7615ee412f97917cef5c70", "score": "0.6006893", "text": "def get_published_metadata(self):\n pass", "title": "" }, { "docid": "a0102f653530de87a0e764b126b64a1b", "score": "0.6002597", "text": "def get_metadata(self):\n return self.metadata", "title": "" }, { "docid": "538f7cb4a8353bd46997b84091fa8913", "score": "0.6001879", "text": "def _get_metadata(cfg, relpath, doc_type):\n return {\n 'document_id': _get_document_id(cfg, relpath, doc_type),\n 'compilation_date': cfg['timestamp']['date'],\n 'timebox_id': cfg['timestamp']['timebox_id'],\n 'configuration_id': cfg['defined_baseline']['short_hexsha']}", "title": "" }, { "docid": "9c321faf1c3d2465ee5a469c968dda67", "score": "0.598438", "text": "def get_metadata(self, params=None):\n metadata_url = \"{CDN_URL}/{handle}/metadata\".format(\n CDN_URL=CDN_URL, handle=self.handle\n )\n response = utils.make_call(metadata_url, 'get',\n params=params,\n security=self.security)\n return response.json()", "title": "" }, { "docid": "2c4f2a4feca8651f337f99957dac17b2", "score": "0.59841037", "text": "def get_metadata(self, region, locale):\n resource = \"/hearthstone/metadata\"\n query_params = {\"locale\": locale}\n return super().get_resource(resource, region, query_params)", "title": "" }, { "docid": "db16e2f10afadc62d177dad977a75d96", "score": "0.5975165", "text": "def get_title_metadata(self):\n pass", "title": "" }, { "docid": "6a5061a1edd34767f69b736b9d627df0", "score": "0.59744596", "text": "def metadata(self) -> str:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "6a5061a1edd34767f69b736b9d627df0", "score": "0.59744596", "text": "def metadata(self) -> str:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "6a5061a1edd34767f69b736b9d627df0", "score": "0.59744596", "text": "def metadata(self) -> str:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "c04a536b7e84c43786e374e02689c96b", "score": "0.59680593", "text": "def metadata(self):\n return metadata(self.data)", "title": "" }, { "docid": "bd6cfe5148d4e25ae2c6fd4ea9638c39", "score": "0.59559137", "text": "def metadata(self) -> 'outputs.MetadataResponse':\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "bd6cfe5148d4e25ae2c6fd4ea9638c39", "score": "0.59559137", "text": "def metadata(self) -> 'outputs.MetadataResponse':\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "8cbc65d3d9618dbbba2ce5834dbce34a", "score": "0.59523225", "text": "def get_assets_metadata(self):\n return # osid.Metadata", "title": "" }, { "docid": "2489839cae2ffcb513a9cb6d44d5a3ca", "score": "0.59516597", "text": "def get_metadata(self, key, version=None):\n rsp = urllib2.urlopen(self.commit_uri(key, version) + \"/meta\")\n return json.loads(rsp.read())", "title": "" }, { "docid": "be9138810d5a6abda32fd82152f8f6bd", "score": "0.59148645", "text": "def get_metadata_fields(corpus_name):\n if corpus_name == 'sample_novels':\n return ['author', 'date', 'title', 'country_publication', 'author_gender', 'filename', 'notes']\n else:\n return common.METADATA_LIST", "title": "" }, { "docid": "59e0d35b76afcb6e4168e5e731d9a749", "score": "0.591214", "text": "def metadata(self):\n return self.file_source.metadata()", "title": 
"" }, { "docid": "d938306b829603f77ccce0d944cb1c1c", "score": "0.59070027", "text": "def get_metadata(self):\n return self._metadata", "title": "" }, { "docid": "d938306b829603f77ccce0d944cb1c1c", "score": "0.59070027", "text": "def get_metadata(self):\n return self._metadata", "title": "" }, { "docid": "05a395d61ee79ebb591a6d627cd4eb5d", "score": "0.590246", "text": "def metadata(self):\r\n return self._md", "title": "" }, { "docid": "33e6f7385b46d30f36840fa3e286b657", "score": "0.5901217", "text": "def metadata(self):\n\n if self._metadata is None:\n # metadata_url = \"https://raw.githubusercontent.com/{owner}/{repo_name}/master/metadata.yaml\".format(owner=self.repo_owner,\n # repo_name=self.repo_name)\n\n # self._metadata = metadata.pandata.Pandata(metadata_url)\n\n self._metadata = metadata.pandata.Pandata(datafile=None)\n md_hash = self.gh_repo.contents(\"metadata.yaml\")\n # if the file exists, load the content\n if md_hash is not None:\n self._metadata.metadata = yaml.safe_load(md_hash.decoded)\n return self._metadata\n else:\n return self._metadata", "title": "" }, { "docid": "f7a66a1b2c1db8d2055399b9710bf827", "score": "0.5895827", "text": "def get_metadata(self):\n metadata_path = \"{}/{}/{}\".format(self.url,\n self.metadata_dir,\n self.metadata_file)\n metadata_sig_path = \"{}/{}/{}.sig\".format(self.url.rstrip('/'),\n self.metadata_dir,\n self.metadata_file)\n # load metadata\n req = requests.get(metadata_path)\n if req.status_code is 200:\n raw_metadata = req.content\n else:\n raise RepositoryError(metadata_path, (\"status code not 200: \"\n \"{}\".format(req.status_code)))\n\n if self.gpg_verify:\n self.verify_data_signature(metadata_sig_path, metadata_path,\n raw_metadata)\n\n return self.parse_metadata(raw_metadata)", "title": "" }, { "docid": "a13eec55499ba96229f465afe282ce4d", "score": "0.5892936", "text": "def process_license(license):\n\n ind = license.find('Attribution')\n ind = license.find('creativecommons.org/licenses/')\n if ind == -1:\n zlicense = { \"description\": {\"en\": license},\n \"title\": {\"en\": \"Custom license\"} }\n else:\n zlicense = {\"id\": \"-\".join([\"cc\", license[ind:].split(\"/\")[2],\"4.0\"])}\n return zlicense", "title": "" }, { "docid": "93943df688f99276f31135ff8816b065", "score": "0.58883893", "text": "def get_share_metadata(self, share, microversion=None):\n metadata_raw = self.manila(\n 'metadata-show %s' % share, microversion=microversion)\n metadata = output_parser.details(metadata_raw)\n return metadata", "title": "" }, { "docid": "d10ca39cdcff87db3ff6582c6a897a61", "score": "0.5878597", "text": "def license():\n # print it\n return print(meta.license)", "title": "" }, { "docid": "471733d816771050fa7bfb8e10b643fc", "score": "0.58772045", "text": "def get_socrata_metadata(credentials, nadac_parameters):\n client = setup_socrata_client(credentials, nadac_parameters)\n metadata = client.get_metadata(nadac_parameters['DATA_LOCATION'],\n content_type='json')\n\n check_build_filepath('raw_data')\n save_to_disk('raw_data/price_metadata.json', metadata)\n client.close()", "title": "" }, { "docid": "db72656e8108d61bfdd93f1a3161f692", "score": "0.58748287", "text": "def get_infos(self):\n infos = super().get_infos()\n infos[\"licenses\"].append(chime5_license)\n return infos", "title": "" }, { "docid": "c7f3c442696494bee65b316832b76d3d", "score": "0.58630574", "text": "def get_metadata(self,mp3):\n metadata = None\n if self.mp3items.has_key(mp3):\n metadata = self.mp3items[mp3]['metadata']\n \n return metadata", "title": "" }, { "docid": 
"e1ae2264504cb454f98acedbc9a9e486", "score": "0.5856848", "text": "def _extractMetadata(self):\n\n try:\n dcMetadata = dicom.read_file(self.path, stop_before_pixels=True)\n if dcMetadata:\n self.metadata = dict()\n for item in dcMetadata:\n # pass forces rawDataElement to DataElement conversion\n pass\n if not item.tag.is_private:\n name = str(item.name).replace('.', '')\n value = str(item.value)\n self.metadata[name] = value\n self.metadata['Info Extracted'] = True\n else:\n self.metadata = None\n\n except dicom.filereader.InvalidDicomError:\n self.metadata = None", "title": "" }, { "docid": "36e33dd772743148f1dae807f728d1ef", "score": "0.5852637", "text": "def extract_metadata(self):\n self.contacts_list = self.metadata['chanlabels']\n try:\n # comment out\n self.modality = self.metadata['modality']\n except Exception as e:\n self.metadata['modality'] = 'ieeg'\n self.modality = self.metadata['modality']\n print(\"Loading result object. Error in extracting metadata: \", e)\n\n # convert channel labels into a Contacts data struct\n if self.reference == 'bipolar' or self.modality == 'scalp':\n self.contacts = Contacts(self.contacts_list, require_matching=False)\n else:\n self.contacts = Contacts(self.contacts_list)", "title": "" }, { "docid": "72a2c0e421539a33a7afbeec62a64b64", "score": "0.5843592", "text": "def set_img_copyright(self, uri): \n \n tags = [\"MWG:Copyright\"]\n key = \"copyright\"\n self.set_meta_prop(tags, uri, key)", "title": "" }, { "docid": "7dc2755324d92788991852a56f33beb0", "score": "0.58300406", "text": "def get_metadata():\n res = requests.get(URL).json()\n return res[\"response\"][\"docs\"]", "title": "" }, { "docid": "87ba37b3dbaa8401e45f44686382e2fd", "score": "0.5820466", "text": "def get_metadata(publisher, package):\n try:\n metadata = MetaDataDB.query.filter_by(name=package, publisher=publisher).first().descriptor\n return jsonify({\"data\": metadata, \"status\": \"OK\"}), 200\n except Exception as e:\n return jsonify({'status': 'KO', 'message': e.message}), 500", "title": "" }, { "docid": "7fb812cf1b4c681f3e913eb98d7cfed2", "score": "0.58103585", "text": "def metadata(self):\n if self.__last_get_response is None:\n if self.__initial_metadata is not None:\n return self.__initial_metadata\n self.get()\n return self.__last_get_response.content", "title": "" }, { "docid": "3abfbd4d70ab9fab15ccb068927f8b4c", "score": "0.579478", "text": "def getMetadata(self, private_key, client_id):\n return self.proxy.samp.hub.getMetadata(private_key, client_id)", "title": "" }, { "docid": "b39a28ca457275565c55db1f7331e0be", "score": "0.57906055", "text": "def get_courses_metadata(self):\n return # osid.Metadata", "title": "" }, { "docid": "e1cd31ac6dcb6d8e9adf6c1218e1daf7", "score": "0.578709", "text": "def create_metadata(title, comment='', author=None):\n author = 'https://github.com/narimiran' if author is None else author\n return dict(artist=author, title=title, comment=comment)", "title": "" }, { "docid": "80f03cdcdbcf6f0f4de1c9d9204f4851", "score": "0.5785481", "text": "def metadata(self):\n metadata_service = self.find_service_by_type(ServiceTypes.METADATA)\n return metadata_service.values['metadata']", "title": "" }, { "docid": "ae30f97bc9dda8b92cc34dc15d1adf6c", "score": "0.57851726", "text": "def get_license_data(self):\n return licenses.get_license_by_url(self.license or \"\")", "title": "" }, { "docid": "afeddd9616b57cd176036fd0071ee294", "score": "0.5769014", "text": "def metadata(forge):\n forge.metadata()", "title": "" }, { "docid": 
"9a751d8085f977491b2730caf4593e12", "score": "0.5764288", "text": "def get_source_metadata(self):\n pass", "title": "" }, { "docid": "e936e6fd083d940e003160caa3de0391", "score": "0.576013", "text": "def files_license_info(self) -> Sequence[str]:\n return pulumi.get(self, \"files_license_info\")", "title": "" }, { "docid": "1dc3a89bbab7c806e9d09f629e01a7ad", "score": "0.5758056", "text": "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "1dc3a89bbab7c806e9d09f629e01a7ad", "score": "0.5758056", "text": "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "1dc3a89bbab7c806e9d09f629e01a7ad", "score": "0.5758056", "text": "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "1dc3a89bbab7c806e9d09f629e01a7ad", "score": "0.5758056", "text": "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "1dc3a89bbab7c806e9d09f629e01a7ad", "score": "0.5758056", "text": "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "1dc3a89bbab7c806e9d09f629e01a7ad", "score": "0.5758056", "text": "def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")", "title": "" }, { "docid": "4ac9d5037085069356d9b5b257996854", "score": "0.5740702", "text": "def getMetadata(self):\n return self._metadata", "title": "" }, { "docid": "b06810d8c09ac0aa1fc53ae4bea9e99f", "score": "0.57405835", "text": "def metadata(self) -> Optional[Any]:\n return pulumi.get(self, \"metadata\")", "title": "" } ]
f5f36a0534f4ea94adb8aad46271138a
Can this RecurringCost be enacted?
[ { "docid": "757ffa3fd3a6bd9841075b459360f26d", "score": "0.49190748", "text": "def is_enactable(self, as_of):\n return \\\n not self.disabled and \\\n not self.archived and \\\n not self._is_finished(as_of) and \\\n self._is_ready(as_of) and \\\n not self._is_billing_complete()", "title": "" } ]
[ { "docid": "4c04ec54a498200fddebe61bfad145c7", "score": "0.6974328", "text": "def enact(self, billing_cycle, disable_if_done=True):\n as_of = billing_cycle.date_range.lower\n if not self.is_enactable(as_of):\n raise CannotEnactUnenactableRecurringCostError(\n \"RecurringCost {} is unenactable.\".format(self.uuid)\n )\n\n if self.has_enacted(billing_cycle):\n raise RecurringCostAlreadyEnactedForBillingCycle(\n 'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)\n )\n\n with db_transaction.atomic():\n recurred_cost = RecurredCost(\n recurring_cost=self,\n billing_cycle=billing_cycle,\n )\n recurred_cost.make_transaction()\n recurred_cost.save()\n\n if disable_if_done:\n self.disable_if_done(billing_cycle)", "title": "" }, { "docid": "b7445a2491181d4ec5c6d4f004d143bf", "score": "0.6717582", "text": "def has_enacted(self, billing_cycle):\n return RecurredCost.objects.filter(\n recurring_cost=self,\n billing_cycle=billing_cycle,\n ).exists()", "title": "" }, { "docid": "a365ef9fd5b0a002ccf6bc7bdc0e6573", "score": "0.62935936", "text": "def make_eligible(self):\n pass", "title": "" }, { "docid": "d8dff91131c125b350fd2012d9790602", "score": "0.5993728", "text": "def charge_recurring(self, grace_period=None):\n pass", "title": "" }, { "docid": "9039e5f6aa07f9bcc62aac117fff3c64", "score": "0.5735424", "text": "def confirmed(self, cr, uid, ids, context=None):\n\tallow_archive_line_obj = self.pool.get('services.contracts.allowances.lines')\n for record in self.browse(cr, uid, ids, context=context):\n\t\tif not record.allowances_lines_before :\n \traise osv.except_osv(_('Partner Lines !'), _('Sorry no partner Lines!'))\n\n\t \tlines_ids = [line.id for line in record.allowances_lines_after]\n \tallow_archive_line_obj.unlink(cr,uid,lines_ids,context=context)\n\n\t\tfor lines in record.allowances_lines_before:\n\t\t\tif lines.percentage_rating < 0 or lines.percentage_rating > 100 :\n \t\traise osv.except_osv(_('Rate Error !'), _('Sorry you insert wrong rate ... 
rate is between (0,100)!'))\n \t\tamount_after_rate_id = allow_archive_line_obj.create(cr, uid, {\n \t\t\t\t'cost_of_rent':lines.cost_of_rent,\n \t\t\t\t'amount_untaxed':round (lines.amount_untaxed*lines.percentage_rating/100,2),\n \t\t\t\t'amount_tax':round(lines.amount_tax*lines.percentage_rating/100,2),\n \t\t\t\t'amount_total':round(lines.amount_total*lines.percentage_rating/100,2),\n \t\t\t\t'deduct_days':lines.deduct_days,\n \t\t\t\t'deduct_amount':lines.deduct_amount,\n \t\t\t\t'contract_id':lines.contract_id.id,\n\t\t\t\t\t'env_allow_id_after_rate':record.id,\n\t\t\t\t\t'type': 'after',\n 'category_id':lines.category_id.id,\n\t\t\t\t\t'percentage_rating':lines.percentage_rating,\n\n })\n\t\t\n \n self.write(cr, uid, ids, {'state':'confirmed'})\n return True", "title": "" }, { "docid": "e06878f1f73ebe95cde824d74b51178a", "score": "0.57182837", "text": "def credit_deliverer():\n return True", "title": "" }, { "docid": "072e3adb9a2b8b733955c2d0470f5cfd", "score": "0.57024777", "text": "def disable_costs():\n RecurringCost.objects.all().disable_if_done()", "title": "" }, { "docid": "729f7ea408343845dae0e20a444735a6", "score": "0.5669726", "text": "def awaiting_payment(self):", "title": "" }, { "docid": "46b89439cb1fb4ae0622c0559693f5b3", "score": "0.56223905", "text": "def test_add_recurring_schedule(self):\n pass", "title": "" }, { "docid": "43c2e807bcff1aea5874de05585dc1b9", "score": "0.55890375", "text": "def test_renew_user_pending_cancel(self):\n self.braintree_customer.active = True\n self.braintree_customer.pending_cancel = True\n self.braintree_customer.subscription_id = \"ValidSubscriptionID\"\n\n result = SubscriptionManager.renew(self.braintree_customer)\n self.assertEqual(\"ValidSubscriptionID\",result)\n self.assertFalse(self.braintree_customer.pending_cancel)", "title": "" }, { "docid": "c4473da0917973c1c85d7b653ba0f05d", "score": "0.5505976", "text": "def _check_cost(self, cr, uid, ids, context=None):\n for enrich in self.browse(cr, uid, ids, context=context):\n if enrich.amount <= 0:\n raise osv.except_osv(_('ValidateError'), _('The Cost Must Be Greater Than Zero!'))\n return True", "title": "" }, { "docid": "2e2212377ef5be3f290b7c7bf4d129b1", "score": "0.5493613", "text": "def test_subscribe_already_subscribed(self):\n self.braintree_customer.active = True\n self.braintree_customer.save()\n with self.assertRaises(BraintreeError):\n SubscriptionManager.subscribe(self.braintree_customer)\n\n # Check state not altered\n self.assertTrue(self.braintree_customer.active)\n self.assertFalse(self.braintree_customer.pending_cancel)\n self.assertIsNone(self.braintree_customer.expiry_date)", "title": "" }, { "docid": "0eff043c23370b9890345712648bb3b8", "score": "0.54642403", "text": "def test_renew_attempt_on_active_subscription(self):\n self.braintree_customer.pending_cancel = False\n self.braintree_customer.active = True\n\n with self.assertRaises(BraintreeError):\n SubscriptionManager.renew(self.braintree_customer)", "title": "" }, { "docid": "2878f031423ce10692b6087cd1179530", "score": "0.5447805", "text": "def make_transaction(self):\n if self.pk:\n raise CannotRecreateTransactionOnRecurredCost(\n 'The transaction for this recurred cost has already been created. You cannot create it again.'\n )\n\n amount = self.recurring_cost.get_amount(self.billing_cycle)\n\n # It is quite possible that there will be nothing to bill, in which\n # case we cannot create a transaction with no legs, nor can we create\n # legs with zero values. 
Therefore we don't create any transaction.\n if not amount:\n return None\n\n self.transaction = Transaction.objects.create(\n description='Created by recurring cost',\n date=self.billing_cycle.date_range.lower\n )\n\n # Use the SplitManager's custom queryset's split() method to get the\n # amount to be billed for each split\n splits = self.recurring_cost.splits.all().split(amount)\n\n # Create the transaction leg for the outbound funds\n # (normally to an expense account)\n self.transaction.legs.add(Leg.objects.create(\n transaction=self.transaction,\n amount=Money(amount, self.recurring_cost.currency),\n account=self.recurring_cost.to_account,\n ))\n\n for split, split_amount in splits:\n # Create the transaction legs for the inbound funds\n # (from housemate accounts)\n if split_amount:\n self.transaction.legs.add(Leg.objects.create(\n transaction=self.transaction,\n amount=Money(split_amount * -1, self.recurring_cost.currency),\n account=split.from_account,\n ))\n\n return self.transaction", "title": "" }, { "docid": "ba47d041e96bc2b902f7e951534eb141", "score": "0.5434562", "text": "def _cost(self, action):\n raise NotImplementedError", "title": "" }, { "docid": "0e3085b8493d861cc1b7d398c776e29a", "score": "0.54062164", "text": "def is_recurring(self):\n return self.__is_recurring", "title": "" }, { "docid": "c624f7dcb4ff52e7b73c58f67c9e6318", "score": "0.5402047", "text": "def test_request_cancel_already_pending_cancel(self):\n self.braintree_customer.subscription_id = \"1234\"\n self.braintree_customer.pending_cancel = True\n self.braintree_customer.save()\n\n self.assertFalse(SubscriptionManager.request_cancel(self.braintree_customer))\n self.assertTrue(self.braintree_customer.pending_cancel)", "title": "" }, { "docid": "33fbd06b632f15c7255f0e18c5897445", "score": "0.53812355", "text": "def is_invited_pending_activation(self):\n if self.registration_method == self.INVITED \\\n and self.is_pending_activation():\n return True\n else:\n return False", "title": "" }, { "docid": "ceb71fff2f7605e3d2c71500830dcbfa", "score": "0.53572863", "text": "def give_raise(self):\r\n self.hourly_pay = 12.00", "title": "" }, { "docid": "a1633c21b982abbaba97996b7c87877d", "score": "0.5347127", "text": "def charge(self, other):\n if self.flag:\n self.credit += other\n return \"{} Tomans has been added to your card credit and now the credit of your card is {} Tomans\".format(\n other, self.credit)\n else:\n return \"Sorry, your card has expired.\"", "title": "" }, { "docid": "8a0bca7e754388a34640f20c91afa68b", "score": "0.534086", "text": "def payment_approval(self, house_cost: (int, float)):\n if self.money_available >= house_cost: # Person has enough available money to make a deal with Realtor\n self.money_available -= house_cost\n print(f'Payment from {self.name} was approved')\n return True\n print(f'{self.name} doesn\\'t have enough money to buy this house')\n return False", "title": "" }, { "docid": "a34fc2ccd08cf094015a4a7a71287468", "score": "0.53131855", "text": "def test_request_cancel_active_subscription(self):\n self.braintree_customer.subscription_id = \"1234\"\n self.braintree_customer.pending_cancel = False\n self.braintree_customer.save()\n self.assertTrue(SubscriptionManager.request_cancel(self.braintree_customer))\n self.assertTrue(self.braintree_customer.pending_cancel)", "title": "" }, { "docid": "3e8c9b966e3614e15ccd1ab4a2563ef9", "score": "0.5306632", "text": "def test_overlap(self):\r\n t = Expense(name = \"fake lunch\",\r\n amount = 1.,\r\n on = (WeeklyRecurring(FR,\r\n fromdt = 
self.fromdt,\r\n todt = self.todt),\r\n DailyRecurring(fromdt = self.fromdt, \r\n todt = self.todt)))\r\n\r\n self.m.addTransaction(t)\r\n self.assertEqual(self.m.totalSaved(self.fromdt, self.todt), -365.)", "title": "" }, { "docid": "fa3965aee40434d2d49b4ec667e75657", "score": "0.5294787", "text": "def reports_editable(self):\n end_plus_time = self.datetime_end + datetime.timedelta(days=CCR_DELTA)\n return timezone.now() < end_plus_time", "title": "" }, { "docid": "bc118ce9f8f76044d0d8341c976cc671", "score": "0.5265665", "text": "def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False", "title": "" }, { "docid": "58a11d04b563ecf30411d522d7a9987d", "score": "0.5261785", "text": "def test_create_sugar_cpn_exp_task(self):\n offset_days = 2\n coupon = Coupon.objects.get(id=416)\n coupon.expiration_date = (datetime.date.today() + \n datetime.timedelta(days=offset_days))\n coupon.save()\n self.assertEqual(create_sugar_coupon_expire_task(MockSugar(), \n offset_days=offset_days), None)", "title": "" }, { "docid": "27b0305fb2f568ecd0568b90978b95e3", "score": "0.52602494", "text": "async def resign(self, ctx):\n currency = await bank.get_currency_name(ctx.guild)\n await self.config.user(ctx.author).gameRole.set(\"User\")\n await ctx.send(\n f\"{ctx.author} has spent 10,000 {currency}- to resign from their current job.\"\n )", "title": "" }, { "docid": "8491f0b61b51e158a8d7c69100f4d319", "score": "0.5213568", "text": "def test_subscription_transaction_declined_suspend(self):\n\n # We'll create a plan that starts here\n start_date = dt.date(2019, 1, 1)\n\n # And the trial date ends here too\n trial_end_date = dt.date(2019, 1, 1)\n\n # The customer will use some metered features here\n metered_usage_on = dt.date(2019, 1, 10)\n\n # Docs will be generated to bill here.\n prev_billing_date = dt.date(2019, 1, 3)\n\n # So, the customer grace period ends here.\n # First billing interval: dt.date(2019, 2, 1)\n # \n billing_grace_exp = dt.date(2019, 2, 3)\n\n # The next billing check should discover that the subscription\n # is unpaid.\n # Billing due date is: dt.date(2019, 2, 6)\n # With the grace period: dt.date(2019, 2, 9)\n # \n billing_post_grace_check = dt.date(2019, 2, 10)\n\n # Create a customer\n #\n customer = CustomerFactory.create(sales_tax_percent=Decimal('0.00'),\n payment_due_days=3)\n PaymentMethodFactory.create(\n payment_processor=triggered_processor, customer=customer,\n canceled=False,\n verified=True,\n )\n\n # Create a metered feature\n #\n mf_price = Decimal('2.5')\n metered_feature = MeteredFeatureFactory(\n included_units_during_trial=Decimal('0.00'),\n price_per_unit=mf_price)\n currency = 'USD'\n\n # Crate a plan with metered features. 
Generate the invoice after\n # the 5 day trial period, the plan will be billed every 30 days.\n # \n generate_after = 5\n plan = PlanFactory.create(interval=Plan.INTERVALS.DAY,\n interval_count=30,\n generate_after=generate_after,\n enabled=True,\n amount=Decimal('20.00'),\n trial_period_days=1,\n metered_features=[metered_feature],\n currency=currency)\n\n # Subscribe the customer\n #\n subscription = SubscriptionFactory.create(\n plan=plan, start_date=start_date, customer=customer)\n subscription.activate()\n subscription.save()\n\n # Log some metered usage\n consumed_1 = Decimal('5.00')\n consumed_2 = Decimal('5.00')\n mf_log = MeteredFeatureUnitsLogFactory.create(\n subscription=subscription,\n metered_feature=metered_feature,\n start_date=metered_usage_on,\n end_date=subscription.trial_end,\n consumed_units=consumed_1)\n\n # Generate the docs\n call_command('generate_docs',\n billing_date=prev_billing_date,\n stdout=self.output)\n\n proforma = Proforma.objects.first()\n\n assert proforma.proforma_entries.count() != 0\n assert Subscription.objects.all().count() == 1\n assert Invoice.objects.all().count() == 0\n assert Proforma.objects.all()[0].total > Decimal('0.00')\n\n # Consume more units\n mf_log.consumed_units += consumed_2\n mf_log.save()\n\n call_command('generate_docs',\n billing_date=billing_grace_exp,\n stdout=self.output)\n\n assert Proforma.objects.all().count() != 0\n assert Invoice.objects.all().count() == 0\n\n for pf in Proforma.objects.all():\n # # Issue the proforma to generate transactions\n # proforma = Proforma.objects.all()[1]\n pf.issue()\n pf.save()\n\n self.assertEqual(pf.state, Proforma.STATES.ISSUED)\n # Fail the transaction\n for tx in pf.transactions:\n # tx = proforma.transactions[0]\n tx.fail()\n tx.save()\n self.assertEqual(tx.state, Transaction.States.Failed)\n\n assert Transaction.objects.all().count() != 0\n\n call_command('check_subscriptions',\n billing_date=billing_post_grace_check,\n stdout=self.output)\n\n subscr = Subscription.objects.first()\n\n # Scan for subscriptions with unpaid documents\n logging.debug(\"subscr %s\" % subscr)\n self.assertEqual(subscr.state, Subscription.STATES.CANCELED)", "title": "" }, { "docid": "5417e376b07167245209d7488eceae5c", "score": "0.5208941", "text": "def test_get_active_coupon(self):\n coupon = COUPON_FACTORY.create_coupon()\n slot = SLOT_FACTORY.create_slot(coupon=coupon)\n future_date = datetime.date.today() + datetime.timedelta(weeks=1)\n coupon.expiration_date = future_date\n coupon.save()\n self.assertEqual(slot.get_active_coupon(), coupon)", "title": "" }, { "docid": "257ba26e9929795ad5f5632d4cddacf1", "score": "0.52071846", "text": "def test_create_sugar_cc_exp_task(self):\n current_date = datetime.datetime.now()\n order = Order.objects.create(billing_record_id=114)\n credit_card = CreditCard.objects.get(id=500)\n credit_card.exp_month = int(current_date.strftime(\"%m\"))\n credit_card.exp_year = int(current_date.strftime(\"%y\"))\n credit_card.encrypt_cc('4111111111111111')\n credit_card.clean()\n credit_card.save()\n Payment.objects.create(order=order, credit_card=credit_card,\n amount='50.00', method='C', status='p')\n self.assertEqual(create_sugar_cc_expire_task(MockSugar(), \n test_mode=True), None)", "title": "" }, { "docid": "eb78960a18fbea5f420521a73cc9d91e", "score": "0.5201274", "text": "def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_rent = datetime.date.today()\n self.end_time_rent = self.start_time_rent + datetime.timedelta(days=7)\n self.reservation.isrented = True\n 
self.reservation.save()\n return super(Rental, self).save(*args, **kwargs)", "title": "" }, { "docid": "f499e3c7140c536233b59aa5018b00e0", "score": "0.5198995", "text": "def life_insurance_to_recive_total(self):\n pass", "title": "" }, { "docid": "93e3ba91aae2b3005b29c825e9df6617", "score": "0.51950544", "text": "def pay_costs(self):\r\n cost = self.cost\r\n if cost:\r\n self.owner.player.char_ob.pay_money(cost)\r\n self.owner.player.msg(\"You pay %s coins for the event.\" % cost)", "title": "" }, { "docid": "4a554eae4772018c64970255353249ec", "score": "0.5190392", "text": "def accepting(self):\n quote = self.get_quotes('labor')[0]\n quantity = max(0, quote.price - 14)\n self.create('labor', quantity)\n self.accept_quote_partial(quote, quantity)", "title": "" }, { "docid": "da7b38cc190f388ab6d0549317e33158", "score": "0.51902777", "text": "def recap(self, request, pk=None):\n retreat = self.get_object()\n # This is a hard-coded limitation to allow anonymous users to call\n # the function.\n time_limit = retreat.end_time - timedelta(days=1)\n if timezone.now() < time_limit:\n response_data = {\n 'detail': \"Retreat ends in more than 1 day.\"\n }\n return Response(response_data, status=status.HTTP_200_OK)\n\n # Notify a user for every reserved seat\n emails = []\n for reservation in retreat.reservations.filter(\n is_active=True,\n post_event_send=False):\n send_post_retreat_email(reservation.user, retreat)\n reservation.post_event_send = True\n reservation.save()\n emails.append(reservation.user.email)\n\n response_data = {\n 'stop': True,\n 'emails': emails\n }\n return Response(response_data, status=status.HTTP_200_OK)", "title": "" }, { "docid": "40e1634797064f5c392aeeff54879a26", "score": "0.5186644", "text": "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "title": "" }, { "docid": "d10a00c4c4117a81dd34a039b5016bec", "score": "0.51843166", "text": "def test_no_credit_change_for_same_subscription(self):\n credits = 20\n\n current_plan = Subscription.get_plan_by_id('pro')\n new_plan = Subscription.get_plan_by_id('pro')\n\n may_29_2015 = datetime.datetime(2015, 5, 29, 0, 0, 0)\n may_29_2015 = pytz.utc.localize(may_29_2015)\n\n credits = add_subscription_credits(credits, current_plan, new_plan,\n may_29_2015)\n\n assert credits == 20", "title": "" }, { "docid": "f6cfe667dbc270d1f0777d11ac37720c", "score": "0.51762897", "text": "def test_remove_recurring_schedule(self):\n pass", "title": "" }, { "docid": "ffd0405d5d1dcce4d6e9af01483ebde3", "score": "0.5167032", "text": "def confirmed(self):", "title": "" }, { "docid": "eb1bb3838c74ae2e2fc33261a8bcad5d", "score": "0.51638985", "text": "def test_retire_rate_plan(self):\n pass", "title": "" }, { "docid": "acd5fc643f60a16857bf0dbbe97c12f5", "score": "0.5128787", "text": "def add_to_excess(self) -> None:\n if self.msg.value <= 0:\n revert(\"No amount added to excess\")\n self._treasury_balance.set(self.icx.get_balance(self.address))\n self.FundReceived(self.msg.sender, self.msg.value, f\"{self.msg.value} added to excess\")", "title": "" }, { "docid": "e170852adf1c091d578eada0184631b9", "score": "0.51178837", "text": "async def admin_credit(self, ctx, target: discord.Member, sum: int = 100):\n if is_registered(target.id):\n \n inventories = get_file(\"inventories\")\n inventories[str(target.id)][\"balance\"] += sum\n update_file(\"inventories\", inventories)\n\n embed = discord.Embed(color=admin_color)\n 
embed.set_author(name=\"🛠️ Admin\")\n embed.add_field(name=\"💰 Credit\",\n value=f\"{ctx.author.mention}, {target.mention} a été crédité de `{sum}` PO (pièces d'or)\")\n embed = set_footer(embed, ctx)\n await ctx.send(embed=embed)", "title": "" }, { "docid": "41e253144f1d44f70f72725a3cdfd381", "score": "0.5104723", "text": "def test_plan_autorenew_at_expire(self):\n up = baker.make(\"UserPlan\", expire=date(2020, 1, 1))\n self.assertEqual(up.plan_autorenew_at(), date(2020, 1, 1))", "title": "" }, { "docid": "6ba629d8534d92877f8ce279d15aafa1", "score": "0.5101618", "text": "def trigger_expiration_notices(at_time=None, nb_days=15, dry_run=False):\n\n def _handle_organization_notices(organization):\n if organization.processor_card_key:\n card = organization.retrieve_card()\n try:\n exp_month, exp_year = card['exp_date'].split('/')\n exp_date = datetime(year=int(exp_year),\n month=int(exp_month), day=1, tzinfo=at_time.tzinfo)\n if lower >= exp_date:\n LOGGER.info(\"payment method expires soon for %s\",\n organization)\n if not dry_run:\n signals.card_expires_soon.send(\n sender=__name__, organization=organization,\n nb_days=nb_days)\n except (KeyError, ValueError):\n # exp info is missing or the format is incorrect\n pass\n else:\n LOGGER.info(\"%s doesn't have a payment method attached\",\n organization)\n if not dry_run:\n signals.payment_method_absent.send(sender=__name__,\n organization=organization)\n\n at_time = datetime_or_now(at_time)\n lower = at_time + relativedelta(days=nb_days)\n upper = at_time + relativedelta(days=nb_days + 1)\n LOGGER.info(\n \"trigger notifications for subscription expiring within [%s,%s[ ...\",\n lower, upper)\n prev_organization = None\n subscription = None\n for subscription in Subscription.objects.valid_for(ends_at__gte=lower,\n ends_at__lt=upper).order_by('organization'):\n org = subscription.organization\n plan = subscription.plan\n\n try:\n if subscription.auto_renew:\n if plan.renewal_type == plan.AUTO_RENEW:\n if org.id != prev_organization:\n _handle_organization_notices(org)\n\n prev_organization = org.id\n else:\n if plan.renewal_type == plan.ONE_TIME:\n LOGGER.info(\"trigger upgrade soon for %s\", subscription)\n if not dry_run:\n signals.subscription_upgrade.send(sender=__name__,\n subscription=subscription, nb_days=nb_days)\n\n elif plan.renewal_type == plan.REPEAT:\n LOGGER.info(\"trigger expires soon for %s\", subscription)\n if not dry_run:\n signals.expires_soon.send(sender=__name__,\n subscription=subscription, nb_days=nb_days)\n\n except Exception as err: #pylint:disable=broad-except\n # We use `Exception` because the email server might be\n # unavailable but ConnectionRefusedError is not a subclass\n # of RuntimeError.\n LOGGER.exception(\"error: %s\", err)\n\n # flushing the last organization\n if subscription and subscription.organization.id != prev_organization:\n if subscription.auto_renew:\n if subscription.plan.renewal_type == subscription.plan.AUTO_RENEW:\n _handle_organization_notices(subscription.organization)", "title": "" }, { "docid": "0c66ea0657cc0bf9e28af5aaeb6647c4", "score": "0.5100977", "text": "def redeem(self, instance, customer, save=True):\n start = timezone.now().date()\n end = start + relativedelta(months=self.duration)\n discount = Discount(instance=instance,\n coupon=self,\n start=start,\n end=end,\n customer=customer)\n discount.full_clean()\n if save:\n discount.save()\n return discount", "title": "" }, { "docid": "be729754fa4102aa1c8b61477d0cdb82", "score": "0.50970876", "text": "def 
customer_wants_condiments(self):\n return True", "title": "" }, { "docid": "58052ca7885a95388e8a943ad79fc9ee", "score": "0.50891936", "text": "def test_dont_cancel_for_events_with_no_cost(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 10, tzinfo=dt_timezone.utc\n )\n self.ticketed_event.ticket_cost = 0\n self.ticketed_event.save()\n self.assertFalse(self.unpaid.cancelled)\n self.assertFalse(self.paid.cancelled)\n\n management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking and studio once for all\n # cancelled bookings\n self.unpaid.refresh_from_db()\n self.paid.refresh_from_db()\n self.assertEqual(len(mail.outbox), 0)\n self.assertFalse(self.unpaid.cancelled)\n self.assertFalse(self.paid.cancelled)", "title": "" }, { "docid": "c02baa7b4f7d3c8e78826cc17010a89a", "score": "0.5078882", "text": "def register_withdraw(self, withdraw_intent): \n if withdraw_intent > 0:\n self.teo.register_withdraw(self, withdraw_intent)", "title": "" }, { "docid": "87a75db34bf124af171ddcacdf0705d9", "score": "0.5062819", "text": "def reckon(self):", "title": "" }, { "docid": "3ef1115a68c9a4958c538af0517b7e7f", "score": "0.5059867", "text": "def reimport_eligible(self) -> bool:\n return pulumi.get(self, \"reimport_eligible\")", "title": "" }, { "docid": "cabd4c0d5435e6616abe559534230b62", "score": "0.5054001", "text": "def test_add_taxation_strategy_to_rate_plan(self):\n pass", "title": "" }, { "docid": "e3d7067817879f0fa2eb4466e3f35086", "score": "0.50444573", "text": "def can_act(self) -> bool:\n return self.cooldown < 1", "title": "" }, { "docid": "e3d7067817879f0fa2eb4466e3f35086", "score": "0.50444573", "text": "def can_act(self) -> bool:\n return self.cooldown < 1", "title": "" }, { "docid": "57a314d8c47829fe7aea4b854dab54cb", "score": "0.5037591", "text": "def restock(self):\n self.money = 9999", "title": "" }, { "docid": "5b99cdf09d4409ff6cacc373e96ffd86", "score": "0.5028068", "text": "def may_send_escalation(self, source_type: str, escalation_reminder_cadence: timedelta) -> bool:\n with self.session.begin() as session:\n last_escalated = (\n session.query(EventRecord.escalated_at)\n .filter(EventRecord.source_type == source_type)\n .order_by(EventRecord.escalated_at.desc())\n .limit(1)\n .one_or_none()\n )\n\n if not last_escalated[0]:\n return True\n\n return last_escalated[0] <= datetime.utcnow() - escalation_reminder_cadence", "title": "" }, { "docid": "846e2e0abaea6b50e2948f1d6060a39c", "score": "0.50160515", "text": "def filter(self, event, *args, **kwargs):\n return event[\"expiration_datetime\"] <= datetime.now()", "title": "" }, { "docid": "201aa010ced8cf71f548ecf6d393575b", "score": "0.5011571", "text": "def test_dont_cancel_for_events_with_no_cost(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 10, tzinfo=dt_timezone.utc\n )\n self.event.cost = 0\n self.event.save()\n self.assertEqual(\n self.unpaid.status, 'OPEN', self.unpaid.status\n )\n self.assertEqual(\n self.paid.status, 'OPEN', self.paid.status\n )\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking and studio once for all\n # cancelled bookings\n unpaid_booking = Booking.objects.get(id=self.unpaid.id)\n paid_booking = Booking.objects.get(id=self.paid.id)\n self.assertEqual(len(mail.outbox), 0)\n self.assertEqual(\n unpaid_booking.status, 'OPEN', unpaid_booking.status\n )\n self.assertEqual(\n paid_booking.status, 'OPEN', paid_booking.status\n )\n\n # auto_cancelled set to True only on cancelled 
bookings\n self.assertFalse(unpaid_booking.auto_cancelled)\n self.assertFalse(paid_booking.auto_cancelled)", "title": "" }, { "docid": "820d1f3307e066ab24821548eb04bf4c", "score": "0.50115657", "text": "def isOnSaleSoon(ticket):\n return ticket.start_time > timezone.now()", "title": "" }, { "docid": "99f1843e58d485cff565bd73b3ba806c", "score": "0.500794", "text": "def _claim_iscore(self) -> None:\n iscore_details_dict = self._system.queryIScore(self.address)\n if iscore_details_dict['estimatedICX'] != 0:\n amount = iscore_details_dict[\"estimatedICX\"]\n self._system.claimIScore()\n self._daily_reward.set(amount)\n self._rate.set(self.getRate())\n self._total_lifetime_reward.set(self.getLifetimeReward() + amount)\n self._distributing.set(True)", "title": "" }, { "docid": "800b7a21c89f88bfe6327f7bb4361f22", "score": "0.5006686", "text": "def should_pay_attention(self):\n return random.randint(1,100) > self.wander", "title": "" }, { "docid": "d3c1c2ca303c52e34683b990d4edc7da", "score": "0.50048196", "text": "def test_authorize_pending_payment(self):\n pass", "title": "" }, { "docid": "e9eeb30737a0056e7591f58ee9369f45", "score": "0.5003403", "text": "def acknowledge_visitor(visitor):\n visitor.acknowledged = True\n visitor.acknowledged_time = datetime.now()\n visitor.save()", "title": "" }, { "docid": "a4fcf332d5161239a6433b289ae158ed", "score": "0.50032485", "text": "def resent(self):\n return 'Resent-Date' in self", "title": "" }, { "docid": "42ef7fe9adf9b416d5c880d173273e84", "score": "0.49881572", "text": "def pre_approve(self, cr, uid, ids, context={}):\n \tfor voucher in self.browse(cr, uid, ids, context=context):\n \t if not voucher.department_id.analytic_account_id:\n \t raise osv.except_osv(_('Configration Check!'), _(\"Please add cost center for your department!\"))\n \t periods = self.pool.get('account.period').search(cr, uid, [('date_start','<=',voucher.date),('date_stop','>=',voucher.date),('company_id','=',voucher.company_id.id)], context=context)\n\n\n res=0.0\n if voucher.purpose:\n if not voucher.purpose.account_id: raise osv.except_osv(_('Warning!'), _('Please configure account for this purpose!')) \n voucher_line = {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': voucher.amount,\n \t\t'amount': voucher.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': voucher.department_id.analytic_account_id and voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id,\n \t }\n new_amount = res and res or voucher.amount \n voucher_line.update({'amount':new_amount,'untax_amount':new_amount})\n \t if voucher.line_ids :\n for line in voucher.line_ids:\n \t\t self.pool.get('account.voucher.line').write(cr, uid, line.id, {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': res or line.amount,\n \t\t'amount': line.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': line.account_analytic_id and line.account_analytic_id.id or voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id or line.account_id.id,\n \t }, context=context)\n \t else:\n\n \t\t new_voucher_line = self.pool.get('account.voucher.line').create(cr, uid, voucher_line, context=context)\n context.update({'purchase':True})\n self.create_budget_confirmation(cr, uid, [voucher.id], context)\n \tself.write(cr, uid, ids,{'state': 'preapprove','type':'purchase','ratification':True}, context=context)\n #cxt = 
context.copy()\n #cxt.update({'type':'ratification'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approved'}, context=context)\n\n \t'''self.write(cr, uid, ids, {'state': 'preapprove'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approve','type':'purchase','ratification':True}, context=context)'''\n return True", "title": "" }, { "docid": "9bcdcd6a69d116b1f4b370a2e026f1a4", "score": "0.49817756", "text": "def test_update_rate_plan(self):\n pass", "title": "" }, { "docid": "4acf948659222da3835bbf26fc3a9017", "score": "0.49740785", "text": "def test_carryover(self):\n carryover = Money(100)\n available = {Decimal(0.5): carryover}\n self.forecast(available)\n self.assertEqual(\n self.forecast.carryover,\n carryover)", "title": "" }, { "docid": "3c2504305e30cfc1efa4189640cffd33", "score": "0.4970885", "text": "def recurring(self):\n return self.filter(total_billing_cycles__isnull=True)", "title": "" }, { "docid": "29d0e95adc90beb142a910e5cbf8e54c", "score": "0.4959831", "text": "def activate_card(**kwargs):\n for card in ClientClubCard.objects.filter(\n date_begin__isnull=True, is_paid_activate=False):\n # period for activate\n days = card.club_card.period_activation\n if card.date_start + timedelta(days - 1) < date.today():\n card.activate()", "title": "" }, { "docid": "12d51add250d35f7917a5213649c725c", "score": "0.49592566", "text": "def aspiration_criteria_satisfied(self,index):\n\t\tnewEnergy = self.currentEnergy + self._diffEi[index]#self.count_energy(state)\n\t\t# newTax = self._taxes[index]#self.count_tax(state=state)\n\t\ttmpState = copy(self.currentState)\n\t\ttmpState[index] = 1 - tmpState[index]\n\t\tnewTax = self.count_tax(state=tmpState)\n\t\toutput(\"Checking aspiration criteria: oldEnergy={}, current tax = {}; new energy = {}, new tax = {}\"\\\n\t\t\t\t\t \t\t\t\t.format(self.currentEnergy,self.currentTax,newEnergy,newTax),isDebug=True,tabsNum=1)\n\t\tif newEnergy < self.localMinimumEnergy or newTax < self.localMinimumTax:\n\t\t\treturn True\n\t\treturn False", "title": "" }, { "docid": "b5cc8f4771dd58297a6e3b452182daef", "score": "0.4951388", "text": "def agree_to_tos(self, regr):\n return self.update_registration(\n regr.update(\n body=regr.body.update(\n agreement=regr.terms_of_service)))", "title": "" }, { "docid": "75ab2bff6b751ae042892e7659f71902", "score": "0.49507055", "text": "def test_deny_pending_payment(self):\n pass", "title": "" }, { "docid": "3edd57c4e43f7b72baf285f6459805fb", "score": "0.49433905", "text": "def test_4(self):\n c1 = Store.Customer(\"harold\", \"qcf\", False)\n self.assertFalse(c1.is_premium_member(), \"IS premium member\")", "title": "" }, { "docid": "e9b6b7553ae5fcebc14a9b407f801a55", "score": "0.4942659", "text": "def test_rate_cost_type_valid(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = [\n {\n \"unit\": \"USD\",\n \"value\": 0.22,\n \"usage\": {\"usage_start\": None, \"usage_end\": None},\n \"cost_type\": \"Infrastructure\",\n }\n ]\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = [\n {\n \"unit\": \"USD\",\n \"value\": 0.22,\n \"usage\": {\"usage_start\": None, \"usage_end\": None},\n \"cost_type\": \"Supplementary\",\n }\n ]\n\n with 
tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "title": "" }, { "docid": "89995bce1c766439b816fae14b3a212a", "score": "0.49375176", "text": "def test_cancel_on_braintree_success(self):\n self.braintree_customer.pending_cancel = True\n self.braintree_customer.active = True\n self.braintree_customer.subscription_id = self.cancellable_id\n\n SubscriptionManager.cancel_on_braintree(self.braintree_customer)\n\n self.assertEqual(\"\",self.braintree_customer.subscription_id)\n self.assertFalse(self.braintree_customer.pending_cancel)\n self.assertFalse(self.braintree_customer.active)", "title": "" }, { "docid": "ceb541e8c03f9188a6ad804fc06af67f", "score": "0.49262586", "text": "def test_create_new_order_VIES_fault(self):\n rup = baker.make(\n \"RecurringUserPlan\",\n user_plan__user__billinginfo__country=\"CZ\",\n user_plan__user__billinginfo__tax_number=\"CZ0123\",\n amount=10,\n tax=11,\n )\n with no_connection():\n order = rup.create_renew_order()\n self.assertEqual(order.tax, 11)", "title": "" }, { "docid": "b1d65a3322432bdf78bb8926583c4261", "score": "0.49252683", "text": "def test_charge_correct_for_regular_after_close_1_day(self):\n rental = create_test_rental(\n book=self.book3,\n customer=self.user1,\n date_borrowed=\"2019-05-24 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"2.00\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)", "title": "" }, { "docid": "01d505b0decf38a647955a28599ddb50", "score": "0.4904093", "text": "def modify_expiration(self, new_expiration):\n if self.is_cancellable:\n if self.expiration!=new_expiration:\n log.info(\"bo#%s: modify expiration of pending order\" % self.ticket)\n dt = get_datetime()\n new_expiration = dt_td_to_dt(new_expiration, dt)\n not_implemented_error(\"Can't modify modify expiration of pending order\")\n else:\n return", "title": "" }, { "docid": "8dbfc818a7ffbd6d8f09400f973bae44", "score": "0.49022606", "text": "def contribute(k):\n global amount\n global _alarm\n amount = amount + k\n # remove the alarm\n if amount >= initial_amount * 0.3:\n _alarm = False", "title": "" }, { "docid": "d2a8259def53576441e2c3cbf731cff5", "score": "0.48961106", "text": "def test_3(self):\n c1 = Store.Customer(\"harold\", \"qcf\", True)\n self.assertTrue(c1.is_premium_member(), \"not premium member\")", "title": "" }, { "docid": "ed609928d752e18a160ececd58722048", "score": "0.48943877", "text": "def done(self,cr,uid,ids,context=None):\n for record in self.browse(cr, uid, ids, context=context):\n search_result = self.pool.get('payment.enrich').browse(cr, uid,record.enrich_id.id)\n if record.cost < 1 :\n raise osv.except_osv(_('Invalid Action Error'), _('The Entered Cost Is Wrong!'))\n if record.cost > search_result.residual_amount :\n raise osv.except_osv(_('Invalid Action Error'), _('Your Residual Balance Is Less Than Your Cost!'))\n if context:\n if 'owner' in context and 'model_id' in context:\n owner = context['owner']\n owner = int(owner)\n model_id = context['model_id']\n if str(model_id) == 'fleet.vehicle.log.contract':\n model_obj = self.pool.get(model_id)\n model = model_obj.browse(cr, uid, 
owner, context=context)\n model.write({'state':'open'})\n return self.write(cr, uid, ids, {'state':'done'},context=context)", "title": "" }, { "docid": "69a162a79de7a75785f9e6b2d9f07588", "score": "0.48916712", "text": "def use(self):\n if self.flag:\n if self.credit < self.price_of_trip:\n return \"Your credit is not enough, please increase your credit\"\n else:\n self.credit -= self.price_of_trip\n return \"Done\"\n else:\n return \"Sorry, your card has expired.\"", "title": "" }, { "docid": "8f42aed0cc21bdb3278ce55b720546c9", "score": "0.48887736", "text": "def save(self, *args, **kwargs): #pylint: disable=W0221\n created = not self.uuid\n if created:\n existing_balance = self.account.calculated_balance\n else:\n all_other_active_transactions = [x for x in self.account.transactions.all() if x != self and x.active]\n existing_balance = sum(x.amount for x in all_other_active_transactions)\n\n if not self.active:\n pass\n elif (existing_balance + self.amount) < 0:\n raise AccountBalanceError(\n 'Balance of account {} would be brought below 0'.format(self.account)\n )\n\n instance = super().save(*args, **kwargs)\n self.account.update_balance()\n return instance", "title": "" }, { "docid": "b363d7bccb6a832fb10e43ce7a25224e", "score": "0.48865578", "text": "def test_check_cost():", "title": "" }, { "docid": "5414a8c5839816743bf0cbd8ac9e56b2", "score": "0.4879177", "text": "def test_subscribe_no_previous_subscription(self):\n self.braintree_customer.payment_method_token = 'valid_payment_token'\n SubscriptionManager.subscribe(self.braintree_customer)\n\n # fetch customer again\n self.braintree_customer = BraintreeUser.objects.get(user=self.user)\n self.assertTrue(self.braintree_customer.active)\n self.assertFalse(self.braintree_customer.pending_cancel)\n self.assertEqual(self.braintree_customer.expiry_date, timezone.make_aware(datetime.datetime(2017,7,25,0,0),pytz.utc))", "title": "" }, { "docid": "8ca9f8a29d701d9d48f8e45102180e20", "score": "0.48787516", "text": "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "title": "" }, { "docid": "9cd45ad080c1c56c8cbfc9207b5b5102", "score": "0.48765975", "text": "def user_added_credit(self):\n return (self.user.Credit > 0)", "title": "" }, { "docid": "062c77ab3749342bb1564bcd2adac432", "score": "0.48746642", "text": "def can_accept_credit(self, value):\n return value >= 0", "title": "" }, { "docid": "8122483a63fd15da82f48566f50c3229", "score": "0.48686194", "text": "def can_reschedule(self) -> bool:\n return pulumi.get(self, \"can_reschedule\")", "title": "" }, { "docid": "16f6380e73fd49f8b86458e7fc9e0b33", "score": "0.48665464", "text": "def evaluate_cost(self, msg):\n raise NotImplementedError()", "title": "" }, { "docid": "6f89aace960b6791c1f3e1ff7d144a06", "score": "0.4865596", "text": "def event_charge(order_type):\r\n if order_type == \"Promo event\":\r\n return 400\r\n\r\n else:\r\n return 0\r\n # promo event adds $400\r", "title": "" }, { "docid": "47d5396dbeb716ea766bf4f1d486f78b", "score": "0.48641136", "text": "def accept_venue_transferring(venue_transferring_id, notice_id):\n result = False\n try:\n venue_transferring = VenueAccountTransferring.objects.get(pk=venue_transferring_id)\n except VenueAccountTransferring.DoesNotExist:\n venue_transferring = None\n\n if venue_transferring and venue_transferring.target:\n venue_account = venue_transferring.venue_account\n from_user = 
venue_transferring.venue_account.account.user\n target = venue_transferring.target.user\n\n venue_account.account = venue_transferring.target\n venue_account.save()\n\n venue_account.venue.user = venue_transferring.target.user\n venue_account.venue.save()\n\n Event.events.filter(venue_account_owner_id=venue_account.id).update(owner=venue_transferring.target.user.id)\n\n venue_transferring.delete()\n try:\n notice = Notice.objects.get(pk=notice_id)\n except Notice.DoesNotExist:\n notice = None\n\n if notice:\n notice_data = json.loads(notice.log)\n notice_data['state'] = 'Accepted'\n notice.log = json.dumps(notice_data)\n notice.read = True\n notice.save()\n\n target_name = target.username\n target_link = reverse('userena_profile_detail', kwargs={'username': target.username})\n notice_service.create_notice('venue_transferring_accepting', from_user, {\n 'subject': 'Cityfusion: transferring of your venue has been accepted.',\n 'user': from_user,\n 'venue_account': venue_account,\n 'target_name': target_name,\n 'target_link': target_link,\n }, {\n 'venue_name': venue_account.venue.name,\n 'venue_link': reverse('public_venue_account', kwargs={'slug': venue_account.slug}),\n 'target_name': target_name,\n 'target_link': target_link,\n 'date': datetime.datetime.now().strftime('%A, %b. %d, %I:%M %p'),\n })\n\n result = True\n return result", "title": "" }, { "docid": "49498e9ce2c5bce44185577c91b23796", "score": "0.48574036", "text": "def test_calendar_query_expanded_recurring(self):\n raise SkipTest(\"test unimplemented\")", "title": "" }, { "docid": "c6324c28d862c00ffdc2ae0dc5e70e6b", "score": "0.48518384", "text": "def canAct(self) -> bool:\n return self.cooldown < 1", "title": "" }, { "docid": "f45779ffbce56bfee3e2be30e2f8f871", "score": "0.48489207", "text": "def can_act(self, **kwargs):\n return True", "title": "" }, { "docid": "93a9c89b27742aba82795030c9f364b2", "score": "0.48466724", "text": "def allowed(self, user, amount):\n return True", "title": "" }, { "docid": "03ed1b16924abba4e92036f3818dbe89", "score": "0.48446918", "text": "def accept_transfer(self):\n self.is_accepted = True\n self.date_time_accepted = models.DateTimeField(auto_now=True)", "title": "" }, { "docid": "1840fd1a32ea102c2e1a3f9b81b39e3c", "score": "0.4842639", "text": "def reservation_mark_entrance(user: User, reservation: Reservation):\n owns_restaurant = reservation.restaurant.operator == user\n if owns_restaurant and reservation.status is ReservationState.ACCEPTED and reservation.reservation_time <= datetime.datetime.now():\n #Might want to add user notification\n reservation.entrance_time = datetime.datetime.now()\n reservation.status = ReservationState.SEATED\n db.session.commit()\n return True\n\n return False", "title": "" }, { "docid": "feb54b24ff87cfae41c9d6f70d27ac2f", "score": "0.48392415", "text": "def is_pending_approval(self):\n if self.registration_method == self.REQUESTED \\\n and self.is_pending_activation():\n return True\n else:\n return False", "title": "" }, { "docid": "19cfca091b1463122f1d72df8cd26df0", "score": "0.48392242", "text": "def save(self, *args, **kwargs):\n self.expire_date = self.sent_date + timedelta(days=self.period)\n super(Referral, self).save(*args, **kwargs)", "title": "" }, { "docid": "a0cc51ab46ff66425dd3db7cba10015c", "score": "0.48387", "text": "def test_no_credit_change_for_subscription_downgrade(self):\n credits = 20\n\n current_plan = Subscription.get_plan_by_id('pro')\n new_plan = Subscription.get_plan_by_id('standard')\n\n credits = add_subscription_credits(credits, 
current_plan, new_plan, None)\n\n assert credits == 20", "title": "" } ]
ff858a38afa1ebd09c9cc6d02fb8e698
r""" Computes the reduced density matrix representation of the state. May be numerical or symbolic.
[ { "docid": "5571e02f616db6f7dcabfb10903524cd", "score": "0.6066919", "text": "def reduced_dm(self, modes, **kwargs):\n if modes == list(range(self.num_modes)):\n # reduced state is full state\n return self.dm(**kwargs)\n\n if isinstance(modes, int):\n modes = [modes]\n if modes != sorted(modes):\n raise ValueError(\"The specified modes cannot be duplicated.\")\n if len(modes) > self.num_modes:\n raise ValueError(\n \"The number of specified modes cannot \" \"be larger than the number of subsystems.\"\n )\n\n reduced = self.dm(**kwargs)\n reduced = reduced_density_matrix(reduced, modes, False, batched=self.batched)\n\n s = tf.identity(reduced, name=\"reduced_density_matrix\")\n return s", "title": "" } ]
[ { "docid": "364f54a74c07f748ea0080ffc36d9b1d", "score": "0.6417854", "text": "def subsystem_reduced_density_matrix(self, bit_labels):\n bit_labels_inds = [i-1 for i in bit_labels]\n other_labels_inds = list(set(range(self.system_size)) - set(bit_labels_inds))\n state_reshape = np.reshape(self.state, tuple([2]*self.system_size))\n state_reshape = np.transpose(state_reshape, bit_labels_inds + other_labels_inds)\n state_reshape = np.reshape(state_reshape, (2**len(bit_labels_inds), 2**len(other_labels_inds)))\n state_reshape = np.asmatrix(state_reshape)\n rho = state_reshape*state_reshape.H\n return rho", "title": "" }, { "docid": "f06c3a022b5422d6e7db9221dbeeb3a6", "score": "0.6406758", "text": "def purity(state):\n mat = np.reshape(state, (2, 2))\n reduced_rho = np.dot(mat, np.conj(mat.T))\n return np.trace(np.dot(reduced_rho, reduced_rho))", "title": "" }, { "docid": "06ee8f9e55a76e8517b4a14e76094cf4", "score": "0.6384309", "text": "def reduced_density_matrix(self, left_bits, right_bits, which='left'):\n reshaped_state = np.reshape(self.state, (2**left_bits,\n 2**right_bits)) \n reshaped_state = np.asmatrix(reshaped_state)\n if which == 'left':\n rho = reshaped_state*reshaped_state.H\n return rho", "title": "" }, { "docid": "85162c067d7fa099823f5501dad6eedc", "score": "0.57764155", "text": "def _derivatives(self, state):\n ad_state = ad.adnumber(state)\n state_deriv = self.force_model(0, ad_state)\n a_matrix = jacobian(state_deriv, ad_state)\n # remove ad number from all state values before returning\n state_deriv = [state.real for state in state_deriv]\n\n return state_deriv, a_matrix", "title": "" }, { "docid": "79cf5a3c466bbd9f31a3da7ef43a8265", "score": "0.5732339", "text": "def get_state_realspace_density(self, b, k, s, dim=None):\n\n\t\tself.check_c_projectors()\n\t\tif dim is not None:\n\t\t\tself.update_dim(np.array(dim)//2)\n\t\treturn self._get_realspace_state_density(b, k, s)", "title": "" }, { "docid": "e4bb90809375f1c7d4c2865bd2ccec94", "score": "0.558464", "text": "def get_density(self):\n return nx.density(self.G)", "title": "" }, { "docid": "05d106b220b1ee6e4f25244506b6018c", "score": "0.55698353", "text": "def quantum_density(self):\n d12 = self._D12.sum()\n\n rho = np.array(\n [[self._D11.sum(), d12], [d12.conjugate(), self._D22.sum()]]\n )\n\n rho *= self.dqdp\n\n # For the following consistency checks, get eigenvalues\n p = np.linalg.eigvalsh(rho)\n\n assert np.allclose(p[p < 0], 0), \"Quantum density matrix must be non-negative\"\n\n assert np.allclose(p.sum(), 1), \"Trace of quantum density matrix must be one\"\n\n return rho", "title": "" }, { "docid": "1c698a987e20025f57dbbd45c3e3d824", "score": "0.5514214", "text": "def NE2001_dens():\n pass", "title": "" }, { "docid": "954fe278d25fb5a651c45cd128595f60", "score": "0.5507463", "text": "def _density(self):\n prof_copy = self.profile.copy()\n ptype = prof_copy.pop('type', \"NFW\")\n return density_factory(ptype, **prof_copy)", "title": "" }, { "docid": "1dc786523a52221b43a05731b29580a9", "score": "0.5466165", "text": "def stateDensity(self, Eps):\n return self._stateDensity(Eps)\n '''\n # Alternative calculaltion without interpolation\n if not isinstance(Eps, (collections.Sequence, np.ndarray)):\n return 16.*np.pi**2 * quad(lambda r: r**2 * np.sqrt(2.*self.potential(r) - 2.*Eps), 0., self.r_of_Eps(Eps))[0]\n return 16.*np.pi**2 *np.array([quad(lambda r: r**2 * np.sqrt(2.*self.potential(r) - 2.*Eps), 0., self.r_of_Eps(Eps))[0] for Eps in Eps])\n '''", "title": "" }, { "docid": "1e7e001fdb35f9e2b0b2b37b56ddebaa", 
"score": "0.54642904", "text": "def __call__(self, state: tf.Tensor, is_density_matrix: bool = False\n ) -> tf.Tensor:\n if self._nqubits is None:\n if is_density_matrix:\n self.nqubits = len(tuple(state.shape)) // 2\n else:\n self.nqubits = len(tuple(state.shape))\n\n if self.is_controlled_by:\n return self._controlled_by_call(state, is_density_matrix)\n\n if is_density_matrix:\n state = self.einsum(self.calculation_cache.right, state,\n tf.math.conj(self.matrix))\n state = self.einsum(self.calculation_cache.left, state, self.matrix)\n return state\n\n return self.einsum(self.calculation_cache.vector, state, self.matrix)", "title": "" }, { "docid": "73ac4e6dcdd8dfcf436fc9926b6cf18a", "score": "0.54595035", "text": "def testDensityOfStates(self):\n Elist = numpy.arange(0, 100000, 2000, numpy.float)\n densStates = self.mode.getDensityOfStates(Elist)\n self.mode.fourier = None; self.mode.energies = None\n densStates = self.mode.getDensityOfStates(Elist)", "title": "" }, { "docid": "f6cec97371d2389325ad11ed52f664f0", "score": "0.54263574", "text": "def density(self, state: c_void_p, temperature: float) -> float:\n return self.api.glycolDensity(state, self.instance, temperature)", "title": "" }, { "docid": "b5a522d307701bceef7662e3d7225273", "score": "0.53983563", "text": "def mass_density(self):\n return self._get_data('density') * pq.g / pq.cm**3", "title": "" }, { "docid": "efcf24d459728bfefcbcfc4a9558562f", "score": "0.53568625", "text": "def calcStatePopulationDensity(self) -> None:\n\t\t# selecting rows that correspond with the state\n\t\tsumPopArea = self.stateCensusTractGDF['DP0010001'].sum(axis=0)\n\t\tsumLandArea = self.stateCensusTractGDF['ALAND10'].sum(axis=0)\n\t\t# sums_population_area= self.state.sum(axis=0)\n\t\t# pop_density_calclulated= sums_population_area.DP001001/sums_population_area.LAND_AREA\n\t\tpopDensityCalculated = sumPopArea / sumLandArea\n\t\t# adding new col\n\t\t# self.nationalCensusTractsGDF['Population Density']=None\n\t\tself.nationalCensusTractsGDF['statePopDensity'] = popDensityCalculated", "title": "" }, { "docid": "fc4ed56fbc646f9391728c57ddf0c7cb", "score": "0.53507805", "text": "def W_red(W):\n assert issparse(W), \"Matrix needs to be sparse\"\n D_inv = degree_matrix(W, squared=False, indegree=False) # Assumes undireced graph, thus in-degree = out-degree.\n D_inv.data[:] = np.power(D_inv.data, -0.5) # D^(-1/2)\n return D_inv.dot(W).dot(D_inv)", "title": "" }, { "docid": "24ad33e776fd43339cbc1a24518cc4b1", "score": "0.5348118", "text": "def saturated_density(self, state: c_void_p, temperature: float, quality: float) -> float:\n return self.api.refrigerantSaturatedDensity(state, self.instance, temperature, quality)", "title": "" }, { "docid": "166bdca3b0bc2ee9a22ccd5d071e23ae", "score": "0.5335536", "text": "def cc_dense(self):\n cc_dense = self.iweights[[\"pid\", \"STATE\", \"iweight_state\"]].copy()\n # Use PID to match initial weights with weighted targets\n cc_dense = cc_dense.merge(self.puf_2017_filter, on=\"pid\")\n for var in self.targ_list:\n state_var = var + \"_targ\"\n cc_dense[state_var] = cc_dense[var] * cc_dense[\"iweight_state\"]\n cc_dense = cc_dense.drop(labels=self.var_list, axis=1)\n cc_dense = cc_dense.drop(labels=[\"s006\", \"AGI_STUB\"], axis=1)\n # j is an index for x, the variable we will solve for\n cc_dense[\"j\"] = np.arange(len(cc_dense)) + 1\n return cc_dense", "title": "" }, { "docid": "89e3b690833477628c973f2500d5ee5c", "score": "0.5307281", "text": "def density(self):\n return DensityModel(a=14.1 * units.gram /\n 
units.cm**3, model='constant')", "title": "" }, { "docid": "614c386f70be9342154c0d4d615222e9", "score": "0.530172", "text": "def testDensityOfStates(self):\n        Elist = numpy.arange(0, 100000, 2000, numpy.float)\n        densStates = self.mode.getDensityOfStates(Elist)", "title": "" }, { "docid": "e30eb831acf5f41270a6137d9ea4fed5", "score": "0.5297523", "text": "def reduced_state(self, wires):\n        if len(wires) == self.num_wires:\n            # reduced state is full state\n            return self._state\n\n        # translate to wire labels used by device\n        device_wires = self.map_wires(wires)\n\n        # reduce rho down to specified subsystems\n        ind = np.concatenate([device_wires.toarray(), device_wires.toarray() + self.num_wires])\n        rows = ind.reshape(-1, 1)\n        cols = ind.reshape(1, -1)\n\n        return self._state[0][rows, cols], self._state[1][ind]", "title": "" }, { "docid": "956672841395e0ef4f499aa106f010f1", "score": "0.52795523", "text": "def density_matrix(psi):\n    return np.dot(psi[:, np.newaxis], psi[np.newaxis, :])", "title": "" }, { "docid": "825591400c59d579f1da6146edd1c41e", "score": "0.52540666", "text": "def compute_density(self):\n        return density(self.coef_)", "title": "" }, { "docid": "84dfdc84274e9b5827a102a50f6fb572", "score": "0.52432853", "text": "def _discrete_state(self):\n        return np.array(int(round(self.S * self.N)))", "title": "" }, { "docid": "72b2749f4888f4104463f042150bf700", "score": "0.5217364", "text": "def DO(self):\n        # statevector.py and operators.py have circular import\n        from . 
import operators\n return operators.DensityOperator(self ^ self.conj(), self.basis)", "title": "" }, { "docid": "fe31db5ab488dc0f4a5f72a53c34cf52", "score": "0.520201", "text": "def deq_mm(ssa):\n return 1e3 * 6 / (917 * np.asarray(ssa))", "title": "" }, { "docid": "03b2c137509c41dff51bf722abe37482", "score": "0.5191413", "text": "def reactive_density(\n assignments, sinks, source=None, all_reactive=False, verbose=False):\n if not all_reactive:\n pathways = reactive_pathways(assignments, sinks, verbose=verbose)\n else:\n pathways = assignments\n\n state_counts = np.array(\n np.bincount(np.concatenate(pathways)))\n densities = state_counts / np.sum(state_counts)\n\n return densities", "title": "" }, { "docid": "c10bea322cdeeb893df7b384935f3487", "score": "0.5177796", "text": "def __getDressedMatrixElements(self,UNmat):\n UNmatdimension = len(UNmat.toarray())\n #print(UNmat.toarray())\n n0 = self.n\n j0 = self.j\n l0 = self.l\n m0 = self.m1\n state_main = [self.nn,self.ll,self.jj,self.m2]\n d0 = self.atom1.getDipoleMatrixElement(n0,l0,j0,m0,self.nn,self.ll,self.jj,self.m2,0)\n Omega_array = []\n Omg0 = self.Omega0 \n for i in range(UNmatdimension):\n d = 0 \n if (self.basisStates[i][:4] == state_main) or (self.basisStates[i][4:] == state_main):\n d = self.atom1.getDipoleMatrixElement(n0,l0,j0,m0,self.nn,self.ll,self.jj,self.m2,0)\n Omega_array = np.append(Omega_array,0.5*Omg0*d/d0)\n row = np.zeros(UNmatdimension) + 1\n col = np.arange(UNmatdimension)\n mat = csr_matrix((Omega_array, (row, col)), shape=(2, UNmatdimension))\n UNmat = vstack([mat,UNmat])\n row = np.arange(UNmatdimension+2)\n row = np.concatenate((np.array([1]), row))\n col = np.zeros(UNmatdimension+2) + 1\n col = np.concatenate((np.array([0]), col))\n Omega_array = np.concatenate((np.array([Omg0*0.5,Omg0*0.5,self.Delta0]), Omega_array))\n mat = csr_matrix((Omega_array, (row, col)), shape=(UNmatdimension+2, 2))\n UNmat = hstack([mat,UNmat])\n UNmat = csr_matrix(UNmat)\n #print(UNmat.toarray())\n return UNmat", "title": "" }, { "docid": "c93bea98cc7212c6e4a8d5a3c7305310", "score": "0.51725036", "text": "def dm(self, **kwargs):\n if self.is_pure:\n if self.batched:\n batch_index = indices[0]\n free_indices = indices[1:]\n else:\n batch_index = \"\"\n free_indices = indices\n ket = self.data\n left_str = [batch_index] + [free_indices[i] for i in range(0, 2 * self.num_modes, 2)]\n right_str = [batch_index] + [free_indices[i] for i in range(1, 2 * self.num_modes, 2)]\n out_str = [batch_index] + [free_indices[: 2 * self.num_modes]]\n einstr = \"\".join(left_str + [\",\"] + right_str + [\"->\"] + out_str)\n s = tf.einsum(einstr, ket, tf.math.conj(ket))\n else:\n s = tf.identity(self.data, name=\"density_matrix\")\n\n return s", "title": "" }, { "docid": "9c48980a00fc054b50cebc53e52fd805", "score": "0.515427", "text": "def to_valid_density_matrix(\n density_matrix_rep: Union[int, np.ndarray],\n num_qubits: Optional[int] = None,\n *, # Force keyword arguments\n qid_shape: Optional[Tuple[int, ...]] = None,\n dtype: Type[np.number] = np.complex64,\n atol: float = 1e-7) -> np.ndarray:\n qid_shape = _qid_shape_from_args(num_qubits, qid_shape)\n if (isinstance(density_matrix_rep, np.ndarray) and\n density_matrix_rep.ndim == 2):\n if density_matrix_rep.shape != (np.prod(qid_shape, dtype=int),) * 2:\n raise ValueError(\n 'Density matrix was not square and of size 2 ** num_qubit, '\n 'instead was {}'.format(density_matrix_rep.shape))\n if not np.allclose(density_matrix_rep,\n np.transpose(np.conj(density_matrix_rep)),\n atol=atol):\n raise 
ValueError('The density matrix is not hermitian.')\n if not np.isclose(np.trace(density_matrix_rep), 1.0, atol=atol):\n raise ValueError(\n 'Density matrix did not have trace 1 but instead {}'.format(\n np.trace(density_matrix_rep)))\n if density_matrix_rep.dtype != dtype:\n raise ValueError(\n 'Density matrix had dtype {} but expected {}'.format(\n density_matrix_rep.dtype, dtype))\n if not np.all(np.linalg.eigvalsh(density_matrix_rep) > -atol):\n raise ValueError('The density matrix is not positive semidefinite.')\n return density_matrix_rep\n\n state_vector = to_valid_state_vector(density_matrix_rep,\n len(qid_shape),\n qid_shape=qid_shape,\n dtype=dtype)\n return np.outer(state_vector, np.conj(state_vector))", "title": "" }, { "docid": "63f5bd7522ce30ae82d10f2d485eea53", "score": "0.5153912", "text": "def density(self):\n return(self.mass() / ((4.0/3.0)*scipy.pi*(self.radius()**3)))", "title": "" }, { "docid": "9ce4558b5432a7aad8b52addf8a14f14", "score": "0.51530224", "text": "def density_matrix_from_state_vector(\n state_vector: Sequence,\n indices: Optional[Iterable[int]] = None,\n qid_shape: Optional[Tuple[int, ...]] = None,\n) -> np.ndarray:\n shape = validate_qid_shape(state_vector, qid_shape)\n n_qubits = len(shape)\n\n if indices is None:\n return np.outer(state_vector, np.conj(state_vector))\n\n indices = list(indices)\n validate_indices(n_qubits, indices)\n\n state_vector = np.asarray(state_vector).reshape(shape)\n\n sum_inds = np.array(range(n_qubits))\n sum_inds[indices] += n_qubits\n\n rho = np.einsum(state_vector, list(range(n_qubits)), np.conj(state_vector),\n sum_inds.tolist(), indices + sum_inds[indices].tolist())\n new_shape = np.prod([shape[i] for i in indices], dtype=int)\n\n return rho.reshape((new_shape, new_shape))", "title": "" }, { "docid": "6a3b6f070d782e552643309a0e086854", "score": "0.51415163", "text": "def density_model(self) -> BaseModel1D:\n return self._dens_model", "title": "" }, { "docid": "134a3fd7dc38484a5d225c4c8a41acc5", "score": "0.5096904", "text": "def spd_decomposition(S):\n\tP = force_symmetric_to_symmetric_positive_definite(S)\n\tM = symmetric_positive_definite_decomposition(P)\n\treturn M", "title": "" }, { "docid": "0771801c8c7fff1b3ad82e4b2a1da3ff", "score": "0.5084259", "text": "def diagonalize(reduced_density_matrix):\n try:\n eigenvals, eigenvecs = np.linalg.eigh(reduced_density_matrix)\t\n except LinAlgError:\n\traise DMRGException(\"Error diagonalizing the reduced DM\")\n\n return (eigenvals, eigenvecs)", "title": "" }, { "docid": "7ef40d865d68440d621ba2b4df6efb3b", "score": "0.50785637", "text": "def deco_symmatrix ( m ) :\n\n if m in LinAlg.decorated_matrices : return m \n\n LinAlg.deco_matrix ( m )\n \n R = m.rep_type\n \n m.__str__ = LinAlg.MS_STR\n m.__repr__ = LinAlg.MS_STR\n m.table = LinAlg.MS_STR\n\n m.correlations = LinAlg.MS_CORR\n \n m.Sim = LinAlg.SIM\n m.sim = LinAlg.SIM\n m.SimT = LinAlg.SIMT\n m.simT = LinAlg.SIMT\n \n m.eigenValues = LinAlg.MS_EIGENVALUES \n m.eigenVectors = LinAlg.MS_EIGENVECTORS\n m.eigen_values = LinAlg.MS_EIGENVALUES \n m.eigen_vectors = LinAlg.MS_EIGENVECTORS\n\n m.__reduce__ = LinAlg.MS_REDUCE \n m.rep_size = classgetter ( lambda cls : cls.rep_type.kSize ) \n \n return m", "title": "" }, { "docid": "d02996822e121457dbadc79e77833684", "score": "0.50629157", "text": "def normalized(self):\n # swap cols and rows and row reduce\n A = sp.Matrix(self.mat).transpose().rref()[0]\n # ensure all entries are integral by multiplying by the LCD\n A *= reduce(lcm, map(lambda x: x.as_numer_denom()[1], # get 
denominators\n sp.utilities.iterables.flatten(A)))\n # see https://groups.google.com/forum/#!topic/sympy/e8hcF4QAldc\n A = np.array(sp.lambdify((), A, 'numpy')()) # convert to numpy array\n A = A[np.any(A,1),:].transpose() # remove 0 rows, swap rows and columns\n return HashableMatrix(np.matrix(A))", "title": "" }, { "docid": "e9d2948fdd0278c0960eac13394ff777", "score": "0.50607157", "text": "def test_dephasing(self):\n\n # Check for identity on |11> state\n test_density_matrix = (dephasing_channel(self.density_matrix, 1, 1))\n self.assertAlmostEqual(norm(self.density_matrix - test_density_matrix),\n 0.0)\n\n test_density_matrix = (dephasing_channel(self.density_matrix,\n 1,\n 1,\n transpose=True))\n\n correct_matrix = np.array([[0., 0., 0., 0.], [0., 0., 0., 0.],\n [0., 0., 0.5, -0.5], [0., 0., -0.5, 1.]])\n self.assertAlmostEqual(norm(correct_matrix - test_density_matrix), 0.0)\n\n # Check for correct action on cat state\n # With probability = 0\n test_density_matrix = (dephasing_channel(self.cat_matrix, 0, 1))\n self.assertAlmostEqual(norm(self.cat_matrix - test_density_matrix), 0.0)\n # With probability = 1\n\n correct_matrix = np.array([[0.50, 0.25, 0.00, 0.00],\n [0.25, 0.25, 0.00, -0.25],\n [0.00, 0.00, 0.00, 0.00],\n [0.00, -0.25, 0.00, 0.50]])\n test_density_matrix = (dephasing_channel(self.cat_matrix, 1, 1))\n self.assertAlmostEqual(norm(correct_matrix - test_density_matrix), 0.0)", "title": "" }, { "docid": "3fc538d60bed88cd11599c3c6959f4b5", "score": "0.5043117", "text": "def density(self):\r\n graph = self.__graph_dict.copy()\r\n vertexes = len(graph.keys())\r\n edges = len(self.edges())\r\n\r\n return 2.0 * edges / (vertexes *(vertexes - 1))", "title": "" }, { "docid": "e5f9f9e4020c8f8330863b342a455e0c", "score": "0.50127727", "text": "def density(compound, state, value_index=0):\n table = density_table(compound)\n state_idx = state_index(state)\n try:\n return list(table.loc[(table['state'] == state_idx), 'value'])[value_index]\n except IndexError:\n print('Invalid state or compound')", "title": "" }, { "docid": "3f9417401b3ef118e225d0562a146075", "score": "0.50119233", "text": "def dd(self, mu: int, nu: int):\n\t\tif self.evaluated[mu,nu]: return self.Ric[mu,nu]\n\t\tRic = 0\n\t\tfor rho in range(self.dim):\n\t\t\tRic += self.R.uddd(rho,mu,rho,nu)\n\t\tself.Ric[mu,nu] = Ric.simplify()\n\t\tself.evaluated[mu,nu] = True\n\t\treturn self.Ric[mu,nu]", "title": "" }, { "docid": "ef79b4384b1a00538764f5c98445ce4d", "score": "0.50119007", "text": "def density(self):\n return sum(np.array(mass(self.symbols))) / self.volume", "title": "" }, { "docid": "b87cc4a601fab0d64ed28e35a4405a96", "score": "0.50103897", "text": "def density(self, state: c_void_p, barometric_pressure: float, dry_bulb_temp: float, humidity_ratio: float) -> float:\n return self.api.psyRhoFnPbTdbW(state, barometric_pressure, dry_bulb_temp, humidity_ratio)", "title": "" }, { "docid": "fe0da1191fb5e7d644d2e8a32e1b3aed", "score": "0.5010093", "text": "def classical_density(self):\n rho = self._D11 + self._D22\n\n # Check for the normalization condition\n assert np.allclose(rho.sum() * self.dqdp, 1), \"Classical density must be normalized.\"\n\n # Check for the positivity of the density\n assert np.allclose(rho[rho < 0], 0), \"Classical density must be nonnegative\"\n\n assert not np.isnan(rho).any(), \"Classical density contains NaNs.\"\n\n # Make sure all values are real\n assert isinstance(rho[0, 0], np.float), \"Classical density must be real\"\n\n return rho", "title": "" }, { "docid": 
"2fe6a799ba948be43589b718b1442f2a", "score": "0.5002014", "text": "def get_reversible_rate_matrix(nstates):\n M = np.exp(np.random.randn(nstates, nstates))\n pre_Q = 0.5 * (M + M.T)\n v = sample_distn(nstates)\n v_sqrt = np.sqrt(v)\n pre_Q = (pre_Q.T / v_sqrt).T * v_sqrt\n Q = pre_Q - np.diag(np.sum(pre_Q, axis=1))\n assert_reversibility(Q, v)\n return Q, v", "title": "" }, { "docid": "080916809178ad11c99fcca08377dc79", "score": "0.49931666", "text": "def density(self):\n return self._density", "title": "" }, { "docid": "ea7bff0064c282b34ff4c47214f0f4ed", "score": "0.49819955", "text": "def evaluate_density_vec(self, x):\r\n return self._evaluate_scalar_quantity(x[:,0], x[:,1], x[:,2], MNnModel.mn_density)", "title": "" }, { "docid": "8fe41ebade879197650c9af2761a6dfc", "score": "0.4976917", "text": "def get_density(self,wavefunction): \n if wavefunction is None:\n return self.density\n \n density=np.zeros(self.shape,dtype=np.float)\n for k in range(self.NK):\n for n in range(self.nbands):\n for m in range(self.nbands):\n density+=2*self.wk[k]*self.f[k,n]*np.abs(wavefunction[k,m,n]*self.ukn[k,m])**2\n return density", "title": "" }, { "docid": "5d65c01ac6a9758392a6b3882f17e447", "score": "0.49748877", "text": "def get_density(self, x):\n \n return self.get_logdensity(x).exp()", "title": "" }, { "docid": "c272faaf51fe765b66950a3fcb109cce", "score": "0.4974089", "text": "def generate_reduced_sim_mat(self):\n self.reduced_sim_mat = np.zeros((self.k, self.k), dtype='float32')\n\n for r in range(2, self.k + 1):\n for s in (range(1, r) if not self.drop_edges_between_irregular_pairs else self.regularity_list[r - 2]):\n if self.is_weighted:\n cl_pair = WeightedClassesPair(self.sim_mat, self.adj_mat, self.classes, r, s, self.epsilon)\n else:\n cl_pair = ClassesPair(self.adj_mat, self.classes, r, s, self.epsilon)\n self.reduced_sim_mat[r - 1, s - 1] = self.reduced_sim_mat[s - 1, r - 1] = cl_pair.bip_density", "title": "" }, { "docid": "30bb90332888e55a02603a36f21e30ef", "score": "0.4962091", "text": "def initialize_density(x,dx,normalization=1):\n rho=exp(-x**2)\n A=simps(rho,x)\n return normalization/A*rho", "title": "" }, { "docid": "c9ef96029d241edd7a4d21334f51ce44", "score": "0.4958901", "text": "def density(self, r):\n pass", "title": "" }, { "docid": "1ebd7cc640cd6a72cce91981a2d5ea69", "score": "0.49578607", "text": "def mass_matrix(self):\r\n if not self._fr or not self._frstar:\r\n raise ValueError('Need to compute Fr, Fr* first.')\r\n return Matrix([self._k_d, self._k_dnh])", "title": "" }, { "docid": "5af34f7f073abed1d0aba606c4e3d4c5", "score": "0.49560684", "text": "def _matrix(self):\n n, d = self._x.shape\n if n < d:\n return np.dot(self._x, self._x.T) / float(n - 1)\n return np.dot(self._x.T, self._x) / float(n - 1)", "title": "" }, { "docid": "b72faf3dd76737395397e71108fe09c0", "score": "0.49559936", "text": "def depurify(self, eps):\n probs = np.random.random(self.num_rows)\n probs = probs/np.sum(probs)\n arr = (self.arr + eps*np.diag(probs)) / (self.trace() + eps)\n return DenMat(self.num_rows, self.row_shape, arr)", "title": "" }, { "docid": "a5db345c5c728b77651f7354563b650c", "score": "0.49555916", "text": "def irreducible(self):\r\n\t\treturn farq((self.num/MCD(self.den,self.num)),(self.den/MCD(self.den,self.num)))", "title": "" }, { "docid": "0acd9a2c48d8dba88077ac1ef6e038df", "score": "0.4938685", "text": "def compute_diagonalizing_gates(\n basis_state, wires\n ): # pylint: disable=arguments-differ,unused-argument\n return []", "title": "" }, { "docid": 
"22cbc58a6fc66c8903026332ba3d82ec", "score": "0.49319252", "text": "def distribution_matrix(mat):\n return np.diag(stationary(mat))", "title": "" }, { "docid": "b9ae00ca62821f82771a40d1b404a03f", "score": "0.49295992", "text": "def normalize_adj(adj):\n # adj: dense adj\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1)) # D\n d_inv_sqrt = np.power(rowsum, -0.5).flatten() # D^-0.5\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt) # D^-0.5\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() # D^-0.5AD^0.5", "title": "" }, { "docid": "a94ce23a0f9d4ffeea5706804d3a5d4d", "score": "0.49190745", "text": "def densityFactorDry(weather):\n temperature = extractTemperature(weather, useKelvin=True)\n waterVaporPressure = humidityToPressure(weather)\n airPressure = weather.getAirPressure()*units.pascal\n dryPressure = airPressure - waterVaporPressure\n eqn = dryPressure.to_value(cds.mbar)*(57.90E-8 - 9.3250E-4/temperature.to_value(units.Kelvin)\n + 0.25844/temperature.to_value(units.Kelvin)**2.)\n densityFactor = (1. + eqn)*dryPressure.to_value(cds.mbar)/temperature.to_value(units.Kelvin)\n return densityFactor", "title": "" }, { "docid": "8fd36f9555dc547e0a2e2a0b67384f63", "score": "0.49190095", "text": "def strain_displacement_matrix(self):\n L = np.zeros((3,4)) # matrix used for mapping shape function derivatives into strains\n L[0,0] = 1; L[1,3] = 1; L[2,1:3] = 1;\n xi = 0; eta = 0; # evaluation point\n _,_,dNmat = self.mesh.eval_shape_func(xi,eta) # differentiated shape functions evaluated in (xi,eta)\n Jac = np.array([[0.5,0],[0,0.5]]) # mapping back to global coordinates\n detJ = np.linalg.det(Jac)\n invJ = 1/detJ*np.array([[Jac[1,1], -Jac[0,1]], [-Jac[1,0], Jac[0,0]]])\n G = np.zeros((4,4)) # mapping matrix between rectangular element space coordinates and global coordinates\n G[:2,:2] = invJ; G[-2:,-2:] = invJ\n B0 = np.matmul(np.matmul(L,G),dNmat) # strain displacement matrix\n\n return B0", "title": "" }, { "docid": "7acb1f9b47bb5ec27070f96bdb361bc2", "score": "0.491127", "text": "def evaluate_density(self, x, y, z):\r\n return self._evaluate_scalar_quantity(x, y, z, MNnModel.mn_density)", "title": "" }, { "docid": "9e7e01cd867d1edacc06d2d11684496c", "score": "0.48791188", "text": "def _densityfunction(\n r: float,\n core_wavenumber: floatcomplex,\n shell_wavenumber: floatcomplex,\n core_width: float,\n shell_width: float,\n) -> float:\n return (\n abs(\n _wavefunction(r, core_wavenumber, shell_wavenumber, core_width, shell_width)\n )\n ** 2\n )", "title": "" }, { "docid": "7ef32ca4bb9d73ed6bf0304dda863126", "score": "0.4866826", "text": "def cost_down(states):\n return np.linalg.norm(states)", "title": "" }, { "docid": "7873db806f156abc30c0f468865452ee", "score": "0.48629946", "text": "def getDensity(self):\n return self.__density", "title": "" }, { "docid": "9936c5f50045750f35b6449fe5ae7d59", "score": "0.4831887", "text": "def _d_sigma(self):\n try:\n dd = self.d_derivs\n except ValueError:\n return None\n den = self.density\n dv = np.matrix(np.zeros((len(den.deriv_params))))\n for i, pname in enumerate(den.deriv_params):\n dv[0, i] = dd[pname].angularIntegral(self.psi_max)[0]\n return np.sqrt((dv * self.density.covar * dv.T)[0, 0])", "title": "" }, { "docid": "0743541340bf045e3787e5333449e532", "score": "0.48263004", "text": "def renormalise(self):\n\t\trn = self.compute_partition_function_fast()\n\t\tfor prod in self.productions:\n\t\t\tif rn[prod[0]] == 0:\n\t\t\t\tself.parameters[prod] = 0\n\t\t\telse:\n\t\t\t\tif len(prod) == 
3:\n\t\t\t\t\ta,b,c = prod\n\t\t\t\t\tself.parameters[prod] *= (rn[b] * rn[c])/rn[a] \n\t\t\t\telse:\n\t\t\t\t\tself.parameters[prod] *= 1.0/rn[prod[0]]\n\t\tself.trim_zeros()\n\t\tself.set_log_parameters()\n\t\treturn self", "title": "" }, { "docid": "0e562d84de910866b7f2e9ce09c62f04", "score": "0.48175946", "text": "def densFunc(z):\t\n\t\treturn rho00 + 0.5 * (rho01-rho00) * (1 + np.tanh((z-ze)/we))", "title": "" }, { "docid": "12b2038a3047cfa21d8adad453bad6a9", "score": "0.4816128", "text": "def build_distribution_normalization_matrix(self):\n vector = np.array((self.distribution_matrix * self.xtable_matrix).sum(axis=1)).T\n mask = vector > 0\n vector[mask] = 1 / vector[mask]\n return diags(vector, [0], format=\"csr\", dtype=np.float32)", "title": "" }, { "docid": "a495785d1aad265edf5f3d89225b4319", "score": "0.4811534", "text": "def d2matrix(dx):\n N, Nngb, dim = dx.shape\n N_derivs = 2*dim + comb(dim, 2, exact=True)\n A = np.empty((N, Nngb, N_derivs), dtype=np.float64) \n for k in range(N):\n for i in range(Nngb):\n for j in range(N_derivs):\n if j < dim:\n A[k,i,j] = dx[k,i,j] \n elif j < 2*dim:\n A[k,i,j] = dx[k,i,j-dim] * dx[k,i,j-dim] / 2\n else:\n A[k,i,j] = dx[k,i,(j+1)%dim] * dx[k,i,(j+2)%dim] # this does the cross-terms, e.g. xy, xz, yz\n return A", "title": "" }, { "docid": "170bba8658ede3ab821a07eb00ffc8e7", "score": "0.48088142", "text": "def d(self):\n\n # Calculate and return the local displacement vector\n return matmul(self.T, self.D())", "title": "" }, { "docid": "b836ed4e795f661348350e710cb2bb65", "score": "0.48069096", "text": "def density(self, q, t=0.0):\n q = self._remove_units_prepare_shape(q)\n orig_shape, q = self._get_c_valid_arr(q)\n t = self._validate_prepare_time(t, q)\n ret_unit = self.units[\"mass\"] / self.units[\"length\"] ** 3\n return (self._density(q, t=t).T * ret_unit).to(self.units[\"mass density\"])", "title": "" }, { "docid": "8cc5445f9fee2382cf934dd683ea59bf", "score": "0.48019257", "text": "def calculate_density(self):\n wavelet_density = len(self.phi_matrices[0].nonzero()[0])/(self.graph.number_of_nodes()**2)\n wavelet_density = str(round(100*wavelet_density, 2))\n inverse_wavelet_density = len(self.phi_matrices[1].nonzero()[0])/(self.graph.number_of_nodes()**2)\n inverse_wavelet_density = str(round(100*inverse_wavelet_density, 2))\n print(\"Density of wavelets: \"+wavelet_density+\"%.\")\n print(\"Density of inverse wavelets: \"+inverse_wavelet_density+\"%.\\n\")", "title": "" }, { "docid": "a22dc3d06834558bc2cdfe95c4af1565", "score": "0.48018953", "text": "def DesignMatrix(states):\n N = np.size(states, 0)\n size3 = (N, L ** 2)\n X = np.zeros(size3)\n\n for i in range(0, N):\n X[i] = np.outer(states[i, :], states[i, :]).reshape(1, -1) # .ravel()\n return X", "title": "" }, { "docid": "92a80ae3e4b8a7df27f667e195552e8e", "score": "0.4795498", "text": "def _normalize_adj_diag_enhance_dense(adj, diag_lambda:float, improved=False):\n fill_value = 1 if not improved else 2\n \n adj = add_self_loop_dense(adj, fill_value=fill_value)\n degree = torch.sum(adj, dim=-1)\n degree_inv = 1 / degree\n degree_inv[degree_inv == float(\"inf\")] = 0\n adj = adj * degree_inv.reshape(-1, 1)\n\n adj += (diag_lambda * adj.diag()).diag()\n\n return adj", "title": "" }, { "docid": "b0377a23d2b9b5ab5ed62edfd4e05488", "score": "0.4789015", "text": "def _dcorss_dm(self):\n\n dcdm = np.empty((self.num_mog_comp, self.num_latent_proc, self.num_inducing))\n for j in range(self.num_latent_proc):\n dcdm[:, j, :] = -cho_solve((self.chol[j, :, :], True), self.MoG.m[:, j, 
:].T).T * self.MoG.pi[:, np.newaxis]\n return dcdm", "title": "" }, { "docid": "eb66346d8ffa513bf4e6829ee29cc39d", "score": "0.478672", "text": "def FssInvDiag(self, recompute = True):\r\n raise NotImplementedError", "title": "" }, { "docid": "bc864306e26f90c471f48a40ac16a0ec", "score": "0.4782814", "text": "def rh_density(self, init_dens, gamma, mach):\n\n dens_ratio = ((gamma+1)*mach**2)/(2+(gamma-1)*mach**2)\n\n final_dens = dens_ratio*init_dens\n\n return final_dens", "title": "" }, { "docid": "039502c9e7c7e8d4ad0fd52010a922ed", "score": "0.47798538", "text": "def evalulate_function(self, disk_density: Callable[[torch.Tensor, torch.Tensor], torch.Tensor]) -> torch.Tensor:\n rmat, zmat = torch.meshgrid(self.rmid, self.zmid)\n return disk_density(rmat, zmat)", "title": "" }, { "docid": "9564f033d5d7d266c571edf867268ed7", "score": "0.4779669", "text": "def _dcross_ds(self):\n\n dc_ds = np.empty((self.num_mog_comp, self.num_latent_proc, self.MoG.get_sjk_size()))\n for j in range(self.num_latent_proc):\n dc_ds[:, j] = -1. / 2 * np.array(\n [self.MoG.dAinvS_dS(self.chol[j, :, :], k, j) * self.MoG.pi[k] for k in range(self.num_mog_comp)])\n return dc_ds", "title": "" }, { "docid": "43d44e17c8cab7dfd74d9dfe4bbc03ce", "score": "0.47766522", "text": "def compute_dci_d(importances):\n\n # normalize across the state_variable dimension, so we get the importance of\n # each state variable for a slot\n slot_normalized_importances = importances / importances.sum(axis=0)\n sni = slot_normalized_importances\n\n # each entropy computed is the entropy for a slot over all the state variables\n slot_entropy = entropy(sni, base=importances.shape[0])\n dci_ds = 1 - slot_entropy\n dci_d = dci_ds.mean()\n\n return dci_d", "title": "" }, { "docid": "63108ffb70e437a436de9e0fe5091598", "score": "0.47744197", "text": "def current_density(self, xyz):\n return self.sigma * self.electric_field(xyz)", "title": "" }, { "docid": "a8697247f45a6c2c24dd1e3ea7b3ffc0", "score": "0.47690842", "text": "def stiffness_matrix_assembly(self,rho,sparse=True):\n rho = rho.flatten(order='F')\n if sparse is False:\n self.stiffness_matrix = np.zeros((self.mesh.ndof,self.mesh.ndof))\n mat_idx = np.indices((8,8))\n I = mat_idx[0].flatten()\n J = mat_idx[1].flatten()\n # assemble stiffness matrix\n for ele in range(len(self.mesh.edofMat)):\n edof = self.mesh.edofMat[ele]\n self.stiffness_matrix[edof[I],edof[J]] += (self.Emin+(rho[ele])**self.penal*(self.Emax-self.Emin))*self.KE[I,J]\n elif sparse is True:\n iK = np.kron(self.mesh.edofMat,np.ones((8,1))).flatten()\n jK = np.kron(self.mesh.edofMat,np.ones((1,8))).flatten()\n sK=((self.KE.flatten()[np.newaxis]).T*(self.Emin+(rho)**self.penal*(self.Emax-self.Emin))).flatten(order='F')\n self.stiffness_matrix = sp.coo_matrix((sK,(iK,jK)),shape=(self.mesh.ndof,self.mesh.ndof)).tocsc()\n\n return self.stiffness_matrix", "title": "" }, { "docid": "af13d60edf934fd69ff211074ce62cb7", "score": "0.47683695", "text": "def simplify(self):\r\n fact = greatest_common_divisor(self.__numerator, self.__denominator)\r\n self.__numerator = self.__numerator//fact\r\n self.__denominator = self.__denominator//fact", "title": "" }, { "docid": 
"2e023b8c609745368bc2f84375d6a06c", "score": "0.47601324", "text": "def simplify_state(state: SDFGState, remove_views: bool = False) -> MultiDiGraph:\n\n sdfg = state.parent\n\n # Copy the whole state\n G = MultiDiGraph()\n for n in state.nodes():\n G.add_node(n)\n for n in state.nodes():\n for e in state.all_edges(n):\n G.add_edge(e.src, e.dst)\n # Collapse all mappings and their scopes into one node\n scope_children = state.scope_children()\n for n in scope_children[None]:\n if isinstance(n, nodes.EntryNode):\n G.add_edges_from([(n, x) for (y, x) in G.out_edges(state.exit_node(n))])\n G.remove_nodes_from(scope_children[n])\n # Remove all nodes that are not AccessNodes or have incoming\n # wcr edges and connect their predecessors and successors\n for n in state.nodes():\n if n in G.nodes():\n if (not isinstance(n, nodes.AccessNode) or (remove_views and isinstance(sdfg.arrays[n.data], data.View))):\n for p in G.predecessors(n):\n for c in G.successors(n):\n G.add_edge(p, c)\n G.remove_node(n)\n else:\n for e in state.all_edges(n):\n if e.data.wcr is not None:\n for p in G.predecessors(n):\n for s in G.successors(n):\n G.add_edge(p, s)\n G.remove_node(n)\n break\n\n return G", "title": "" }, { "docid": "5852be77521a54263f39fb8e44ef4dce", "score": "0.47589034", "text": "def ddx(n):\n return sp.spdiags((np.ones((n + 1, 1)) * [-1, 1]).T, [0, 1], n, n + 1, format=\"csr\")", "title": "" }, { "docid": "1305563ab5d730662b91cba0c765426e", "score": "0.4755412", "text": "def compute(self):\n m, n = self.D.shape\n\n # Allocate memory.\n # We need +2 because we use indices starting from 1\n # and to deal with edge cases in the backward recursion.\n self.R_ = np.zeros((m+2, n+2), dtype=np.float64)\n\n _soft_dtw(self.D, self.R_, gamma=self.gamma)\n\n return self.R_[m, n]", "title": "" }, { "docid": "2ff928f269c5ac8d7b3d2ae000cd42a5", "score": "0.47552603", "text": "def density(self, r):\n return np.where(r > self.r_min, self.rho_s / (r/self.r_s) / (1. + r/self.r_s)**3, 0.)", "title": "" }, { "docid": "779305611eb50458e49f536acc482e7f", "score": "0.47546223", "text": "def to_dense(self):\n if self.size is None:\n size = max(self.d.keys()) + 1\n else:\n size = self.size\n dense = np.zeros(size)\n for k,v in self.d.items():\n dense[k] = v\n return dense", "title": "" }, { "docid": "1db22c46cf4d3e4a22ca0797871bab30", "score": "0.4754565", "text": "def density(self, r):\n return np.where(r > self.r_min, self.rho_s / (r/self.r_s) / (1. 
+ r/self.r_s)**2, 0.)", "title": "" }, { "docid": "be8eb30b81846b14a296dec1b1e7d2ea", "score": "0.47464868", "text": "def reduced_formula(self):\n if self.structure is not None:\n return self.composition.reduced_formula\n return \"Unkonwn\"", "title": "" }, { "docid": "efbd2abf113aed04471eebea96b78baf", "score": "0.47410783", "text": "def s_d(self) -> float:\n return self.s0 + 1 / (self.w * self.k_x) * self.v_t", "title": "" }, { "docid": "f15bc534a6a908e854b492720e967cf5", "score": "0.47379398", "text": "def diagonalize_del_dm_in_sbasis_in_degenerate_spaces(self):\n classes = ut.get_equiv_classes(list(self.dm0_eigen_sys[0]))\n umat = np.eye(self.del_dm.num_rows, dtype=complex)\n # eq = equivalence\n for eq_class in classes:\n dim = len(eq_class)\n if dim > 1:\n if self.verbose:\n print(\"non-trivial eq class\", eq_class)\n eq_class = sorted(eq_class)\n arr = np.empty((dim, dim), dtype=complex)\n for k1, kk1 in enumerate(eq_class):\n for k2, kk2 in enumerate(eq_class):\n arr[k1, k2] = self.del_dm_in_sbasis[kk1, kk2]\n norm_arr = np.linalg.norm(arr)\n # print('norm arr', norm_arr)\n if norm_arr > 1e-4:\n _, evec_cols = np.linalg.eigh(arr)\n assert ut.is_unitary_arr(evec_cols)\n # print('bbbbbbbb', np.around(np.dot(np.dot(\n # evec_cols.conj().T,\n # arr), evec_cols).real,4))\n for k1, kk1 in enumerate(eq_class):\n for k2, kk2 in enumerate(eq_class):\n umat[kk1, kk2] = evec_cols[k1, k2]\n assert ut.is_unitary_arr(umat)\n # print('ccccccccnorm of rotated_del_dm', np.linalg.norm(\n # self.del_dm_in_sbasis.arr))\n rotated_del_dm = np.dot(\n np.dot(umat.conj().T, self.del_dm_in_sbasis.arr), umat)\n # print('norm of rotated_del_dm', np.linalg.norm(rotated_del_dm))\n self.del_dm_in_sbasis.arr = rotated_del_dm", "title": "" }, { "docid": "b666a78a3c4f8253b314a53a530136dd", "score": "0.4735516", "text": "def pgradient(self):\n d = {}\n\n # Det coeff\n curr_val = self.value()\n nonzero = curr_val[0] != 0.0\n\n # dets[spin][ (phase,log), configuration, determinant]\n dets = (\n self._dets[0][:, :, self._det_map[0]],\n self._dets[1][:, :, self._det_map[1]],\n )\n\n d[\"det_coeff\"] = gpu.cp.zeros(dets[0].shape[1:], dtype=dets[0].dtype)\n d[\"det_coeff\"][nonzero, :] = (\n dets[0][0, nonzero, :]\n * dets[1][0, nonzero, :]\n * gpu.cp.exp(\n dets[0][1, nonzero, :]\n + dets[1][1, nonzero, :]\n - gpu.cp.array(curr_val[1][nonzero, np.newaxis])\n )\n / gpu.cp.array(curr_val[0][nonzero, np.newaxis])\n )\n\n for s, parm in zip([0, 1], [\"mo_coeff_alpha\", \"mo_coeff_beta\"]):\n ao = self._aovals[\n :, :, s * self._nelec[0] : self._nelec[s] + s * self._nelec[0], :\n ]\n\n split, aos = self.orbitals.pgradient(ao, s)\n mos = gpu.cp.split(gpu.cp.arange(split[-1]), gpu.asnumpy(split).astype(int))\n # Compute dj Diu/Diu\n nao = aos[0].shape[-1]\n nconf = aos[0].shape[0]\n nmo = int(split[-1])\n deriv = gpu.cp.zeros(\n (len(self._det_occup[s]), nconf, nao, nmo), dtype=curr_val[0].dtype\n )\n for det, occ in enumerate(self._det_occup[s]):\n for ao, mo in zip(aos, mos):\n for i in mo:\n if i in occ:\n col = occ.index(i)\n deriv[det, :, :, i] = self._testcol(det, col, s, ao)\n\n # now we reduce over determinants\n d[parm] = gpu.cp.zeros(deriv.shape[1:], dtype=curr_val[0].dtype)\n for di, coeff in enumerate(self.parameters[\"det_coeff\"]):\n whichdet = self._det_map[s][di]\n d[parm] += (\n deriv[whichdet]\n * coeff\n * d[\"det_coeff\"][:, di, np.newaxis, np.newaxis]\n )\n\n for k, v in d.items():\n d[k] = gpu.asnumpy(v)\n\n for k in list(d.keys()):\n if np.prod(d[k].shape) == 0:\n del d[k]\n return d", "title": "" }, { 
"docid": "5b62944f2f5f14daf76514400d4af117", "score": "0.4733377", "text": "def __repr__(self):\n stats = self.population.getStats(self.truncFrac)\n outStr = (\"\"\"DeMin state:\n Generation: %s\n best trial solution's cost value: %s\n best trial solution: %s\\n\n worst trial solution's cost value: %s\n worst trial solution: %s\\n\n mean cost value: %s\n standard deviation of cost values: %s \n fractional tolerance: %s\n chi-square: %s\n ndf: %s\n np_trunc: %s\\n\n Parameter mean values:\\n%s\\n\n Covariance Matrix for np_trunc members:\\n%s\n \"\"\" % (self.population.generation,\n stats[0],\n self.population[self.population.ibest].x,\n stats[1],\n self.population[self.population.iworst].x,\n stats[2],\n stats[3],\n stats[4],\n stats[5],\n stats[6],\n self.population.getNpTrunc(self.truncFrac),\n stats[7],\n self.population.getCovMatRepr(stats[8])))\n return outStr", "title": "" } ]
8c117fa933aa0261b2f095be44a1acc1
Datatype of data in the Column.
[ { "docid": "05b88be13604b1f61d82f087e7106449", "score": "0.0", "text": "def type(self) -> str:\n return pulumi.get(self, \"type\")", "title": "" } ]
[ { "docid": "5b8088fbe0ac252d28bc703bdde1e7c3", "score": "0.8037336", "text": "def datatype(self):\n return self._datatype", "title": "" }, { "docid": "48de7eb74ceee9fe56fd65453b09294b", "score": "0.7955723", "text": "def dtype(self):\n return self._datatype", "title": "" }, { "docid": "eb12f10ae87cbd514e47d2c2795b615f", "score": "0.78110397", "text": "def data_type(self):\n return self.__data_type", "title": "" }, { "docid": "6d4ed81bb5320d6c2423bb3d69df574e", "score": "0.7808144", "text": "def data_type(self):\n return self._data_type", "title": "" }, { "docid": "40c7e089bbf793d39ec281cc356feabe", "score": "0.778775", "text": "def data_type(self):\n\n return self._data_type", "title": "" }, { "docid": "52e16ba8c58e3994b0c6693904280082", "score": "0.75562376", "text": "def getDataType(self):\n return self.data_type", "title": "" }, { "docid": "fe38f5416da9523ff5363ef844ee1eee", "score": "0.7527811", "text": "def dtype(self):\n return self.data.dtype", "title": "" }, { "docid": "fe38f5416da9523ff5363ef844ee1eee", "score": "0.7527811", "text": "def dtype(self):\n return self.data.dtype", "title": "" }, { "docid": "fe38f5416da9523ff5363ef844ee1eee", "score": "0.7527811", "text": "def dtype(self):\n return self.data.dtype", "title": "" }, { "docid": "3e025b354e1a7c9e829d89648c7c1087", "score": "0.7440406", "text": "def dtype(self):\n return self._data.dtype", "title": "" }, { "docid": "3d7efb8bff7b892e5d43fe3f26645ce4", "score": "0.74388254", "text": "def get_data_type(self):\n return self._data_type", "title": "" }, { "docid": "35c0a3df1a58449164ab6d9e7873efef", "score": "0.7437267", "text": "def GetColumnType(self, col):", "title": "" }, { "docid": "35c0a3df1a58449164ab6d9e7873efef", "score": "0.7437267", "text": "def GetColumnType(self, col):", "title": "" }, { "docid": "b0e1c4ffe8d888a052cd055327d933a5", "score": "0.74280405", "text": "def type(self):\n return self.data_type", "title": "" }, { "docid": "66e145ddf40c8ad79cf7f5ccd449da7c", "score": "0.74093187", "text": "def _derive_data_type(column: Dict[str, Any]) -> str:\n data_type = column['data_type']\n\n if data_type in ['bigint',\n 'int',\n 'smallint',\n 'tinyint',\n 'bit',\n 'money',\n 'smallmoney',\n 'float',\n 'real',\n 'date',\n 'datetime',\n 'datetime2',\n 'datetimeoffset',\n 'smalldatetime',\n 'time',\n 'text',\n 'ntext',\n 'binary',\n 'image',\n 'xml',\n 'geography',\n 'geometry',\n 'sysname'\n ]:\n return data_type\n\n if data_type in ['decimal', 'numeric']:\n return 'decimal({0:d},{1:d})'.format(column['precision'], column['scale'])\n\n if data_type == 'char':\n return 'char({0:d})'.format(column['max_length'])\n\n if data_type == 'varchar':\n if column['max_length'] == -1:\n return 'varchar(max)'\n\n return 'varchar({0:d})'.format(column['max_length'])\n\n if data_type == 'nchar':\n return 'nchar({0:d})'.format(int(column['max_length'] / 2))\n\n if data_type == 'nvarchar':\n if column['max_length'] == -1:\n return 'nvarchar(max)'\n\n return 'nvarchar({0:d})'.format(int(column['max_length'] / 2))\n\n if data_type == 'varbinary':\n return 'varbinary({0:d})'.format(column['max_length'])\n\n raise Exception(\"Unexpected data type '{0}'\".format(data_type))", "title": "" }, { "docid": "a879494c135a9817e6cda2d6c14a3a40", "score": "0.7405552", "text": "def type(self):\n return self.dtype().value", "title": "" }, { "docid": "a1c3a4c4cfae33d72a8a32d47fd78116", "score": "0.7386811", "text": "def dtype(self):\r\n return self._dtype", "title": "" }, { "docid": "73d5c050d0850073e083a35754827e37", "score": "0.73845667", "text": "def 
dtype(self):\n return self._dtype", "title": "" }, { "docid": "e01c3da06324ecb23284e4c7ccb2cf30", "score": "0.7310465", "text": "def get_data_type(self) -> np.dtype:\n return self.type", "title": "" }, { "docid": "c5f63fcfec5353a3291c309f11fa05fd", "score": "0.7302992", "text": "def dtype(self):\n return self._dtype", "title": "" }, { "docid": "2908046b45c31bc75a4a2a8806286407", "score": "0.72685397", "text": "def get_dtype(self):\n pass", "title": "" }, { "docid": "d574b68629284c0f77e3c5f614641524", "score": "0.7255222", "text": "def getDataType(self):\n\n return 'Scalar'", "title": "" }, { "docid": "8759282617828147bf72a15054f23221", "score": "0.72273684", "text": "def dtype(self):\n return self.__dtype", "title": "" }, { "docid": "af54a1addb8847db50a38cb5f6a5ec52", "score": "0.7204325", "text": "def dtype(self):\n if self.view_attr is None and self.schema.nattr > 1:\n raise NotImplementedError(\"Multi-attribute does not have single dtype!\")\n return self.schema.attr(0).dtype", "title": "" }, { "docid": "ffd9c01758dc76f6ddf927924726b4c1", "score": "0.71553457", "text": "def dtype(self):\n return self._dtype", "title": "" }, { "docid": "a846b510367f7c1ed56f736981e17466", "score": "0.7122086", "text": "def getDataType(self) -> ghidra.program.model.data.DataType:\n ...", "title": "" }, { "docid": "8fb6045337f96701627acbda5c3ae2ae", "score": "0.71182996", "text": "def dtype(self):\n return self._impl.dtype", "title": "" }, { "docid": "115f152d1e6edb1fa8e49d3d0c8a01b5", "score": "0.71133626", "text": "def dtype(self):\n\n return self.description._v_dtype", "title": "" }, { "docid": "95ea02a66e8954707b382f68edbb70e0", "score": "0.7089385", "text": "def get_dtype(self):\n return self.dtype", "title": "" }, { "docid": "2d9d34d1426fa35423718cbbb253ae45", "score": "0.7053333", "text": "def export_datatype(self):\n raise NotImplementedError", "title": "" }, { "docid": "64f92513c029247743fde4da8da4c601", "score": "0.70321596", "text": "def data_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"data_type\")", "title": "" }, { "docid": "cdce282b1c2cb466e62f311a8e837196", "score": "0.69717824", "text": "def data_type(self):\n # datetime and date to int\n date_cols = ['visit_date', 'prev_visit_date', 'prev_item_move_date',\n 'last_edit_date', 'creation_date']\n for col in date_cols:\n # convert multiple time formats into single string format\n self.df[col] = pd.to_datetime(self.df[col]).dt.strftime('%Y-%m-%d %H:%M:%S')\n # make time features specific data type in order to distinguish from other numberic values\n self.df['{}_int'.format(col)] = self.df.apply(self._date_to_int, col=col, axis=1).astype(np.float32)\n # convert string format back into datetime\n self.df[col] = pd.to_datetime(self.df[col])\n # objects\n obj_cols = ['ship_id', 'address1', 'customer_id', 'sales_rep_id', 'item_id', 'old_item_id',\n 'item_UPC', 'old_item_UPC', 
'ship_list_pk', 'sales_rep_id_2', 'list_header_id']\n for col in obj_cols:\n self.df[col] = self.df[col].astype(object)", "title": "" }, { "docid": "970d2776f6fcf0c468381762f0026068", "score": "0.69492733", "text": "def dtype(x):\n\t return x.dtype.name", "title": "" }, { "docid": "a4d35de6d937f89f506dd8465fa7e55f", "score": "0.69273543", "text": "def dtype(self):\n return self._dats[0].dtype", "title": "" }, { "docid": "cc13474be25c43ba2a4f847adfad3c1e", "score": "0.69220364", "text": "def data_type(self) -> Optional[str]:\n return pulumi.get(self, \"data_type\")", "title": "" }, { "docid": "2e581d5c45d038177e9e16ff2d3f0f65", "score": "0.69217163", "text": "def getDtype(self):\n return self.dtype_str", "title": "" }, { "docid": "4b20ec86c0865de94c347bbe146ffe34", "score": "0.6917254", "text": "def get_data_type(self, df, col):\n if col not in df.columns:\n raise KeyError(f'Column \"{col:s}\" not in input dataframe.')\n dt = dict(df.dtypes)[col]\n # spark conversions to numpy or python equivalent\n if dt == \"string\":\n dt = \"str\"\n elif dt in [\"timestamp\", \"date\"]:\n dt = np.datetime64\n elif dt == \"boolean\":\n dt = bool\n elif dt == \"bigint\":\n dt = np.int64\n elif dt.startswith(\"decimal(\"):\n return np.dtype(float, metadata={\"decimal\": True})\n\n return np.dtype(dt)", "title": "" }, { "docid": "6e6a4dbea08f1e018c1f3e584fd04e63", "score": "0.6893777", "text": "def dtype(self) -> torch.dtype:\n pass # pragma: no cover", "title": "" }, { "docid": "6e11f987f48a548c782a79d62674f408", "score": "0.6882582", "text": "def ctype(self):\n return self.data.ctype", "title": "" }, { "docid": "5d70b338904a1e355f32e2c3c8c9575d", "score": "0.68728685", "text": "def data_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"data_type\")", "title": "" }, { "docid": "f17bd447341f0fed24483c839c5da49f", "score": "0.6854025", "text": "def as_type(self):\n\t\ttypes = {\n\t\t\t'float': self.numeric_f,\n\t\t\t'bool': self.binary_class_f,\n\t\t\t'str': self.multi_class_f + self.id_f,\n\t\t\t'datetime64[ns]': self.datetime_f\n\t\t}\n\n\t\tfor i in types.keys():\n\t\t\tif len(types[i]) > 0:\n\t\t\t\tif i == 'bool':\n\t\t\t\t\tself.dataset.loc[:, types[i]] = self.dataset.loc[:, types[i]].astype(i).astype('float')\n\t\t\t\telse:\n\t\t\t\t\tself.dataset.loc[:, types[i]] = self.dataset.loc[:, types[i]].astype(i)", "title": "" }, { "docid": "2240a99a83a8ce6962bb7e2d8dfc4adb", "score": "0.68359935", "text": "def transform_datatype(self, datatype):\n return datatype", "title": "" }, { "docid": "698bd5a61e5e91af828f4d065d7a5ca1", "score": "0.68257904", "text": "def dtype(self):\n return self._snode._dtype", "title": "" }, { "docid": "8e59b8ca93a41098a598cdf8ae49cfdb", "score": "0.6806045", "text": "def _get_column_type(self) -> None:\n rows = self._dl.get_all_table_columns()\n for row in rows:\n key = '@{0}.{1}.{2}%type@'.format(row['schema_name'], row['table_name'], row['column_name'])\n key = key.lower()\n\n value = self._derive_data_type(row)\n\n self._replace_pairs[key] = value\n\n self._io.text('Selected {0} column types for substitution'.format(len(rows)))", "title": "" }, { "docid": "34c9b7ff2489d05df3d53ede2b72a675", "score": "0.6774951", "text": "def table_column_type(self, i):\n with self.free_cstring(_lib.TableColumnType(self.table, i)) as typ:\n return typ", "title": "" }, { "docid": 
"4efb21d1ed016e4286e0138e960e1427", "score": "0.6770946", "text": "def dtype(self):\n return self.tensor.dtype", "title": "" }, { "docid": "c44f8e10a39e055c04ffe566290dff5b", "score": "0.67646646", "text": "def ctype(self):\n return as_cstr(self.dtype)", "title": "" }, { "docid": "3de070e4c310a5a07ad0e41eb0e5af3a", "score": "0.6740433", "text": "def form_datatype(self):\n dt = self.dataype_name()\n nD = str(len(self.nda.shape))\n et = get_lh5_element_type(self)\n return dt + '<' + nD + '>{' + et + '}'", "title": "" }, { "docid": "7ab6124b611861e8f7261b35fba0b7ba", "score": "0.67115027", "text": "def get_type(self):\n return self.header.data_type", "title": "" }, { "docid": "30eb89c3b7c6e89211db4d8a2626c2ae", "score": "0.6709729", "text": "def dtype(self):\n return self._get_component(\"array\").dtype", "title": "" }, { "docid": "fbc6a7de0d3fa23da85fef18a59483ac", "score": "0.66972804", "text": "def _get_predefined_datatype(self):\n return None", "title": "" }, { "docid": "d72ee64ed3cb0350c1e14d545c185955", "score": "0.669686", "text": "def convert_datatype(coldatatype):\n\n datatype = SqlType.text()\n def_value = ''\n \n if 'datetime' in coldatatype.lower():\n datatype = SqlType.timestamp()\n elif 'str' in coldatatype.lower():\n datatype = SqlType.text()\n elif 'boolean' in coldatatype.lower():\n datatype = SqlType.bool()\n elif 'int' in coldatatype.lower():\n datatype = SqlType.int()\n def_value = 0\n elif 'float' in coldatatype.lower():\n datatype = SqlType.double()\n def_value = 0\n elif 'period' in coldatatype.lower():\n datatype = SqlType.interval()\n elif 'object' in coldatatype.lower():\n datatype = SqlType.text()\n else:\n datatype = SqlType.text()\n \n return (datatype,def_value)", "title": "" }, { "docid": "7d21875c687169cd72cd948c52e38935", "score": "0.66671395", "text": "def dtypes_of(data):\n return data.dtype", "title": "" }, { "docid": "1e6961728ac2e377ee3f8d845ee1a929", "score": "0.6664429", "text": "def data_type(self) -> pulumi.Input['InstructionDataType']:\n return pulumi.get(self, \"data_type\")", "title": "" }, { "docid": "2eb54b93b32955b17f8f324a6815ba54", "score": "0.66327494", "text": "def dtype(self):\n\n hdr = self.header()\n\n # Get numpy data type\n dtype = {'char': np.byte,\n 'short': np.short,\n 'int': np.intc,\n 'long': np.int_,\n 'ushort': np.ushort,\n 'uint': np.uintc,\n 'float': np.single,\n 'vec3f': np.single,\n 'vec9f': np.single,\n 'double': np.double}[hdr['type']]\n dtype = np.dtype(dtype)\n\n # Get python byte order string\n endian = {'little_endian': '<',\n 'big_endian': '>'}[hdr['systeme']]\n\n # Return data type with correct order\n return dtype.newbyteorder(endian)", "title": "" }, { "docid": "d3ff223c19791fe116cb325d66c1bf29", "score": "0.6632438", "text": "def dtype(self) -> 'np.dtype':\n pass", "title": "" }, { "docid": "81f1542bddac468ee8c73316b63c956b", "score": "0.66217273", "text": "def value_type(self):\n return self._value_type", "title": "" }, { "docid": "ca8e1547158b09092fcee6c52435901b", "score": "0.6602762", "text": "def get_data_type(cls) -> str:\n raise NotImplementedError", "title": "" }, { "docid": "afccd0b6a180a8c7c07bab235818ef65", "score": "0.65588313", "text": "def dtype(self) -> np.dtype:\n return self.__dtype", "title": "" }, { "docid": "54fd9c1f535c68e9c4072ec355200553", "score": "0.65281224", "text": "def detect_datatype(data):\n if type(data) == type(np.empty(0)):\n return DataType.NUMPY\n elif type(data) == type(pd.DataFrame({'A': []})):\n return DataType.DATAFRAME", "title": "" }, { "docid": 
"abbb449013006308c392dbf38126154f", "score": "0.6517055", "text": "def dtype(self):\n\n return self.descr._v_dtypes[self.name].base # Get rid of shape info", "title": "" }, { "docid": "ce26c164a3ccce72b91a16f49eb2d172", "score": "0.65145123", "text": "def dtypes(self) -> List[str]:\n pass", "title": "" }, { "docid": "ad950edaae613c0833cc9ef9121bb241", "score": "0.65138894", "text": "def dtype(self):\n return type(self._get_sequence_iloc_(0)) if len(self) > 0 else None", "title": "" }, { "docid": "4dabbabfbf6955d71241653343428b38", "score": "0.6507724", "text": "def getSizeDataType(self) -> ghidra.program.model.data.DataType:\n ...", "title": "" }, { "docid": "cb0db954e693796b7be6c46f5f716fce", "score": "0.65049326", "text": "def _determine_column_type(data_types):\n # Allow None which will be converted to NaN.\n if all(\n issubclass(t, (_numbers.Number, type(None))) and not issubclass(t, bool)\n for t in data_types\n ):\n return 'number'\n return 'string'", "title": "" }, { "docid": "491f6c839537d99f47524e1b16fea22e", "score": "0.6501615", "text": "def type_value(ctype: Any, value: Any):\n\n # Boolean\n if ctype == platform.xlrd.XL_CELL_BOOLEAN:\n return bool(value)\n\n # Excel numbers are only float\n # Float with no decimals can be cast into int\n if ctype == platform.xlrd.XL_CELL_NUMBER and value == value // 1:\n return int(value)\n\n # Datetime\n if ctype == platform.xlrd.XL_CELL_DATE:\n return platform.xlrd.xldate.xldate_as_datetime(value, book.datemode)\n\n return value", "title": "" }, { "docid": "eff8705d409d3f0d737ed36858dddd70", "score": "0.6487331", "text": "def get_field_type(self, data_type, row):\n return DatabaseIntrospection.data_types_reverse[row[7]]", "title": "" }, { "docid": "d6e1e39e63e4d422f5e3267c1fd821fd", "score": "0.64852417", "text": "def _get_dtype(self) -> np.dtype:\n return self._dtype", "title": "" }, { "docid": "06216a5ce712b85ec41a8045033e184f", "score": "0.64840126", "text": "def get_data_type(self, column, table):\n\t\ttry:\n\t\t\ttype_override = self.type_override[column[\"column_type\"]]\n\t\t\toverride_to = type_override[\"override_to\"]\n\t\t\toverride_tables = type_override[\"override_tables\"]\n\t\t\t\n\t\t\tif override_tables[0] == '*' or table in override_tables:\n\t\t\t\tcolumn_type = override_to\n\t\t\telse:\n\t\t\t\tcolumn_type = self.type_dictionary[column[\"data_type\"]]\n\t\texcept:\n\t\t\tcolumn_type = self.type_dictionary[column[\"data_type\"]]\n\t\treturn column_type", "title": "" }, { "docid": "e43c2ffb38d4ac01066a916166a7693b", "score": "0.64505047", "text": "def data_types(self) -> dict:\n return self._data_types", "title": "" }, { "docid": "963156aaa9eba32adec44835ab8da0ba", "score": "0.6433906", "text": "def evaluateColumnTypeV2(valColumn):\r\n res = \"numerical\"\r\n\r\n # get a column without missing val\r\n l = [val for val in valColumn if val != \"\"]\r\n\r\n if (len(l) == 0):\r\n res = \"empty\"\r\n else:\r\n try:\r\n for val in l:\r\n float(val)\r\n\r\n res = \"numerical\"\r\n pass\r\n except ValueError:\r\n res = \"categorical\"\r\n pass\r\n \r\n return res", "title": "" }, { "docid": "6463851be879cbcc32965429551efc01", "score": "0.64146054", "text": "def test_datatype(self):\n self.assertTrue(lib.INTEGER.kind_of(lib.NUMBER))\n self.assertTrue(lib.FLOAT.kind_of(lib.NUMBER))\n self.assertTrue(lib.INTEGER.kind_of(env.ANY))", "title": "" }, { "docid": "af0d2ae1fbf4adc2260cf4b13988c771", "score": "0.64124054", "text": "def data_type(a):\n\ta_type =type(a)\n\tif a_type == str:\n\t\tif len(a) == 0:\n\t\t\treturn 'no 
value'\n\t\telse:\n\t\t\treturn len(a)\n\t\t\n\telif a_type ==bool:\n\t\treturn a\n\telif a_type ==int:\n\n\t\tif a == 100:\n\t\t\treturn 'equal to 100'\n\t\telif a < 100:\n\t\t\treturn 'less than 100'\n\n\t\telse:\n\t\t\treturn 'more than 100'\n\n\telif a_type ==list:\n\t\ttry:\n\t\t\tif a[2]:\n\t\t\t\treturn a[2]\n\t\texcept(IndexError):\n\t\t\treturn None\n\t\telse:\n\t\t\treturn 'no value'", "title": "" }, { "docid": "49436abf6ea1757f0b1750bdd28bcbc4", "score": "0.63909143", "text": "def column_data(self, column_name):\n for col in self.raw_frame.columns:\n if col.name == column_name:\n if col.dtype == fpb.INTEGER:\n return col.ints\n elif col.dtype == fpb.FLOAT:\n return col.floats\n elif col.dtype == fpb.STRING:\n return col.strings\n elif col.dtype == fpb.TIME:\n return col.times\n elif col.dtype == fpb.BOOLEAN:\n return col.bools\n else:\n raise ReadError('{} - unsupported type - {}'.format(column_name, col.dtype))\n\n raise ReadError('No column named \"{}\"'.format(column_name))", "title": "" }, { "docid": "6d8ac16c1589d50950dd97a4040f2144", "score": "0.63876945", "text": "def column_types(self):\n return self._column_types", "title": "" }, { "docid": "822ed2a73282d41b745f1ac6b8c604d7", "score": "0.6382394", "text": "def dtype(self) -> StringDtype: # type: ignore[override]\n return self._dtype", "title": "" }, { "docid": "aba956cd8d21abd0d782256aba8e033a", "score": "0.63798404", "text": "def cell_type(self, row, col):\n if col == 0: return 'heading'\n else: return 'data'", "title": "" }, { "docid": "ccedb8e1a1ae08ee787daf348c6814b6", "score": "0.6374998", "text": "def dtype(self) -> torch.dtype:\n return self.mvn.loc.dtype", "title": "" }, { "docid": "94113e48de1e1265adaed5b57226040f", "score": "0.6372429", "text": "def generate_col_data(self, col: dict) -> Any:\n if self.null_column_value(col):\n return None\n\n elif col.get(\"enum\"):\n return self.fake_enum(col.get(\"enum\"))\n\n elif col[\"type\"] in [\"int\", \"long\"]:\n return self.fake_int(col)\n\n elif col[\"type\"] in [\"double\", \"float\"]:\n return self.fake_double(col)\n\n elif col[\"type\"] in [\"date\", \"datetime\"]:\n return self.fake_datetime(col)\n\n elif col[\"type\"] == \"boolean\":\n return self.fake_bool()\n\n elif col[\"type\"] == \"character\":\n return self.fake_character(special_type=self.special_cols.get(col[\"name\"]))\n else:\n n = col[\"name\"]\n t = col[\"type\"]\n raise TypeError(f\"Column: {n} has unsupported type: {t}\")", "title": "" }, { "docid": "20b0117e6714bcff7701ddac17787f5e", "score": "0.6360994", "text": "def return_type(self) -> pyspark.sql.connect.proto.types_pb2.DataType:", "title": "" }, { "docid": "0618311691836a44481f571981a2ebb3", "score": "0.63605905", "text": "def getDtype(self):\n return self.params.get(\"dtype\", str)", "title": "" }, { "docid": "1bd90187a38fff80a194a06ed3175960", "score": "0.63502675", "text": "def dtype(self):\n return self._sparseMap.dtype", "title": "" }, { "docid": "c3e8c8137c68f40a9b1a3039362769d5", "score": "0.6344556", "text": "def validate_datatype(self, datatype):\n pass", "title": "" }, { "docid": "0a5f42bc17246b9f9362049fe4a978ec", "score": "0.63274246", "text": "def dtype(self, cid):\n\n # grab a small piece of data\n ind = tuple([slice(0, 1)] * self.ndim)\n arr = self.get_data(cid, view=ind)\n return arr.dtype", "title": "" }, { "docid": "658ed7647a1702558e7c7d63c4b3b069", "score": "0.63121635", "text": "def dtype(self) -> torch.dtype:\n return self.test_mean.dtype", "title": "" }, { "docid": "dff00028133ea3dbd1628442aca30d65", "score": 
"0.63052446", "text": "def convert_column_type(self,column,type=type(1)):\n self.Data_Frame[column]=self.Data_Frame[column].astype(type)", "title": "" }, { "docid": "d49e169d9add8732b4a23006a99fb576", "score": "0.63043797", "text": "def GetTypeOfCol(self, *args):\n return _gdal.RasterAttributeTable_GetTypeOfCol(self, *args)", "title": "" }, { "docid": "4c97ff9a9f5608d6e124c9fe865478e4", "score": "0.63002914", "text": "def get_data_type(self):\n if self.dtype == 0:\n return(\"Pumping rate\")\n elif self.dtype == 1:\n return(\"Drawdown vs time\")\n elif self.dtype == 2:\n return(\"First drawdown derivative\")\n elif self.dtype == 3:\n return(\"First drawdown derivative\")\n elif self.dtype == 4:\n return(\"Drawdown vs distance\")", "title": "" }, { "docid": "5c4d3ad159d19f5bc6e660839c39499f", "score": "0.62953544", "text": "def get_datatype(self):\n return \"SET\"", "title": "" }, { "docid": "1647d03568832267144127b0bb767492", "score": "0.62765014", "text": "def dtype(self):\n bitpix = self._obj._orig_bitpix\n if self._obj._orig_bscale == 1 and self._obj._orig_bzero == 0:\n dtype = fits.BITPIX2DTYPE[bitpix]\n else:\n # this method from astropy will return the dtype if the data\n # needs to be converted to unsigned int or scaled to float\n dtype = self._obj._dtype_for_bitpix()\n\n if dtype is None:\n if bitpix < 0:\n dtype = np.dtype('float{}'.format(abs(bitpix)))\n if (self._obj.header['EXTNAME'] == 'DQ' or self._obj._uint and\n self._obj._orig_bscale == 1 and bitpix == 8):\n dtype = np.uint16\n return dtype", "title": "" }, { "docid": "921a4592765aa88fca4a8f065c3ef7ed", "score": "0.62666196", "text": "def _get_column_type(data, column_index):\n data_types = set()\n for row in data:\n cell = row[column_index]\n t = type(_get_value(cell))\n is_known_type = (\n cell is None or issubclass(t, _numbers.Number) or issubclass(t, str)\n )\n if not is_known_type:\n t = str\n data_types.add(t)\n return _determine_column_type(data_types)", "title": "" }, { "docid": "487a3388fa1a65d48de03b5828ea56b3", "score": "0.62574834", "text": "def db_type(self):\n return 'varchar(100)'", "title": "" }, { "docid": "cece28ed05ef7b4c4b3b2971a00c4bb5", "score": "0.6256657", "text": "def dtype(self):\n if hasattr(self, \"_dtype\"):\n return self._dtype\n\n self._dtype = self.get_frame(0).dtype\n return self._dtype", "title": "" }, { "docid": "b5097c8f62ad072974a1356fb844be87", "score": "0.6234462", "text": "def current_datatype(self):\n app.notify(subtitle=f\"Current C lang datatype: {self.datatype}\")", "title": "" }, { "docid": "5f57a9754a423eaa3d67e11234e443c1", "score": "0.62313956", "text": "def get_column_type(self,column_name):\n #import pdb;pdb.set_trace()\n out = None\n cols = self._get_table_info()\n \n for col in cols:\n if col[1] == column_name:\n out = col['type'].upper()\n break\n \n if out == None:\n raise KeyError(\"That column name is not in this table\")\n \n return out", "title": "" } ]